test/grsecurity-2.2.2-3.0.8-201111010529.patch
1 diff -urNp linux-3.0.8/arch/alpha/include/asm/elf.h linux-3.0.8/arch/alpha/include/asm/elf.h
2 --- linux-3.0.8/arch/alpha/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
3 +++ linux-3.0.8/arch/alpha/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
4 @@ -90,6 +90,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
5
6 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
7
8 +#ifdef CONFIG_PAX_ASLR
9 +#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
10 +
11 +#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
12 +#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
13 +#endif
14 +
15 /* $0 is set by ld.so to a pointer to a function which might be
16 registered using atexit. This provides a mean for the dynamic
17 linker to call DT_FINI functions for shared libraries that have
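
The three PAX_* macros above feed PaX's ASLR: PAX_ELF_ET_DYN_BASE is where ET_DYN (PIE) binaries are mapped, and the *_LEN values are the number of random bits folded into the mmap and stack bases (14 for 32-bit personalities, 28 and 19 otherwise). As a rough, self-contained sketch of how a bit count like this becomes a page-aligned offset -- get_random_bits() below is a stand-in for the kernel's entropy source, not the actual PaX helper:

  #include <stdio.h>
  #include <stdlib.h>
  #include <time.h>

  #define PAGE_SHIFT 13            /* alpha uses 8 KB pages */
  #define PAX_DELTA_MMAP_LEN 28    /* non-32-bit value from the hunk above */

  /* stand-in for the kernel's random number source */
  static unsigned long get_random_bits(void)
  {
  	return ((unsigned long)rand() << 31) ^ (unsigned long)rand();
  }

  int main(void)
  {
  	unsigned long delta;

  	srand(time(NULL));
  	/* keep PAX_DELTA_MMAP_LEN random bits, then convert pages to bytes */
  	delta = (get_random_bits() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
  	printf("mmap base would be shifted by %#lx bytes\n", delta);
  	return 0;
  }
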
18 diff -urNp linux-3.0.8/arch/alpha/include/asm/pgtable.h linux-3.0.8/arch/alpha/include/asm/pgtable.h
19 --- linux-3.0.8/arch/alpha/include/asm/pgtable.h 2011-07-21 22:17:23.000000000 -0400
20 +++ linux-3.0.8/arch/alpha/include/asm/pgtable.h 2011-08-23 21:47:55.000000000 -0400
21 @@ -101,6 +101,17 @@ struct vm_area_struct;
22 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
23 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
24 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
25 +
26 +#ifdef CONFIG_PAX_PAGEEXEC
27 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
28 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
29 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
30 +#else
31 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
32 +# define PAGE_COPY_NOEXEC PAGE_COPY
33 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
34 +#endif
35 +
36 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
37
38 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
39 diff -urNp linux-3.0.8/arch/alpha/kernel/module.c linux-3.0.8/arch/alpha/kernel/module.c
40 --- linux-3.0.8/arch/alpha/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
41 +++ linux-3.0.8/arch/alpha/kernel/module.c 2011-08-23 21:47:55.000000000 -0400
42 @@ -182,7 +182,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs,
43
44 /* The small sections were sorted to the end of the segment.
45 The following should definitely cover them. */
46 - gp = (u64)me->module_core + me->core_size - 0x8000;
47 + gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
48 got = sechdrs[me->arch.gotsecindex].sh_addr;
49
50 for (i = 0; i < n; i++) {
51 diff -urNp linux-3.0.8/arch/alpha/kernel/osf_sys.c linux-3.0.8/arch/alpha/kernel/osf_sys.c
52 --- linux-3.0.8/arch/alpha/kernel/osf_sys.c 2011-07-21 22:17:23.000000000 -0400
53 +++ linux-3.0.8/arch/alpha/kernel/osf_sys.c 2011-08-23 21:47:55.000000000 -0400
54 @@ -1145,7 +1145,7 @@ arch_get_unmapped_area_1(unsigned long a
55 /* At this point: (!vma || addr < vma->vm_end). */
56 if (limit - len < addr)
57 return -ENOMEM;
58 - if (!vma || addr + len <= vma->vm_start)
59 + if (check_heap_stack_gap(vma, addr, len))
60 return addr;
61 addr = vma->vm_end;
62 vma = vma->vm_next;
63 @@ -1181,6 +1181,10 @@ arch_get_unmapped_area(struct file *filp
64 merely specific addresses, but regions of memory -- perhaps
65 this feature should be incorporated into all ports? */
66
67 +#ifdef CONFIG_PAX_RANDMMAP
68 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
69 +#endif
70 +
71 if (addr) {
72 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
73 if (addr != (unsigned long) -ENOMEM)
74 @@ -1188,8 +1192,8 @@ arch_get_unmapped_area(struct file *filp
75 }
76
77 /* Next, try allocating at TASK_UNMAPPED_BASE. */
78 - addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
79 - len, limit);
80 + addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
81 +
82 if (addr != (unsigned long) -ENOMEM)
83 return addr;
84
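
Throughout the patch the open-coded test `!vma || addr + len <= vma->vm_start` is replaced by check_heap_stack_gap(). The helper itself is defined elsewhere in the full patch (not in this excerpt); conceptually it keeps the old "does the candidate range fit below the next VMA" check but additionally refuses ranges that would butt up against a downward-growing stack. A hedged sketch of that idea -- the 64 KB guard below is illustrative only, the real gap size is a tunable in the patch:

  /* Conceptual sketch of the helper used above, not the verbatim definition. */
  static int check_heap_stack_gap(const struct vm_area_struct *vma,
  				unsigned long addr, unsigned long len)
  {
  	const unsigned long gap = 64UL << 10;	/* illustrative 64 KB guard */

  	if (!vma)
  		return 1;			/* nothing above the range: fine */
  	if (addr + len > vma->vm_start)
  		return 0;			/* overlaps the next VMA */
  	/* new behaviour: keep a guard gap below a growing-down stack */
  	if ((vma->vm_flags & VM_GROWSDOWN) && vma->vm_start - (addr + len) < gap)
  		return 0;
  	return 1;
  }
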
85 diff -urNp linux-3.0.8/arch/alpha/mm/fault.c linux-3.0.8/arch/alpha/mm/fault.c
86 --- linux-3.0.8/arch/alpha/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
87 +++ linux-3.0.8/arch/alpha/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
88 @@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *
89 __reload_thread(pcb);
90 }
91
92 +#ifdef CONFIG_PAX_PAGEEXEC
93 +/*
94 + * PaX: decide what to do with offenders (regs->pc = fault address)
95 + *
96 + * returns 1 when task should be killed
97 + * 2 when patched PLT trampoline was detected
98 + * 3 when unpatched PLT trampoline was detected
99 + */
100 +static int pax_handle_fetch_fault(struct pt_regs *regs)
101 +{
102 +
103 +#ifdef CONFIG_PAX_EMUPLT
104 + int err;
105 +
106 + do { /* PaX: patched PLT emulation #1 */
107 + unsigned int ldah, ldq, jmp;
108 +
109 + err = get_user(ldah, (unsigned int *)regs->pc);
110 + err |= get_user(ldq, (unsigned int *)(regs->pc+4));
111 + err |= get_user(jmp, (unsigned int *)(regs->pc+8));
112 +
113 + if (err)
114 + break;
115 +
116 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
117 + (ldq & 0xFFFF0000U) == 0xA77B0000U &&
118 + jmp == 0x6BFB0000U)
119 + {
120 + unsigned long r27, addr;
121 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
122 + unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
123 +
124 + addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
125 + err = get_user(r27, (unsigned long *)addr);
126 + if (err)
127 + break;
128 +
129 + regs->r27 = r27;
130 + regs->pc = r27;
131 + return 2;
132 + }
133 + } while (0);
134 +
135 + do { /* PaX: patched PLT emulation #2 */
136 + unsigned int ldah, lda, br;
137 +
138 + err = get_user(ldah, (unsigned int *)regs->pc);
139 + err |= get_user(lda, (unsigned int *)(regs->pc+4));
140 + err |= get_user(br, (unsigned int *)(regs->pc+8));
141 +
142 + if (err)
143 + break;
144 +
145 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
146 + (lda & 0xFFFF0000U) == 0xA77B0000U &&
147 + (br & 0xFFE00000U) == 0xC3E00000U)
148 + {
149 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
150 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
151 + unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
152 +
153 + regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
154 + regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
155 + return 2;
156 + }
157 + } while (0);
158 +
159 + do { /* PaX: unpatched PLT emulation */
160 + unsigned int br;
161 +
162 + err = get_user(br, (unsigned int *)regs->pc);
163 +
164 + if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
165 + unsigned int br2, ldq, nop, jmp;
166 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
167 +
168 + addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
169 + err = get_user(br2, (unsigned int *)addr);
170 + err |= get_user(ldq, (unsigned int *)(addr+4));
171 + err |= get_user(nop, (unsigned int *)(addr+8));
172 + err |= get_user(jmp, (unsigned int *)(addr+12));
173 + err |= get_user(resolver, (unsigned long *)(addr+16));
174 +
175 + if (err)
176 + break;
177 +
178 + if (br2 == 0xC3600000U &&
179 + ldq == 0xA77B000CU &&
180 + nop == 0x47FF041FU &&
181 + jmp == 0x6B7B0000U)
182 + {
183 + regs->r28 = regs->pc+4;
184 + regs->r27 = addr+16;
185 + regs->pc = resolver;
186 + return 3;
187 + }
188 + }
189 + } while (0);
190 +#endif
191 +
192 + return 1;
193 +}
194 +
195 +void pax_report_insns(void *pc, void *sp)
196 +{
197 + unsigned long i;
198 +
199 + printk(KERN_ERR "PAX: bytes at PC: ");
200 + for (i = 0; i < 5; i++) {
201 + unsigned int c;
202 + if (get_user(c, (unsigned int *)pc+i))
203 + printk(KERN_CONT "???????? ");
204 + else
205 + printk(KERN_CONT "%08x ", c);
206 + }
207 + printk("\n");
208 +}
209 +#endif
210
211 /*
212 * This routine handles page faults. It determines the address,
213 @@ -131,8 +249,29 @@ do_page_fault(unsigned long address, uns
214 good_area:
215 si_code = SEGV_ACCERR;
216 if (cause < 0) {
217 - if (!(vma->vm_flags & VM_EXEC))
218 + if (!(vma->vm_flags & VM_EXEC)) {
219 +
220 +#ifdef CONFIG_PAX_PAGEEXEC
221 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
222 + goto bad_area;
223 +
224 + up_read(&mm->mmap_sem);
225 + switch (pax_handle_fetch_fault(regs)) {
226 +
227 +#ifdef CONFIG_PAX_EMUPLT
228 + case 2:
229 + case 3:
230 + return;
231 +#endif
232 +
233 + }
234 + pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
235 + do_group_exit(SIGKILL);
236 +#else
237 goto bad_area;
238 +#endif
239 +
240 + }
241 } else if (!cause) {
242 /* Allow reads even for write-only mappings */
243 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
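
The PLT emulation above reconstructs branch targets from 16- and 21-bit displacement fields with the idiom `(v ^ SIGN_BIT) + SIGN_BIT`, applied after the high bits have been OR-ed to all ones. With the high bits pre-set, the XOR/ADD pair yields exactly the sign extension of the displacement: a positive displacement carries out and clears the high bits, a negative one leaves them set. A tiny self-contained check of the 16-bit case used for the ldq/lda fields:

  #include <stdio.h>

  /* Sign-extend a 16-bit displacement the way the fault handler above does:
   * the high bits are first forced to 1, then (v ^ 0x8000) + 0x8000 fixes
   * them up to the true sign extension. */
  static unsigned long sext16(unsigned int insn)
  {
  	unsigned long v = insn | 0xFFFFFFFFFFFF0000UL;	/* keep low 16 bits */
  	return (v ^ 0x8000UL) + 0x8000UL;
  }

  int main(void)
  {
  	printf("%lx\n", sext16(0x0010));	/* ->               0x10 */
  	printf("%lx\n", sext16(0xfff0));	/* -> 0xfffffffffffffff0, i.e. -16 */
  	return 0;
  }
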
244 diff -urNp linux-3.0.8/arch/arm/include/asm/elf.h linux-3.0.8/arch/arm/include/asm/elf.h
245 --- linux-3.0.8/arch/arm/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
246 +++ linux-3.0.8/arch/arm/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
247 @@ -116,7 +116,14 @@ int dump_task_regs(struct task_struct *t
248 the loader. We need to make sure that it is out of the way of the program
249 that it will "exec", and that there is sufficient room for the brk. */
250
251 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
252 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
253 +
254 +#ifdef CONFIG_PAX_ASLR
255 +#define PAX_ELF_ET_DYN_BASE 0x00008000UL
256 +
257 +#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
258 +#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
259 +#endif
260
261 /* When the program starts, a1 contains a pointer to a function to be
262 registered with atexit, as per the SVR4 ABI. A value of 0 means we
263 @@ -126,10 +133,6 @@ int dump_task_regs(struct task_struct *t
264 extern void elf_set_personality(const struct elf32_hdr *);
265 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
266
267 -struct mm_struct;
268 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
269 -#define arch_randomize_brk arch_randomize_brk
270 -
271 extern int vectors_user_mapping(void);
272 #define arch_setup_additional_pages(bprm, uses_interp) vectors_user_mapping()
273 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES
274 diff -urNp linux-3.0.8/arch/arm/include/asm/kmap_types.h linux-3.0.8/arch/arm/include/asm/kmap_types.h
275 --- linux-3.0.8/arch/arm/include/asm/kmap_types.h 2011-07-21 22:17:23.000000000 -0400
276 +++ linux-3.0.8/arch/arm/include/asm/kmap_types.h 2011-08-23 21:47:55.000000000 -0400
277 @@ -21,6 +21,7 @@ enum km_type {
278 KM_L1_CACHE,
279 KM_L2_CACHE,
280 KM_KDB,
281 + KM_CLEARPAGE,
282 KM_TYPE_NR
283 };
284
285 diff -urNp linux-3.0.8/arch/arm/include/asm/uaccess.h linux-3.0.8/arch/arm/include/asm/uaccess.h
286 --- linux-3.0.8/arch/arm/include/asm/uaccess.h 2011-07-21 22:17:23.000000000 -0400
287 +++ linux-3.0.8/arch/arm/include/asm/uaccess.h 2011-08-23 21:47:55.000000000 -0400
288 @@ -22,6 +22,8 @@
289 #define VERIFY_READ 0
290 #define VERIFY_WRITE 1
291
292 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
293 +
294 /*
295 * The exception table consists of pairs of addresses: the first is the
296 * address of an instruction that is allowed to fault, and the second is
297 @@ -387,8 +389,23 @@ do { \
298
299
300 #ifdef CONFIG_MMU
301 -extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
302 -extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
303 +extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
304 +extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
305 +
306 +static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
307 +{
308 + if (!__builtin_constant_p(n))
309 + check_object_size(to, n, false);
310 + return ___copy_from_user(to, from, n);
311 +}
312 +
313 +static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
314 +{
315 + if (!__builtin_constant_p(n))
316 + check_object_size(from, n, true);
317 + return ___copy_to_user(to, from, n);
318 +}
319 +
320 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
321 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
322 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
323 @@ -403,6 +420,9 @@ extern unsigned long __must_check __strn
324
325 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
326 {
327 + if ((long)n < 0)
328 + return n;
329 +
330 if (access_ok(VERIFY_READ, from, n))
331 n = __copy_from_user(to, from, n);
332 else /* security hole - plug it */
333 @@ -412,6 +432,9 @@ static inline unsigned long __must_check
334
335 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
336 {
337 + if ((long)n < 0)
338 + return n;
339 +
340 if (access_ok(VERIFY_WRITE, to, n))
341 n = __copy_to_user(to, from, n);
342 return n;
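
Two independent checks are layered into the ARM copy routines here: the usercopy object-size check (check_object_size(), declared above and defined elsewhere in the patch, skipped for compile-time-constant sizes), and the `(long)n < 0` guard in copy_{from,to}_user(), which catches the common bug where a length computed by subtraction underflows to a huge size_t. A small user-space illustration of what the cast catches:

  #include <stdio.h>

  /* Illustration only: a buggy length calculation that underflows produces a
   * value with the top bit set; reinterpreted as long it is negative and
   * trivially rejected before any copying happens. */
  int main(void)
  {
  	size_t have = 16, want = 24;
  	size_t n = have - want;		/* underflows to SIZE_MAX - 7 */

  	if ((long)n < 0) {
  		printf("rejected bogus copy of %zu bytes\n", n);
  		return 1;
  	}
  	return 0;
  }
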
343 diff -urNp linux-3.0.8/arch/arm/kernel/armksyms.c linux-3.0.8/arch/arm/kernel/armksyms.c
344 --- linux-3.0.8/arch/arm/kernel/armksyms.c 2011-07-21 22:17:23.000000000 -0400
345 +++ linux-3.0.8/arch/arm/kernel/armksyms.c 2011-08-23 21:47:55.000000000 -0400
346 @@ -98,8 +98,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
347 #ifdef CONFIG_MMU
348 EXPORT_SYMBOL(copy_page);
349
350 -EXPORT_SYMBOL(__copy_from_user);
351 -EXPORT_SYMBOL(__copy_to_user);
352 +EXPORT_SYMBOL(___copy_from_user);
353 +EXPORT_SYMBOL(___copy_to_user);
354 EXPORT_SYMBOL(__clear_user);
355
356 EXPORT_SYMBOL(__get_user_1);
357 diff -urNp linux-3.0.8/arch/arm/kernel/process.c linux-3.0.8/arch/arm/kernel/process.c
358 --- linux-3.0.8/arch/arm/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
359 +++ linux-3.0.8/arch/arm/kernel/process.c 2011-08-23 21:47:55.000000000 -0400
360 @@ -28,7 +28,6 @@
361 #include <linux/tick.h>
362 #include <linux/utsname.h>
363 #include <linux/uaccess.h>
364 -#include <linux/random.h>
365 #include <linux/hw_breakpoint.h>
366
367 #include <asm/cacheflush.h>
368 @@ -479,12 +478,6 @@ unsigned long get_wchan(struct task_stru
369 return 0;
370 }
371
372 -unsigned long arch_randomize_brk(struct mm_struct *mm)
373 -{
374 - unsigned long range_end = mm->brk + 0x02000000;
375 - return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
376 -}
377 -
378 #ifdef CONFIG_MMU
379 /*
380 * The vectors page is always readable from user space for the
381 diff -urNp linux-3.0.8/arch/arm/kernel/traps.c linux-3.0.8/arch/arm/kernel/traps.c
382 --- linux-3.0.8/arch/arm/kernel/traps.c 2011-07-21 22:17:23.000000000 -0400
383 +++ linux-3.0.8/arch/arm/kernel/traps.c 2011-08-23 21:48:14.000000000 -0400
384 @@ -257,6 +257,8 @@ static int __die(const char *str, int er
385
386 static DEFINE_SPINLOCK(die_lock);
387
388 +extern void gr_handle_kernel_exploit(void);
389 +
390 /*
391 * This function is protected against re-entrancy.
392 */
393 @@ -284,6 +286,9 @@ void die(const char *str, struct pt_regs
394 panic("Fatal exception in interrupt");
395 if (panic_on_oops)
396 panic("Fatal exception");
397 +
398 + gr_handle_kernel_exploit();
399 +
400 if (ret != NOTIFY_STOP)
401 do_exit(SIGSEGV);
402 }
403 diff -urNp linux-3.0.8/arch/arm/lib/copy_from_user.S linux-3.0.8/arch/arm/lib/copy_from_user.S
404 --- linux-3.0.8/arch/arm/lib/copy_from_user.S 2011-07-21 22:17:23.000000000 -0400
405 +++ linux-3.0.8/arch/arm/lib/copy_from_user.S 2011-08-23 21:47:55.000000000 -0400
406 @@ -16,7 +16,7 @@
407 /*
408 * Prototype:
409 *
410 - * size_t __copy_from_user(void *to, const void *from, size_t n)
411 + * size_t ___copy_from_user(void *to, const void *from, size_t n)
412 *
413 * Purpose:
414 *
415 @@ -84,11 +84,11 @@
416
417 .text
418
419 -ENTRY(__copy_from_user)
420 +ENTRY(___copy_from_user)
421
422 #include "copy_template.S"
423
424 -ENDPROC(__copy_from_user)
425 +ENDPROC(___copy_from_user)
426
427 .pushsection .fixup,"ax"
428 .align 0
429 diff -urNp linux-3.0.8/arch/arm/lib/copy_to_user.S linux-3.0.8/arch/arm/lib/copy_to_user.S
430 --- linux-3.0.8/arch/arm/lib/copy_to_user.S 2011-07-21 22:17:23.000000000 -0400
431 +++ linux-3.0.8/arch/arm/lib/copy_to_user.S 2011-08-23 21:47:55.000000000 -0400
432 @@ -16,7 +16,7 @@
433 /*
434 * Prototype:
435 *
436 - * size_t __copy_to_user(void *to, const void *from, size_t n)
437 + * size_t ___copy_to_user(void *to, const void *from, size_t n)
438 *
439 * Purpose:
440 *
441 @@ -88,11 +88,11 @@
442 .text
443
444 ENTRY(__copy_to_user_std)
445 -WEAK(__copy_to_user)
446 +WEAK(___copy_to_user)
447
448 #include "copy_template.S"
449
450 -ENDPROC(__copy_to_user)
451 +ENDPROC(___copy_to_user)
452 ENDPROC(__copy_to_user_std)
453
454 .pushsection .fixup,"ax"
455 diff -urNp linux-3.0.8/arch/arm/lib/uaccess.S linux-3.0.8/arch/arm/lib/uaccess.S
456 --- linux-3.0.8/arch/arm/lib/uaccess.S 2011-07-21 22:17:23.000000000 -0400
457 +++ linux-3.0.8/arch/arm/lib/uaccess.S 2011-08-23 21:47:55.000000000 -0400
458 @@ -20,7 +20,7 @@
459
460 #define PAGE_SHIFT 12
461
462 -/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
463 +/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
464 * Purpose : copy a block to user memory from kernel memory
465 * Params : to - user memory
466 * : from - kernel memory
467 @@ -40,7 +40,7 @@ USER( T(strgtb) r3, [r0], #1) @ May f
468 sub r2, r2, ip
469 b .Lc2u_dest_aligned
470
471 -ENTRY(__copy_to_user)
472 +ENTRY(___copy_to_user)
473 stmfd sp!, {r2, r4 - r7, lr}
474 cmp r2, #4
475 blt .Lc2u_not_enough
476 @@ -278,14 +278,14 @@ USER( T(strgeb) r3, [r0], #1) @ May f
477 ldrgtb r3, [r1], #0
478 USER( T(strgtb) r3, [r0], #1) @ May fault
479 b .Lc2u_finished
480 -ENDPROC(__copy_to_user)
481 +ENDPROC(___copy_to_user)
482
483 .pushsection .fixup,"ax"
484 .align 0
485 9001: ldmfd sp!, {r0, r4 - r7, pc}
486 .popsection
487
488 -/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
489 +/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
490 * Purpose : copy a block from user memory to kernel memory
491 * Params : to - kernel memory
492 * : from - user memory
493 @@ -304,7 +304,7 @@ USER( T(ldrgtb) r3, [r1], #1) @ May f
494 sub r2, r2, ip
495 b .Lcfu_dest_aligned
496
497 -ENTRY(__copy_from_user)
498 +ENTRY(___copy_from_user)
499 stmfd sp!, {r0, r2, r4 - r7, lr}
500 cmp r2, #4
501 blt .Lcfu_not_enough
502 @@ -544,7 +544,7 @@ USER( T(ldrgeb) r3, [r1], #1) @ May f
503 USER( T(ldrgtb) r3, [r1], #1) @ May fault
504 strgtb r3, [r0], #1
505 b .Lcfu_finished
506 -ENDPROC(__copy_from_user)
507 +ENDPROC(___copy_from_user)
508
509 .pushsection .fixup,"ax"
510 .align 0
511 diff -urNp linux-3.0.8/arch/arm/lib/uaccess_with_memcpy.c linux-3.0.8/arch/arm/lib/uaccess_with_memcpy.c
512 --- linux-3.0.8/arch/arm/lib/uaccess_with_memcpy.c 2011-07-21 22:17:23.000000000 -0400
513 +++ linux-3.0.8/arch/arm/lib/uaccess_with_memcpy.c 2011-08-23 21:47:55.000000000 -0400
514 @@ -103,7 +103,7 @@ out:
515 }
516
517 unsigned long
518 -__copy_to_user(void __user *to, const void *from, unsigned long n)
519 +___copy_to_user(void __user *to, const void *from, unsigned long n)
520 {
521 /*
522 * This test is stubbed out of the main function above to keep
523 diff -urNp linux-3.0.8/arch/arm/mach-ux500/mbox-db5500.c linux-3.0.8/arch/arm/mach-ux500/mbox-db5500.c
524 --- linux-3.0.8/arch/arm/mach-ux500/mbox-db5500.c 2011-07-21 22:17:23.000000000 -0400
525 +++ linux-3.0.8/arch/arm/mach-ux500/mbox-db5500.c 2011-08-23 21:48:14.000000000 -0400
526 @@ -168,7 +168,7 @@ static ssize_t mbox_read_fifo(struct dev
527 return sprintf(buf, "0x%X\n", mbox_value);
528 }
529
530 -static DEVICE_ATTR(fifo, S_IWUGO | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
531 +static DEVICE_ATTR(fifo, S_IWUSR | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
532
533 static int mbox_show(struct seq_file *s, void *data)
534 {
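
The DEVICE_ATTR change is a straightforward permissions fix: S_IWUGO | S_IRUGO is mode 0666 (a world-writable sysfs file), while S_IWUSR | S_IRUGO is 0644 (owner-writable only). A quick user-space check of the arithmetic -- the aggregate UGO macros are spelled out because they are kernel-internal:

  #include <stdio.h>
  #include <sys/stat.h>

  /* kernel-style aggregate macros, expanded for this user-space check */
  #define S_IRUGO (S_IRUSR | S_IRGRP | S_IROTH)
  #define S_IWUGO (S_IWUSR | S_IWGRP | S_IWOTH)

  int main(void)
  {
  	printf("old mode %04o, new mode %04o\n",
  	       S_IWUGO | S_IRUGO, S_IWUSR | S_IRUGO);	/* 0666 -> 0644 */
  	return 0;
  }
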
535 diff -urNp linux-3.0.8/arch/arm/mm/fault.c linux-3.0.8/arch/arm/mm/fault.c
536 --- linux-3.0.8/arch/arm/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
537 +++ linux-3.0.8/arch/arm/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
538 @@ -182,6 +182,13 @@ __do_user_fault(struct task_struct *tsk,
539 }
540 #endif
541
542 +#ifdef CONFIG_PAX_PAGEEXEC
543 + if (fsr & FSR_LNX_PF) {
544 + pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
545 + do_group_exit(SIGKILL);
546 + }
547 +#endif
548 +
549 tsk->thread.address = addr;
550 tsk->thread.error_code = fsr;
551 tsk->thread.trap_no = 14;
552 @@ -379,6 +386,33 @@ do_page_fault(unsigned long addr, unsign
553 }
554 #endif /* CONFIG_MMU */
555
556 +#ifdef CONFIG_PAX_PAGEEXEC
557 +void pax_report_insns(void *pc, void *sp)
558 +{
559 + long i;
560 +
561 + printk(KERN_ERR "PAX: bytes at PC: ");
562 + for (i = 0; i < 20; i++) {
563 + unsigned char c;
564 + if (get_user(c, (__force unsigned char __user *)pc+i))
565 + printk(KERN_CONT "?? ");
566 + else
567 + printk(KERN_CONT "%02x ", c);
568 + }
569 + printk("\n");
570 +
571 + printk(KERN_ERR "PAX: bytes at SP-4: ");
572 + for (i = -1; i < 20; i++) {
573 + unsigned long c;
574 + if (get_user(c, (__force unsigned long __user *)sp+i))
575 + printk(KERN_CONT "???????? ");
576 + else
577 + printk(KERN_CONT "%08lx ", c);
578 + }
579 + printk("\n");
580 +}
581 +#endif
582 +
583 /*
584 * First Level Translation Fault Handler
585 *
586 diff -urNp linux-3.0.8/arch/arm/mm/mmap.c linux-3.0.8/arch/arm/mm/mmap.c
587 --- linux-3.0.8/arch/arm/mm/mmap.c 2011-07-21 22:17:23.000000000 -0400
588 +++ linux-3.0.8/arch/arm/mm/mmap.c 2011-08-23 21:47:55.000000000 -0400
589 @@ -65,6 +65,10 @@ arch_get_unmapped_area(struct file *filp
590 if (len > TASK_SIZE)
591 return -ENOMEM;
592
593 +#ifdef CONFIG_PAX_RANDMMAP
594 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
595 +#endif
596 +
597 if (addr) {
598 if (do_align)
599 addr = COLOUR_ALIGN(addr, pgoff);
600 @@ -72,15 +76,14 @@ arch_get_unmapped_area(struct file *filp
601 addr = PAGE_ALIGN(addr);
602
603 vma = find_vma(mm, addr);
604 - if (TASK_SIZE - len >= addr &&
605 - (!vma || addr + len <= vma->vm_start))
606 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
607 return addr;
608 }
609 if (len > mm->cached_hole_size) {
610 - start_addr = addr = mm->free_area_cache;
611 + start_addr = addr = mm->free_area_cache;
612 } else {
613 - start_addr = addr = TASK_UNMAPPED_BASE;
614 - mm->cached_hole_size = 0;
615 + start_addr = addr = mm->mmap_base;
616 + mm->cached_hole_size = 0;
617 }
618 /* 8 bits of randomness in 20 address space bits */
619 if ((current->flags & PF_RANDOMIZE) &&
620 @@ -100,14 +103,14 @@ full_search:
621 * Start a new search - just in case we missed
622 * some holes.
623 */
624 - if (start_addr != TASK_UNMAPPED_BASE) {
625 - start_addr = addr = TASK_UNMAPPED_BASE;
626 + if (start_addr != mm->mmap_base) {
627 + start_addr = addr = mm->mmap_base;
628 mm->cached_hole_size = 0;
629 goto full_search;
630 }
631 return -ENOMEM;
632 }
633 - if (!vma || addr + len <= vma->vm_start) {
634 + if (check_heap_stack_gap(vma, addr, len)) {
635 /*
636 * Remember the place where we stopped the search:
637 */
638 diff -urNp linux-3.0.8/arch/avr32/include/asm/elf.h linux-3.0.8/arch/avr32/include/asm/elf.h
639 --- linux-3.0.8/arch/avr32/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
640 +++ linux-3.0.8/arch/avr32/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
641 @@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpreg
642 the loader. We need to make sure that it is out of the way of the program
643 that it will "exec", and that there is sufficient room for the brk. */
644
645 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
646 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
647
648 +#ifdef CONFIG_PAX_ASLR
649 +#define PAX_ELF_ET_DYN_BASE 0x00001000UL
650 +
651 +#define PAX_DELTA_MMAP_LEN 15
652 +#define PAX_DELTA_STACK_LEN 15
653 +#endif
654
655 /* This yields a mask that user programs can use to figure out what
656 instruction set this CPU supports. This could be done in user space,
657 diff -urNp linux-3.0.8/arch/avr32/include/asm/kmap_types.h linux-3.0.8/arch/avr32/include/asm/kmap_types.h
658 --- linux-3.0.8/arch/avr32/include/asm/kmap_types.h 2011-07-21 22:17:23.000000000 -0400
659 +++ linux-3.0.8/arch/avr32/include/asm/kmap_types.h 2011-08-23 21:47:55.000000000 -0400
660 @@ -22,7 +22,8 @@ D(10) KM_IRQ0,
661 D(11) KM_IRQ1,
662 D(12) KM_SOFTIRQ0,
663 D(13) KM_SOFTIRQ1,
664 -D(14) KM_TYPE_NR
665 +D(14) KM_CLEARPAGE,
666 +D(15) KM_TYPE_NR
667 };
668
669 #undef D
670 diff -urNp linux-3.0.8/arch/avr32/mm/fault.c linux-3.0.8/arch/avr32/mm/fault.c
671 --- linux-3.0.8/arch/avr32/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
672 +++ linux-3.0.8/arch/avr32/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
673 @@ -41,6 +41,23 @@ static inline int notify_page_fault(stru
674
675 int exception_trace = 1;
676
677 +#ifdef CONFIG_PAX_PAGEEXEC
678 +void pax_report_insns(void *pc, void *sp)
679 +{
680 + unsigned long i;
681 +
682 + printk(KERN_ERR "PAX: bytes at PC: ");
683 + for (i = 0; i < 20; i++) {
684 + unsigned char c;
685 + if (get_user(c, (unsigned char *)pc+i))
686 + printk(KERN_CONT "???????? ");
687 + else
688 + printk(KERN_CONT "%02x ", c);
689 + }
690 + printk("\n");
691 +}
692 +#endif
693 +
694 /*
695 * This routine handles page faults. It determines the address and the
696 * problem, and then passes it off to one of the appropriate routines.
697 @@ -156,6 +173,16 @@ bad_area:
698 up_read(&mm->mmap_sem);
699
700 if (user_mode(regs)) {
701 +
702 +#ifdef CONFIG_PAX_PAGEEXEC
703 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
704 + if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
705 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
706 + do_group_exit(SIGKILL);
707 + }
708 + }
709 +#endif
710 +
711 if (exception_trace && printk_ratelimit())
712 printk("%s%s[%d]: segfault at %08lx pc %08lx "
713 "sp %08lx ecr %lu\n",
714 diff -urNp linux-3.0.8/arch/frv/include/asm/kmap_types.h linux-3.0.8/arch/frv/include/asm/kmap_types.h
715 --- linux-3.0.8/arch/frv/include/asm/kmap_types.h 2011-07-21 22:17:23.000000000 -0400
716 +++ linux-3.0.8/arch/frv/include/asm/kmap_types.h 2011-08-23 21:47:55.000000000 -0400
717 @@ -23,6 +23,7 @@ enum km_type {
718 KM_IRQ1,
719 KM_SOFTIRQ0,
720 KM_SOFTIRQ1,
721 + KM_CLEARPAGE,
722 KM_TYPE_NR
723 };
724
725 diff -urNp linux-3.0.8/arch/frv/mm/elf-fdpic.c linux-3.0.8/arch/frv/mm/elf-fdpic.c
726 --- linux-3.0.8/arch/frv/mm/elf-fdpic.c 2011-07-21 22:17:23.000000000 -0400
727 +++ linux-3.0.8/arch/frv/mm/elf-fdpic.c 2011-08-23 21:47:55.000000000 -0400
728 @@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(str
729 if (addr) {
730 addr = PAGE_ALIGN(addr);
731 vma = find_vma(current->mm, addr);
732 - if (TASK_SIZE - len >= addr &&
733 - (!vma || addr + len <= vma->vm_start))
734 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
735 goto success;
736 }
737
738 @@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(str
739 for (; vma; vma = vma->vm_next) {
740 if (addr > limit)
741 break;
742 - if (addr + len <= vma->vm_start)
743 + if (check_heap_stack_gap(vma, addr, len))
744 goto success;
745 addr = vma->vm_end;
746 }
747 @@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(str
748 for (; vma; vma = vma->vm_next) {
749 if (addr > limit)
750 break;
751 - if (addr + len <= vma->vm_start)
752 + if (check_heap_stack_gap(vma, addr, len))
753 goto success;
754 addr = vma->vm_end;
755 }
756 diff -urNp linux-3.0.8/arch/ia64/include/asm/elf.h linux-3.0.8/arch/ia64/include/asm/elf.h
757 --- linux-3.0.8/arch/ia64/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
758 +++ linux-3.0.8/arch/ia64/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
759 @@ -42,6 +42,13 @@
760 */
761 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
762
763 +#ifdef CONFIG_PAX_ASLR
764 +#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
765 +
766 +#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
767 +#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
768 +#endif
769 +
770 #define PT_IA_64_UNWIND 0x70000001
771
772 /* IA-64 relocations: */
773 diff -urNp linux-3.0.8/arch/ia64/include/asm/pgtable.h linux-3.0.8/arch/ia64/include/asm/pgtable.h
774 --- linux-3.0.8/arch/ia64/include/asm/pgtable.h 2011-07-21 22:17:23.000000000 -0400
775 +++ linux-3.0.8/arch/ia64/include/asm/pgtable.h 2011-08-23 21:47:55.000000000 -0400
776 @@ -12,7 +12,7 @@
777 * David Mosberger-Tang <davidm@hpl.hp.com>
778 */
779
780 -
781 +#include <linux/const.h>
782 #include <asm/mman.h>
783 #include <asm/page.h>
784 #include <asm/processor.h>
785 @@ -143,6 +143,17 @@
786 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
787 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
788 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
789 +
790 +#ifdef CONFIG_PAX_PAGEEXEC
791 +# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
792 +# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
793 +# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
794 +#else
795 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
796 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
797 +# define PAGE_COPY_NOEXEC PAGE_COPY
798 +#endif
799 +
800 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
801 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
802 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
803 diff -urNp linux-3.0.8/arch/ia64/include/asm/spinlock.h linux-3.0.8/arch/ia64/include/asm/spinlock.h
804 --- linux-3.0.8/arch/ia64/include/asm/spinlock.h 2011-07-21 22:17:23.000000000 -0400
805 +++ linux-3.0.8/arch/ia64/include/asm/spinlock.h 2011-08-23 21:47:55.000000000 -0400
806 @@ -72,7 +72,7 @@ static __always_inline void __ticket_spi
807 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
808
809 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
810 - ACCESS_ONCE(*p) = (tmp + 2) & ~1;
811 + ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
812 }
813
814 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
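
ACCESS_ONCE_RW exists because, elsewhere in this patch, ACCESS_ONCE() is made to yield a const-qualified lvalue (so stray writes through it become compile errors under the constification changes); writes that are genuinely intended, like the ticket-lock release above, switch to the _RW variant. Roughly -- a sketch of the pair as this patch appears to use them, not the verbatim definitions:

  /* reads go through the const-qualified form, intentional writes use _RW */
  #define ACCESS_ONCE(x)		(*(const volatile typeof(x) *)&(x))
  #define ACCESS_ONCE_RW(x)	(*(volatile typeof(x) *)&(x))
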
815 diff -urNp linux-3.0.8/arch/ia64/include/asm/uaccess.h linux-3.0.8/arch/ia64/include/asm/uaccess.h
816 --- linux-3.0.8/arch/ia64/include/asm/uaccess.h 2011-07-21 22:17:23.000000000 -0400
817 +++ linux-3.0.8/arch/ia64/include/asm/uaccess.h 2011-08-23 21:47:55.000000000 -0400
818 @@ -257,7 +257,7 @@ __copy_from_user (void *to, const void _
819 const void *__cu_from = (from); \
820 long __cu_len = (n); \
821 \
822 - if (__access_ok(__cu_to, __cu_len, get_fs())) \
823 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
824 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
825 __cu_len; \
826 })
827 @@ -269,7 +269,7 @@ __copy_from_user (void *to, const void _
828 long __cu_len = (n); \
829 \
830 __chk_user_ptr(__cu_from); \
831 - if (__access_ok(__cu_from, __cu_len, get_fs())) \
832 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
833 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
834 __cu_len; \
835 })
836 diff -urNp linux-3.0.8/arch/ia64/kernel/module.c linux-3.0.8/arch/ia64/kernel/module.c
837 --- linux-3.0.8/arch/ia64/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
838 +++ linux-3.0.8/arch/ia64/kernel/module.c 2011-08-23 21:47:55.000000000 -0400
839 @@ -315,8 +315,7 @@ module_alloc (unsigned long size)
840 void
841 module_free (struct module *mod, void *module_region)
842 {
843 - if (mod && mod->arch.init_unw_table &&
844 - module_region == mod->module_init) {
845 + if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
846 unw_remove_unwind_table(mod->arch.init_unw_table);
847 mod->arch.init_unw_table = NULL;
848 }
849 @@ -502,15 +501,39 @@ module_frob_arch_sections (Elf_Ehdr *ehd
850 }
851
852 static inline int
853 +in_init_rx (const struct module *mod, uint64_t addr)
854 +{
855 + return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
856 +}
857 +
858 +static inline int
859 +in_init_rw (const struct module *mod, uint64_t addr)
860 +{
861 + return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
862 +}
863 +
864 +static inline int
865 in_init (const struct module *mod, uint64_t addr)
866 {
867 - return addr - (uint64_t) mod->module_init < mod->init_size;
868 + return in_init_rx(mod, addr) || in_init_rw(mod, addr);
869 +}
870 +
871 +static inline int
872 +in_core_rx (const struct module *mod, uint64_t addr)
873 +{
874 + return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
875 +}
876 +
877 +static inline int
878 +in_core_rw (const struct module *mod, uint64_t addr)
879 +{
880 + return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
881 }
882
883 static inline int
884 in_core (const struct module *mod, uint64_t addr)
885 {
886 - return addr - (uint64_t) mod->module_core < mod->core_size;
887 + return in_core_rx(mod, addr) || in_core_rw(mod, addr);
888 }
889
890 static inline int
891 @@ -693,7 +716,14 @@ do_reloc (struct module *mod, uint8_t r_
892 break;
893
894 case RV_BDREL:
895 - val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
896 + if (in_init_rx(mod, val))
897 + val -= (uint64_t) mod->module_init_rx;
898 + else if (in_init_rw(mod, val))
899 + val -= (uint64_t) mod->module_init_rw;
900 + else if (in_core_rx(mod, val))
901 + val -= (uint64_t) mod->module_core_rx;
902 + else if (in_core_rw(mod, val))
903 + val -= (uint64_t) mod->module_core_rw;
904 break;
905
906 case RV_LTV:
907 @@ -828,15 +858,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs,
908 * addresses have been selected...
909 */
910 uint64_t gp;
911 - if (mod->core_size > MAX_LTOFF)
912 + if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
913 /*
914 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
915 * at the end of the module.
916 */
917 - gp = mod->core_size - MAX_LTOFF / 2;
918 + gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
919 else
920 - gp = mod->core_size / 2;
921 - gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
922 + gp = (mod->core_size_rx + mod->core_size_rw) / 2;
923 + gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
924 mod->arch.gp = gp;
925 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
926 }
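
The rewritten in_init()/in_core() helpers track the RX/RW split this patch applies to struct module (module_core becomes module_core_rx/module_core_rw with matching core_size_rx/core_size_rw, and likewise for the init region), and each range test uses the single-comparison idiom `addr - base < size`, which needs no explicit lower-bound check because the unsigned subtraction wraps for addresses below base. A standalone check of that idiom:

  #include <stdio.h>
  #include <stdint.h>

  /* The one-comparison range check used by in_init_rx()/in_core_rx() above:
   * if addr < base the unsigned subtraction wraps to a huge value and the
   * comparison fails, so no separate lower-bound test is needed. */
  static int in_range(uint64_t addr, uint64_t base, uint64_t size)
  {
  	return addr - base < size;
  }

  int main(void)
  {
  	uint64_t base = 0xa0000000, size = 0x1000;

  	printf("%d %d %d\n",
  	       in_range(base, base, size),		/* 1: first byte */
  	       in_range(base + size, base, size),	/* 0: one past the end */
  	       in_range(base - 1, base, size));		/* 0: below the region */
  	return 0;
  }
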
927 diff -urNp linux-3.0.8/arch/ia64/kernel/sys_ia64.c linux-3.0.8/arch/ia64/kernel/sys_ia64.c
928 --- linux-3.0.8/arch/ia64/kernel/sys_ia64.c 2011-07-21 22:17:23.000000000 -0400
929 +++ linux-3.0.8/arch/ia64/kernel/sys_ia64.c 2011-08-23 21:47:55.000000000 -0400
930 @@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *fil
931 if (REGION_NUMBER(addr) == RGN_HPAGE)
932 addr = 0;
933 #endif
934 +
935 +#ifdef CONFIG_PAX_RANDMMAP
936 + if (mm->pax_flags & MF_PAX_RANDMMAP)
937 + addr = mm->free_area_cache;
938 + else
939 +#endif
940 +
941 if (!addr)
942 addr = mm->free_area_cache;
943
944 @@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *fil
945 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
946 /* At this point: (!vma || addr < vma->vm_end). */
947 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
948 - if (start_addr != TASK_UNMAPPED_BASE) {
949 + if (start_addr != mm->mmap_base) {
950 /* Start a new search --- just in case we missed some holes. */
951 - addr = TASK_UNMAPPED_BASE;
952 + addr = mm->mmap_base;
953 goto full_search;
954 }
955 return -ENOMEM;
956 }
957 - if (!vma || addr + len <= vma->vm_start) {
958 + if (check_heap_stack_gap(vma, addr, len)) {
959 /* Remember the address where we stopped this search: */
960 mm->free_area_cache = addr + len;
961 return addr;
962 diff -urNp linux-3.0.8/arch/ia64/kernel/vmlinux.lds.S linux-3.0.8/arch/ia64/kernel/vmlinux.lds.S
963 --- linux-3.0.8/arch/ia64/kernel/vmlinux.lds.S 2011-07-21 22:17:23.000000000 -0400
964 +++ linux-3.0.8/arch/ia64/kernel/vmlinux.lds.S 2011-08-23 21:47:55.000000000 -0400
965 @@ -199,7 +199,7 @@ SECTIONS {
966 /* Per-cpu data: */
967 . = ALIGN(PERCPU_PAGE_SIZE);
968 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
969 - __phys_per_cpu_start = __per_cpu_load;
970 + __phys_per_cpu_start = per_cpu_load;
971 /*
972 * ensure percpu data fits
973 * into percpu page size
974 diff -urNp linux-3.0.8/arch/ia64/mm/fault.c linux-3.0.8/arch/ia64/mm/fault.c
975 --- linux-3.0.8/arch/ia64/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
976 +++ linux-3.0.8/arch/ia64/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
977 @@ -73,6 +73,23 @@ mapped_kernel_page_is_present (unsigned
978 return pte_present(pte);
979 }
980
981 +#ifdef CONFIG_PAX_PAGEEXEC
982 +void pax_report_insns(void *pc, void *sp)
983 +{
984 + unsigned long i;
985 +
986 + printk(KERN_ERR "PAX: bytes at PC: ");
987 + for (i = 0; i < 8; i++) {
988 + unsigned int c;
989 + if (get_user(c, (unsigned int *)pc+i))
990 + printk(KERN_CONT "???????? ");
991 + else
992 + printk(KERN_CONT "%08x ", c);
993 + }
994 + printk("\n");
995 +}
996 +#endif
997 +
998 void __kprobes
999 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
1000 {
1001 @@ -146,9 +163,23 @@ ia64_do_page_fault (unsigned long addres
1002 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
1003 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
1004
1005 - if ((vma->vm_flags & mask) != mask)
1006 + if ((vma->vm_flags & mask) != mask) {
1007 +
1008 +#ifdef CONFIG_PAX_PAGEEXEC
1009 + if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
1010 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
1011 + goto bad_area;
1012 +
1013 + up_read(&mm->mmap_sem);
1014 + pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
1015 + do_group_exit(SIGKILL);
1016 + }
1017 +#endif
1018 +
1019 goto bad_area;
1020
1021 + }
1022 +
1023 /*
1024 * If for any reason at all we couldn't handle the fault, make
1025 * sure we exit gracefully rather than endlessly redo the
1026 diff -urNp linux-3.0.8/arch/ia64/mm/hugetlbpage.c linux-3.0.8/arch/ia64/mm/hugetlbpage.c
1027 --- linux-3.0.8/arch/ia64/mm/hugetlbpage.c 2011-07-21 22:17:23.000000000 -0400
1028 +++ linux-3.0.8/arch/ia64/mm/hugetlbpage.c 2011-08-23 21:47:55.000000000 -0400
1029 @@ -171,7 +171,7 @@ unsigned long hugetlb_get_unmapped_area(
1030 /* At this point: (!vmm || addr < vmm->vm_end). */
1031 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
1032 return -ENOMEM;
1033 - if (!vmm || (addr + len) <= vmm->vm_start)
1034 + if (check_heap_stack_gap(vmm, addr, len))
1035 return addr;
1036 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
1037 }
1038 diff -urNp linux-3.0.8/arch/ia64/mm/init.c linux-3.0.8/arch/ia64/mm/init.c
1039 --- linux-3.0.8/arch/ia64/mm/init.c 2011-07-21 22:17:23.000000000 -0400
1040 +++ linux-3.0.8/arch/ia64/mm/init.c 2011-08-23 21:47:55.000000000 -0400
1041 @@ -120,6 +120,19 @@ ia64_init_addr_space (void)
1042 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
1043 vma->vm_end = vma->vm_start + PAGE_SIZE;
1044 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
1045 +
1046 +#ifdef CONFIG_PAX_PAGEEXEC
1047 + if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
1048 + vma->vm_flags &= ~VM_EXEC;
1049 +
1050 +#ifdef CONFIG_PAX_MPROTECT
1051 + if (current->mm->pax_flags & MF_PAX_MPROTECT)
1052 + vma->vm_flags &= ~VM_MAYEXEC;
1053 +#endif
1054 +
1055 + }
1056 +#endif
1057 +
1058 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
1059 down_write(&current->mm->mmap_sem);
1060 if (insert_vm_struct(current->mm, vma)) {
1061 diff -urNp linux-3.0.8/arch/m32r/lib/usercopy.c linux-3.0.8/arch/m32r/lib/usercopy.c
1062 --- linux-3.0.8/arch/m32r/lib/usercopy.c 2011-07-21 22:17:23.000000000 -0400
1063 +++ linux-3.0.8/arch/m32r/lib/usercopy.c 2011-08-23 21:47:55.000000000 -0400
1064 @@ -14,6 +14,9 @@
1065 unsigned long
1066 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1067 {
1068 + if ((long)n < 0)
1069 + return n;
1070 +
1071 prefetch(from);
1072 if (access_ok(VERIFY_WRITE, to, n))
1073 __copy_user(to,from,n);
1074 @@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to,
1075 unsigned long
1076 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
1077 {
1078 + if ((long)n < 0)
1079 + return n;
1080 +
1081 prefetchw(to);
1082 if (access_ok(VERIFY_READ, from, n))
1083 __copy_user_zeroing(to,from,n);
1084 diff -urNp linux-3.0.8/arch/mips/include/asm/elf.h linux-3.0.8/arch/mips/include/asm/elf.h
1085 --- linux-3.0.8/arch/mips/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
1086 +++ linux-3.0.8/arch/mips/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
1087 @@ -372,13 +372,16 @@ extern const char *__elf_platform;
1088 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1089 #endif
1090
1091 +#ifdef CONFIG_PAX_ASLR
1092 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1093 +
1094 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1095 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1096 +#endif
1097 +
1098 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
1099 struct linux_binprm;
1100 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
1101 int uses_interp);
1102
1103 -struct mm_struct;
1104 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1105 -#define arch_randomize_brk arch_randomize_brk
1106 -
1107 #endif /* _ASM_ELF_H */
1108 diff -urNp linux-3.0.8/arch/mips/include/asm/page.h linux-3.0.8/arch/mips/include/asm/page.h
1109 --- linux-3.0.8/arch/mips/include/asm/page.h 2011-07-21 22:17:23.000000000 -0400
1110 +++ linux-3.0.8/arch/mips/include/asm/page.h 2011-08-23 21:47:55.000000000 -0400
1111 @@ -93,7 +93,7 @@ extern void copy_user_highpage(struct pa
1112 #ifdef CONFIG_CPU_MIPS32
1113 typedef struct { unsigned long pte_low, pte_high; } pte_t;
1114 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
1115 - #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
1116 + #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
1117 #else
1118 typedef struct { unsigned long long pte; } pte_t;
1119 #define pte_val(x) ((x).pte)
1120 diff -urNp linux-3.0.8/arch/mips/include/asm/system.h linux-3.0.8/arch/mips/include/asm/system.h
1121 --- linux-3.0.8/arch/mips/include/asm/system.h 2011-07-21 22:17:23.000000000 -0400
1122 +++ linux-3.0.8/arch/mips/include/asm/system.h 2011-08-23 21:47:55.000000000 -0400
1123 @@ -230,6 +230,6 @@ extern void per_cpu_trap_init(void);
1124 */
1125 #define __ARCH_WANT_UNLOCKED_CTXSW
1126
1127 -extern unsigned long arch_align_stack(unsigned long sp);
1128 +#define arch_align_stack(x) ((x) & ~0xfUL)
1129
1130 #endif /* _ASM_SYSTEM_H */
1131 diff -urNp linux-3.0.8/arch/mips/kernel/binfmt_elfn32.c linux-3.0.8/arch/mips/kernel/binfmt_elfn32.c
1132 --- linux-3.0.8/arch/mips/kernel/binfmt_elfn32.c 2011-07-21 22:17:23.000000000 -0400
1133 +++ linux-3.0.8/arch/mips/kernel/binfmt_elfn32.c 2011-08-23 21:47:55.000000000 -0400
1134 @@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1135 #undef ELF_ET_DYN_BASE
1136 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1137
1138 +#ifdef CONFIG_PAX_ASLR
1139 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1140 +
1141 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1142 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1143 +#endif
1144 +
1145 #include <asm/processor.h>
1146 #include <linux/module.h>
1147 #include <linux/elfcore.h>
1148 diff -urNp linux-3.0.8/arch/mips/kernel/binfmt_elfo32.c linux-3.0.8/arch/mips/kernel/binfmt_elfo32.c
1149 --- linux-3.0.8/arch/mips/kernel/binfmt_elfo32.c 2011-07-21 22:17:23.000000000 -0400
1150 +++ linux-3.0.8/arch/mips/kernel/binfmt_elfo32.c 2011-08-23 21:47:55.000000000 -0400
1151 @@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1152 #undef ELF_ET_DYN_BASE
1153 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1154
1155 +#ifdef CONFIG_PAX_ASLR
1156 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1157 +
1158 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1159 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1160 +#endif
1161 +
1162 #include <asm/processor.h>
1163
1164 /*
1165 diff -urNp linux-3.0.8/arch/mips/kernel/process.c linux-3.0.8/arch/mips/kernel/process.c
1166 --- linux-3.0.8/arch/mips/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
1167 +++ linux-3.0.8/arch/mips/kernel/process.c 2011-08-23 21:47:55.000000000 -0400
1168 @@ -473,15 +473,3 @@ unsigned long get_wchan(struct task_stru
1169 out:
1170 return pc;
1171 }
1172 -
1173 -/*
1174 - * Don't forget that the stack pointer must be aligned on a 8 bytes
1175 - * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
1176 - */
1177 -unsigned long arch_align_stack(unsigned long sp)
1178 -{
1179 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
1180 - sp -= get_random_int() & ~PAGE_MASK;
1181 -
1182 - return sp & ALMASK;
1183 -}
1184 diff -urNp linux-3.0.8/arch/mips/mm/fault.c linux-3.0.8/arch/mips/mm/fault.c
1185 --- linux-3.0.8/arch/mips/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
1186 +++ linux-3.0.8/arch/mips/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
1187 @@ -28,6 +28,23 @@
1188 #include <asm/highmem.h> /* For VMALLOC_END */
1189 #include <linux/kdebug.h>
1190
1191 +#ifdef CONFIG_PAX_PAGEEXEC
1192 +void pax_report_insns(void *pc, void *sp)
1193 +{
1194 + unsigned long i;
1195 +
1196 + printk(KERN_ERR "PAX: bytes at PC: ");
1197 + for (i = 0; i < 5; i++) {
1198 + unsigned int c;
1199 + if (get_user(c, (unsigned int *)pc+i))
1200 + printk(KERN_CONT "???????? ");
1201 + else
1202 + printk(KERN_CONT "%08x ", c);
1203 + }
1204 + printk("\n");
1205 +}
1206 +#endif
1207 +
1208 /*
1209 * This routine handles page faults. It determines the address,
1210 * and the problem, and then passes it off to one of the appropriate
1211 diff -urNp linux-3.0.8/arch/mips/mm/mmap.c linux-3.0.8/arch/mips/mm/mmap.c
1212 --- linux-3.0.8/arch/mips/mm/mmap.c 2011-07-21 22:17:23.000000000 -0400
1213 +++ linux-3.0.8/arch/mips/mm/mmap.c 2011-08-23 21:47:55.000000000 -0400
1214 @@ -48,14 +48,18 @@ unsigned long arch_get_unmapped_area(str
1215 do_color_align = 0;
1216 if (filp || (flags & MAP_SHARED))
1217 do_color_align = 1;
1218 +
1219 +#ifdef CONFIG_PAX_RANDMMAP
1220 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
1221 +#endif
1222 +
1223 if (addr) {
1224 if (do_color_align)
1225 addr = COLOUR_ALIGN(addr, pgoff);
1226 else
1227 addr = PAGE_ALIGN(addr);
1228 vmm = find_vma(current->mm, addr);
1229 - if (TASK_SIZE - len >= addr &&
1230 - (!vmm || addr + len <= vmm->vm_start))
1231 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vmm, addr, len))
1232 return addr;
1233 }
1234 addr = current->mm->mmap_base;
1235 @@ -68,7 +72,7 @@ unsigned long arch_get_unmapped_area(str
1236 /* At this point: (!vmm || addr < vmm->vm_end). */
1237 if (TASK_SIZE - len < addr)
1238 return -ENOMEM;
1239 - if (!vmm || addr + len <= vmm->vm_start)
1240 + if (check_heap_stack_gap(vmm, addr, len))
1241 return addr;
1242 addr = vmm->vm_end;
1243 if (do_color_align)
1244 @@ -93,30 +97,3 @@ void arch_pick_mmap_layout(struct mm_str
1245 mm->get_unmapped_area = arch_get_unmapped_area;
1246 mm->unmap_area = arch_unmap_area;
1247 }
1248 -
1249 -static inline unsigned long brk_rnd(void)
1250 -{
1251 - unsigned long rnd = get_random_int();
1252 -
1253 - rnd = rnd << PAGE_SHIFT;
1254 - /* 8MB for 32bit, 256MB for 64bit */
1255 - if (TASK_IS_32BIT_ADDR)
1256 - rnd = rnd & 0x7ffffful;
1257 - else
1258 - rnd = rnd & 0xffffffful;
1259 -
1260 - return rnd;
1261 -}
1262 -
1263 -unsigned long arch_randomize_brk(struct mm_struct *mm)
1264 -{
1265 - unsigned long base = mm->brk;
1266 - unsigned long ret;
1267 -
1268 - ret = PAGE_ALIGN(base + brk_rnd());
1269 -
1270 - if (ret < mm->brk)
1271 - return mm->brk;
1272 -
1273 - return ret;
1274 -}
1275 diff -urNp linux-3.0.8/arch/parisc/include/asm/elf.h linux-3.0.8/arch/parisc/include/asm/elf.h
1276 --- linux-3.0.8/arch/parisc/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
1277 +++ linux-3.0.8/arch/parisc/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
1278 @@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration..
1279
1280 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
1281
1282 +#ifdef CONFIG_PAX_ASLR
1283 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
1284 +
1285 +#define PAX_DELTA_MMAP_LEN 16
1286 +#define PAX_DELTA_STACK_LEN 16
1287 +#endif
1288 +
1289 /* This yields a mask that user programs can use to figure out what
1290 instruction set this CPU supports. This could be done in user space,
1291 but it's not easy, and we've already done it here. */
1292 diff -urNp linux-3.0.8/arch/parisc/include/asm/pgtable.h linux-3.0.8/arch/parisc/include/asm/pgtable.h
1293 --- linux-3.0.8/arch/parisc/include/asm/pgtable.h 2011-07-21 22:17:23.000000000 -0400
1294 +++ linux-3.0.8/arch/parisc/include/asm/pgtable.h 2011-08-23 21:47:55.000000000 -0400
1295 @@ -210,6 +210,17 @@ struct vm_area_struct;
1296 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
1297 #define PAGE_COPY PAGE_EXECREAD
1298 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
1299 +
1300 +#ifdef CONFIG_PAX_PAGEEXEC
1301 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
1302 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1303 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1304 +#else
1305 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
1306 +# define PAGE_COPY_NOEXEC PAGE_COPY
1307 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
1308 +#endif
1309 +
1310 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
1311 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
1312 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
1313 diff -urNp linux-3.0.8/arch/parisc/kernel/module.c linux-3.0.8/arch/parisc/kernel/module.c
1314 --- linux-3.0.8/arch/parisc/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
1315 +++ linux-3.0.8/arch/parisc/kernel/module.c 2011-08-23 21:47:55.000000000 -0400
1316 @@ -98,16 +98,38 @@
1317
1318 /* three functions to determine where in the module core
1319 * or init pieces the location is */
1320 +static inline int in_init_rx(struct module *me, void *loc)
1321 +{
1322 + return (loc >= me->module_init_rx &&
1323 + loc < (me->module_init_rx + me->init_size_rx));
1324 +}
1325 +
1326 +static inline int in_init_rw(struct module *me, void *loc)
1327 +{
1328 + return (loc >= me->module_init_rw &&
1329 + loc < (me->module_init_rw + me->init_size_rw));
1330 +}
1331 +
1332 static inline int in_init(struct module *me, void *loc)
1333 {
1334 - return (loc >= me->module_init &&
1335 - loc <= (me->module_init + me->init_size));
1336 + return in_init_rx(me, loc) || in_init_rw(me, loc);
1337 +}
1338 +
1339 +static inline int in_core_rx(struct module *me, void *loc)
1340 +{
1341 + return (loc >= me->module_core_rx &&
1342 + loc < (me->module_core_rx + me->core_size_rx));
1343 +}
1344 +
1345 +static inline int in_core_rw(struct module *me, void *loc)
1346 +{
1347 + return (loc >= me->module_core_rw &&
1348 + loc < (me->module_core_rw + me->core_size_rw));
1349 }
1350
1351 static inline int in_core(struct module *me, void *loc)
1352 {
1353 - return (loc >= me->module_core &&
1354 - loc <= (me->module_core + me->core_size));
1355 + return in_core_rx(me, loc) || in_core_rw(me, loc);
1356 }
1357
1358 static inline int in_local(struct module *me, void *loc)
1359 @@ -373,13 +395,13 @@ int module_frob_arch_sections(CONST Elf_
1360 }
1361
1362 /* align things a bit */
1363 - me->core_size = ALIGN(me->core_size, 16);
1364 - me->arch.got_offset = me->core_size;
1365 - me->core_size += gots * sizeof(struct got_entry);
1366 -
1367 - me->core_size = ALIGN(me->core_size, 16);
1368 - me->arch.fdesc_offset = me->core_size;
1369 - me->core_size += fdescs * sizeof(Elf_Fdesc);
1370 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
1371 + me->arch.got_offset = me->core_size_rw;
1372 + me->core_size_rw += gots * sizeof(struct got_entry);
1373 +
1374 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
1375 + me->arch.fdesc_offset = me->core_size_rw;
1376 + me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
1377
1378 me->arch.got_max = gots;
1379 me->arch.fdesc_max = fdescs;
1380 @@ -397,7 +419,7 @@ static Elf64_Word get_got(struct module
1381
1382 BUG_ON(value == 0);
1383
1384 - got = me->module_core + me->arch.got_offset;
1385 + got = me->module_core_rw + me->arch.got_offset;
1386 for (i = 0; got[i].addr; i++)
1387 if (got[i].addr == value)
1388 goto out;
1389 @@ -415,7 +437,7 @@ static Elf64_Word get_got(struct module
1390 #ifdef CONFIG_64BIT
1391 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
1392 {
1393 - Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
1394 + Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
1395
1396 if (!value) {
1397 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
1398 @@ -433,7 +455,7 @@ static Elf_Addr get_fdesc(struct module
1399
1400 /* Create new one */
1401 fdesc->addr = value;
1402 - fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1403 + fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1404 return (Elf_Addr)fdesc;
1405 }
1406 #endif /* CONFIG_64BIT */
1407 @@ -857,7 +879,7 @@ register_unwind_table(struct module *me,
1408
1409 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
1410 end = table + sechdrs[me->arch.unwind_section].sh_size;
1411 - gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1412 + gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1413
1414 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
1415 me->arch.unwind_section, table, end, gp);
1416 diff -urNp linux-3.0.8/arch/parisc/kernel/sys_parisc.c linux-3.0.8/arch/parisc/kernel/sys_parisc.c
1417 --- linux-3.0.8/arch/parisc/kernel/sys_parisc.c 2011-07-21 22:17:23.000000000 -0400
1418 +++ linux-3.0.8/arch/parisc/kernel/sys_parisc.c 2011-08-23 21:47:55.000000000 -0400
1419 @@ -43,7 +43,7 @@ static unsigned long get_unshared_area(u
1420 /* At this point: (!vma || addr < vma->vm_end). */
1421 if (TASK_SIZE - len < addr)
1422 return -ENOMEM;
1423 - if (!vma || addr + len <= vma->vm_start)
1424 + if (check_heap_stack_gap(vma, addr, len))
1425 return addr;
1426 addr = vma->vm_end;
1427 }
1428 @@ -79,7 +79,7 @@ static unsigned long get_shared_area(str
1429 /* At this point: (!vma || addr < vma->vm_end). */
1430 if (TASK_SIZE - len < addr)
1431 return -ENOMEM;
1432 - if (!vma || addr + len <= vma->vm_start)
1433 + if (check_heap_stack_gap(vma, addr, len))
1434 return addr;
1435 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
1436 if (addr < vma->vm_end) /* handle wraparound */
1437 @@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(str
1438 if (flags & MAP_FIXED)
1439 return addr;
1440 if (!addr)
1441 - addr = TASK_UNMAPPED_BASE;
1442 + addr = current->mm->mmap_base;
1443
1444 if (filp) {
1445 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
1446 diff -urNp linux-3.0.8/arch/parisc/kernel/traps.c linux-3.0.8/arch/parisc/kernel/traps.c
1447 --- linux-3.0.8/arch/parisc/kernel/traps.c 2011-07-21 22:17:23.000000000 -0400
1448 +++ linux-3.0.8/arch/parisc/kernel/traps.c 2011-08-23 21:47:55.000000000 -0400
1449 @@ -733,9 +733,7 @@ void notrace handle_interruption(int cod
1450
1451 down_read(&current->mm->mmap_sem);
1452 vma = find_vma(current->mm,regs->iaoq[0]);
1453 - if (vma && (regs->iaoq[0] >= vma->vm_start)
1454 - && (vma->vm_flags & VM_EXEC)) {
1455 -
1456 + if (vma && (regs->iaoq[0] >= vma->vm_start)) {
1457 fault_address = regs->iaoq[0];
1458 fault_space = regs->iasq[0];
1459
1460 diff -urNp linux-3.0.8/arch/parisc/mm/fault.c linux-3.0.8/arch/parisc/mm/fault.c
1461 --- linux-3.0.8/arch/parisc/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
1462 +++ linux-3.0.8/arch/parisc/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
1463 @@ -15,6 +15,7 @@
1464 #include <linux/sched.h>
1465 #include <linux/interrupt.h>
1466 #include <linux/module.h>
1467 +#include <linux/unistd.h>
1468
1469 #include <asm/uaccess.h>
1470 #include <asm/traps.h>
1471 @@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, ex
1472 static unsigned long
1473 parisc_acctyp(unsigned long code, unsigned int inst)
1474 {
1475 - if (code == 6 || code == 16)
1476 + if (code == 6 || code == 7 || code == 16)
1477 return VM_EXEC;
1478
1479 switch (inst & 0xf0000000) {
1480 @@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsign
1481 }
1482 #endif
1483
1484 +#ifdef CONFIG_PAX_PAGEEXEC
1485 +/*
1486 + * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
1487 + *
1488 + * returns 1 when task should be killed
1489 + * 2 when rt_sigreturn trampoline was detected
1490 + * 3 when unpatched PLT trampoline was detected
1491 + */
1492 +static int pax_handle_fetch_fault(struct pt_regs *regs)
1493 +{
1494 +
1495 +#ifdef CONFIG_PAX_EMUPLT
1496 + int err;
1497 +
1498 + do { /* PaX: unpatched PLT emulation */
1499 + unsigned int bl, depwi;
1500 +
1501 + err = get_user(bl, (unsigned int *)instruction_pointer(regs));
1502 + err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
1503 +
1504 + if (err)
1505 + break;
1506 +
1507 + if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
1508 + unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
1509 +
1510 + err = get_user(ldw, (unsigned int *)addr);
1511 + err |= get_user(bv, (unsigned int *)(addr+4));
1512 + err |= get_user(ldw2, (unsigned int *)(addr+8));
1513 +
1514 + if (err)
1515 + break;
1516 +
1517 + if (ldw == 0x0E801096U &&
1518 + bv == 0xEAC0C000U &&
1519 + ldw2 == 0x0E881095U)
1520 + {
1521 + unsigned int resolver, map;
1522 +
1523 + err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
1524 + err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
1525 + if (err)
1526 + break;
1527 +
1528 + regs->gr[20] = instruction_pointer(regs)+8;
1529 + regs->gr[21] = map;
1530 + regs->gr[22] = resolver;
1531 + regs->iaoq[0] = resolver | 3UL;
1532 + regs->iaoq[1] = regs->iaoq[0] + 4;
1533 + return 3;
1534 + }
1535 + }
1536 + } while (0);
1537 +#endif
1538 +
1539 +#ifdef CONFIG_PAX_EMUTRAMP
1540 +
1541 +#ifndef CONFIG_PAX_EMUSIGRT
1542 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
1543 + return 1;
1544 +#endif
1545 +
1546 + do { /* PaX: rt_sigreturn emulation */
1547 + unsigned int ldi1, ldi2, bel, nop;
1548 +
1549 + err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
1550 + err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
1551 + err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
1552 + err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
1553 +
1554 + if (err)
1555 + break;
1556 +
1557 + if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
1558 + ldi2 == 0x3414015AU &&
1559 + bel == 0xE4008200U &&
1560 + nop == 0x08000240U)
1561 + {
1562 + regs->gr[25] = (ldi1 & 2) >> 1;
1563 + regs->gr[20] = __NR_rt_sigreturn;
1564 + regs->gr[31] = regs->iaoq[1] + 16;
1565 + regs->sr[0] = regs->iasq[1];
1566 + regs->iaoq[0] = 0x100UL;
1567 + regs->iaoq[1] = regs->iaoq[0] + 4;
1568 + regs->iasq[0] = regs->sr[2];
1569 + regs->iasq[1] = regs->sr[2];
1570 + return 2;
1571 + }
1572 + } while (0);
1573 +#endif
1574 +
1575 + return 1;
1576 +}
1577 +
1578 +void pax_report_insns(void *pc, void *sp)
1579 +{
1580 + unsigned long i;
1581 +
1582 + printk(KERN_ERR "PAX: bytes at PC: ");
1583 + for (i = 0; i < 5; i++) {
1584 + unsigned int c;
1585 + if (get_user(c, (unsigned int *)pc+i))
1586 + printk(KERN_CONT "???????? ");
1587 + else
1588 + printk(KERN_CONT "%08x ", c);
1589 + }
1590 + printk("\n");
1591 +}
1592 +#endif
1593 +
1594 int fixup_exception(struct pt_regs *regs)
1595 {
1596 const struct exception_table_entry *fix;
1597 @@ -192,8 +303,33 @@ good_area:
1598
1599 acc_type = parisc_acctyp(code,regs->iir);
1600
1601 - if ((vma->vm_flags & acc_type) != acc_type)
1602 + if ((vma->vm_flags & acc_type) != acc_type) {
1603 +
1604 +#ifdef CONFIG_PAX_PAGEEXEC
1605 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
1606 + (address & ~3UL) == instruction_pointer(regs))
1607 + {
1608 + up_read(&mm->mmap_sem);
1609 + switch (pax_handle_fetch_fault(regs)) {
1610 +
1611 +#ifdef CONFIG_PAX_EMUPLT
1612 + case 3:
1613 + return;
1614 +#endif
1615 +
1616 +#ifdef CONFIG_PAX_EMUTRAMP
1617 + case 2:
1618 + return;
1619 +#endif
1620 +
1621 + }
1622 + pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
1623 + do_group_exit(SIGKILL);
1624 + }
1625 +#endif
1626 +
1627 goto bad_area;
1628 + }
1629
1630 /*
1631 * If for any reason at all we couldn't handle the fault, make
1632 diff -urNp linux-3.0.8/arch/powerpc/include/asm/elf.h linux-3.0.8/arch/powerpc/include/asm/elf.h
1633 --- linux-3.0.8/arch/powerpc/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
1634 +++ linux-3.0.8/arch/powerpc/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
1635 @@ -178,8 +178,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[E
1636 the loader. We need to make sure that it is out of the way of the program
1637 that it will "exec", and that there is sufficient room for the brk. */
1638
1639 -extern unsigned long randomize_et_dyn(unsigned long base);
1640 -#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
1641 +#define ELF_ET_DYN_BASE (0x20000000)
1642 +
1643 +#ifdef CONFIG_PAX_ASLR
1644 +#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
1645 +
1646 +#ifdef __powerpc64__
1647 +#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
1648 +#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
1649 +#else
1650 +#define PAX_DELTA_MMAP_LEN 15
1651 +#define PAX_DELTA_STACK_LEN 15
1652 +#endif
1653 +#endif
1654
1655 /*
1656 * Our registers are always unsigned longs, whether we're a 32 bit
1657 @@ -274,9 +285,6 @@ extern int arch_setup_additional_pages(s
1658 (0x7ff >> (PAGE_SHIFT - 12)) : \
1659 (0x3ffff >> (PAGE_SHIFT - 12)))
1660
1661 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1662 -#define arch_randomize_brk arch_randomize_brk
1663 -
1664 #endif /* __KERNEL__ */
1665
1666 /*
1667 diff -urNp linux-3.0.8/arch/powerpc/include/asm/kmap_types.h linux-3.0.8/arch/powerpc/include/asm/kmap_types.h
1668 --- linux-3.0.8/arch/powerpc/include/asm/kmap_types.h 2011-07-21 22:17:23.000000000 -0400
1669 +++ linux-3.0.8/arch/powerpc/include/asm/kmap_types.h 2011-08-23 21:47:55.000000000 -0400
1670 @@ -27,6 +27,7 @@ enum km_type {
1671 KM_PPC_SYNC_PAGE,
1672 KM_PPC_SYNC_ICACHE,
1673 KM_KDB,
1674 + KM_CLEARPAGE,
1675 KM_TYPE_NR
1676 };
1677
1678 diff -urNp linux-3.0.8/arch/powerpc/include/asm/mman.h linux-3.0.8/arch/powerpc/include/asm/mman.h
1679 --- linux-3.0.8/arch/powerpc/include/asm/mman.h 2011-07-21 22:17:23.000000000 -0400
1680 +++ linux-3.0.8/arch/powerpc/include/asm/mman.h 2011-08-23 21:47:55.000000000 -0400
1681 @@ -44,7 +44,7 @@ static inline unsigned long arch_calc_vm
1682 }
1683 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
1684
1685 -static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
1686 +static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
1687 {
1688 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
1689 }
1690 diff -urNp linux-3.0.8/arch/powerpc/include/asm/page_64.h linux-3.0.8/arch/powerpc/include/asm/page_64.h
1691 --- linux-3.0.8/arch/powerpc/include/asm/page_64.h 2011-07-21 22:17:23.000000000 -0400
1692 +++ linux-3.0.8/arch/powerpc/include/asm/page_64.h 2011-08-23 21:47:55.000000000 -0400
1693 @@ -155,15 +155,18 @@ do { \
1694 * stack by default, so in the absence of a PT_GNU_STACK program header
1695 * we turn execute permission off.
1696 */
1697 -#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
1698 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1699 +#define VM_STACK_DEFAULT_FLAGS32 \
1700 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
1701 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1702
1703 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
1704 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1705
1706 +#ifndef CONFIG_PAX_PAGEEXEC
1707 #define VM_STACK_DEFAULT_FLAGS \
1708 (is_32bit_task() ? \
1709 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
1710 +#endif
1711
1712 #include <asm-generic/getorder.h>
1713
1714 diff -urNp linux-3.0.8/arch/powerpc/include/asm/page.h linux-3.0.8/arch/powerpc/include/asm/page.h
1715 --- linux-3.0.8/arch/powerpc/include/asm/page.h 2011-07-21 22:17:23.000000000 -0400
1716 +++ linux-3.0.8/arch/powerpc/include/asm/page.h 2011-08-23 21:47:55.000000000 -0400
1717 @@ -129,8 +129,9 @@ extern phys_addr_t kernstart_addr;
1718 * and needs to be executable. This means the whole heap ends
1719 * up being executable.
1720 */
1721 -#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
1722 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1723 +#define VM_DATA_DEFAULT_FLAGS32 \
1724 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
1725 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1726
1727 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
1728 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1729 @@ -158,6 +159,9 @@ extern phys_addr_t kernstart_addr;
1730 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
1731 #endif
1732
1733 +#define ktla_ktva(addr) (addr)
1734 +#define ktva_ktla(addr) (addr)
1735 +
1736 #ifndef __ASSEMBLY__
1737
1738 #undef STRICT_MM_TYPECHECKS
1739 diff -urNp linux-3.0.8/arch/powerpc/include/asm/pgtable.h linux-3.0.8/arch/powerpc/include/asm/pgtable.h
1740 --- linux-3.0.8/arch/powerpc/include/asm/pgtable.h 2011-07-21 22:17:23.000000000 -0400
1741 +++ linux-3.0.8/arch/powerpc/include/asm/pgtable.h 2011-08-23 21:47:55.000000000 -0400
1742 @@ -2,6 +2,7 @@
1743 #define _ASM_POWERPC_PGTABLE_H
1744 #ifdef __KERNEL__
1745
1746 +#include <linux/const.h>
1747 #ifndef __ASSEMBLY__
1748 #include <asm/processor.h> /* For TASK_SIZE */
1749 #include <asm/mmu.h>
1750 diff -urNp linux-3.0.8/arch/powerpc/include/asm/pte-hash32.h linux-3.0.8/arch/powerpc/include/asm/pte-hash32.h
1751 --- linux-3.0.8/arch/powerpc/include/asm/pte-hash32.h 2011-07-21 22:17:23.000000000 -0400
1752 +++ linux-3.0.8/arch/powerpc/include/asm/pte-hash32.h 2011-08-23 21:47:55.000000000 -0400
1753 @@ -21,6 +21,7 @@
1754 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
1755 #define _PAGE_USER 0x004 /* usermode access allowed */
1756 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
1757 +#define _PAGE_EXEC _PAGE_GUARDED
1758 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
1759 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
1760 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
1761 diff -urNp linux-3.0.8/arch/powerpc/include/asm/reg.h linux-3.0.8/arch/powerpc/include/asm/reg.h
1762 --- linux-3.0.8/arch/powerpc/include/asm/reg.h 2011-07-21 22:17:23.000000000 -0400
1763 +++ linux-3.0.8/arch/powerpc/include/asm/reg.h 2011-08-23 21:47:55.000000000 -0400
1764 @@ -209,6 +209,7 @@
1765 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
1766 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
1767 #define DSISR_NOHPTE 0x40000000 /* no translation found */
1768 +#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
1769 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
1770 #define DSISR_ISSTORE 0x02000000 /* access was a store */
1771 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
1772 diff -urNp linux-3.0.8/arch/powerpc/include/asm/system.h linux-3.0.8/arch/powerpc/include/asm/system.h
1773 --- linux-3.0.8/arch/powerpc/include/asm/system.h 2011-07-21 22:17:23.000000000 -0400
1774 +++ linux-3.0.8/arch/powerpc/include/asm/system.h 2011-08-23 21:47:55.000000000 -0400
1775 @@ -531,7 +531,7 @@ __cmpxchg_local(volatile void *ptr, unsi
1776 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
1777 #endif
1778
1779 -extern unsigned long arch_align_stack(unsigned long sp);
1780 +#define arch_align_stack(x) ((x) & ~0xfUL)
1781
1782 /* Used in very early kernel initialization. */
1783 extern unsigned long reloc_offset(void);
1784 diff -urNp linux-3.0.8/arch/powerpc/include/asm/uaccess.h linux-3.0.8/arch/powerpc/include/asm/uaccess.h
1785 --- linux-3.0.8/arch/powerpc/include/asm/uaccess.h 2011-07-21 22:17:23.000000000 -0400
1786 +++ linux-3.0.8/arch/powerpc/include/asm/uaccess.h 2011-08-23 21:47:55.000000000 -0400
1787 @@ -13,6 +13,8 @@
1788 #define VERIFY_READ 0
1789 #define VERIFY_WRITE 1
1790
1791 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
1792 +
1793 /*
1794 * The fs value determines whether argument validity checking should be
1795 * performed or not. If get_fs() == USER_DS, checking is performed, with
1796 @@ -327,52 +329,6 @@ do { \
1797 extern unsigned long __copy_tofrom_user(void __user *to,
1798 const void __user *from, unsigned long size);
1799
1800 -#ifndef __powerpc64__
1801 -
1802 -static inline unsigned long copy_from_user(void *to,
1803 - const void __user *from, unsigned long n)
1804 -{
1805 - unsigned long over;
1806 -
1807 - if (access_ok(VERIFY_READ, from, n))
1808 - return __copy_tofrom_user((__force void __user *)to, from, n);
1809 - if ((unsigned long)from < TASK_SIZE) {
1810 - over = (unsigned long)from + n - TASK_SIZE;
1811 - return __copy_tofrom_user((__force void __user *)to, from,
1812 - n - over) + over;
1813 - }
1814 - return n;
1815 -}
1816 -
1817 -static inline unsigned long copy_to_user(void __user *to,
1818 - const void *from, unsigned long n)
1819 -{
1820 - unsigned long over;
1821 -
1822 - if (access_ok(VERIFY_WRITE, to, n))
1823 - return __copy_tofrom_user(to, (__force void __user *)from, n);
1824 - if ((unsigned long)to < TASK_SIZE) {
1825 - over = (unsigned long)to + n - TASK_SIZE;
1826 - return __copy_tofrom_user(to, (__force void __user *)from,
1827 - n - over) + over;
1828 - }
1829 - return n;
1830 -}
1831 -
1832 -#else /* __powerpc64__ */
1833 -
1834 -#define __copy_in_user(to, from, size) \
1835 - __copy_tofrom_user((to), (from), (size))
1836 -
1837 -extern unsigned long copy_from_user(void *to, const void __user *from,
1838 - unsigned long n);
1839 -extern unsigned long copy_to_user(void __user *to, const void *from,
1840 - unsigned long n);
1841 -extern unsigned long copy_in_user(void __user *to, const void __user *from,
1842 - unsigned long n);
1843 -
1844 -#endif /* __powerpc64__ */
1845 -
1846 static inline unsigned long __copy_from_user_inatomic(void *to,
1847 const void __user *from, unsigned long n)
1848 {
1849 @@ -396,6 +352,10 @@ static inline unsigned long __copy_from_
1850 if (ret == 0)
1851 return 0;
1852 }
1853 +
1854 + if (!__builtin_constant_p(n))
1855 + check_object_size(to, n, false);
1856 +
1857 return __copy_tofrom_user((__force void __user *)to, from, n);
1858 }
1859
1860 @@ -422,6 +382,10 @@ static inline unsigned long __copy_to_us
1861 if (ret == 0)
1862 return 0;
1863 }
1864 +
1865 + if (!__builtin_constant_p(n))
1866 + check_object_size(from, n, true);
1867 +
1868 return __copy_tofrom_user(to, (__force const void __user *)from, n);
1869 }
1870
1871 @@ -439,6 +403,92 @@ static inline unsigned long __copy_to_us
1872 return __copy_to_user_inatomic(to, from, size);
1873 }
1874
1875 +#ifndef __powerpc64__
1876 +
1877 +static inline unsigned long __must_check copy_from_user(void *to,
1878 + const void __user *from, unsigned long n)
1879 +{
1880 + unsigned long over;
1881 +
1882 + if ((long)n < 0)
1883 + return n;
1884 +
1885 + if (access_ok(VERIFY_READ, from, n)) {
1886 + if (!__builtin_constant_p(n))
1887 + check_object_size(to, n, false);
1888 + return __copy_tofrom_user((__force void __user *)to, from, n);
1889 + }
1890 + if ((unsigned long)from < TASK_SIZE) {
1891 + over = (unsigned long)from + n - TASK_SIZE;
1892 + if (!__builtin_constant_p(n - over))
1893 + check_object_size(to, n - over, false);
1894 + return __copy_tofrom_user((__force void __user *)to, from,
1895 + n - over) + over;
1896 + }
1897 + return n;
1898 +}
1899 +
1900 +static inline unsigned long __must_check copy_to_user(void __user *to,
1901 + const void *from, unsigned long n)
1902 +{
1903 + unsigned long over;
1904 +
1905 + if ((long)n < 0)
1906 + return n;
1907 +
1908 + if (access_ok(VERIFY_WRITE, to, n)) {
1909 + if (!__builtin_constant_p(n))
1910 + check_object_size(from, n, true);
1911 + return __copy_tofrom_user(to, (__force void __user *)from, n);
1912 + }
1913 + if ((unsigned long)to < TASK_SIZE) {
1914 + over = (unsigned long)to + n - TASK_SIZE;
1915 + if (!__builtin_constant_p(n))
1916 + check_object_size(from, n - over, true);
1917 + return __copy_tofrom_user(to, (__force void __user *)from,
1918 + n - over) + over;
1919 + }
1920 + return n;
1921 +}
1922 +
1923 +#else /* __powerpc64__ */
1924 +
1925 +#define __copy_in_user(to, from, size) \
1926 + __copy_tofrom_user((to), (from), (size))
1927 +
1928 +static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
1929 +{
1930 + if ((long)n < 0 || n > INT_MAX)
1931 + return n;
1932 +
1933 + if (!__builtin_constant_p(n))
1934 + check_object_size(to, n, false);
1935 +
1936 + if (likely(access_ok(VERIFY_READ, from, n)))
1937 + n = __copy_from_user(to, from, n);
1938 + else
1939 + memset(to, 0, n);
1940 + return n;
1941 +}
1942 +
1943 +static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
1944 +{
1945 + if ((long)n < 0 || n > INT_MAX)
1946 + return n;
1947 +
1948 + if (likely(access_ok(VERIFY_WRITE, to, n))) {
1949 + if (!__builtin_constant_p(n))
1950 + check_object_size(from, n, true);
1951 + n = __copy_to_user(to, from, n);
1952 + }
1953 + return n;
1954 +}
1955 +
1956 +extern unsigned long copy_in_user(void __user *to, const void __user *from,
1957 + unsigned long n);
1958 +
1959 +#endif /* __powerpc64__ */
1960 +
1961 extern unsigned long __clear_user(void __user *addr, unsigned long size);
1962
1963 static inline unsigned long clear_user(void __user *addr, unsigned long size)
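The rewritten copy_from_user()/copy_to_user() above start by rejecting any size whose signed interpretation is negative, before access_ok() or the actual copy runs. The snippet below is a plain userspace illustration (not kernel code) of the failure mode that guard targets: a length computation that underflows yields an enormous size_t, which shows up as a negative value when read as a signed long.

#include <stdio.h>

int main(void)
{
	unsigned long have = 16, want = 32;
	unsigned long n = have - want;	/* underflows to a huge value */

	if ((long)n < 0)
		printf("bogus length rejected: n = %lu\n", n);
	else
		printf("a %lu byte copy would have been attempted\n", n);
	return 0;
}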
1964 diff -urNp linux-3.0.8/arch/powerpc/kernel/exceptions-64e.S linux-3.0.8/arch/powerpc/kernel/exceptions-64e.S
1965 --- linux-3.0.8/arch/powerpc/kernel/exceptions-64e.S 2011-07-21 22:17:23.000000000 -0400
1966 +++ linux-3.0.8/arch/powerpc/kernel/exceptions-64e.S 2011-08-23 21:47:55.000000000 -0400
1967 @@ -567,6 +567,7 @@ storage_fault_common:
1968 std r14,_DAR(r1)
1969 std r15,_DSISR(r1)
1970 addi r3,r1,STACK_FRAME_OVERHEAD
1971 + bl .save_nvgprs
1972 mr r4,r14
1973 mr r5,r15
1974 ld r14,PACA_EXGEN+EX_R14(r13)
1975 @@ -576,8 +577,7 @@ storage_fault_common:
1976 cmpdi r3,0
1977 bne- 1f
1978 b .ret_from_except_lite
1979 -1: bl .save_nvgprs
1980 - mr r5,r3
1981 +1: mr r5,r3
1982 addi r3,r1,STACK_FRAME_OVERHEAD
1983 ld r4,_DAR(r1)
1984 bl .bad_page_fault
1985 diff -urNp linux-3.0.8/arch/powerpc/kernel/exceptions-64s.S linux-3.0.8/arch/powerpc/kernel/exceptions-64s.S
1986 --- linux-3.0.8/arch/powerpc/kernel/exceptions-64s.S 2011-07-21 22:17:23.000000000 -0400
1987 +++ linux-3.0.8/arch/powerpc/kernel/exceptions-64s.S 2011-08-23 21:47:55.000000000 -0400
1988 @@ -956,10 +956,10 @@ handle_page_fault:
1989 11: ld r4,_DAR(r1)
1990 ld r5,_DSISR(r1)
1991 addi r3,r1,STACK_FRAME_OVERHEAD
1992 + bl .save_nvgprs
1993 bl .do_page_fault
1994 cmpdi r3,0
1995 beq+ 13f
1996 - bl .save_nvgprs
1997 mr r5,r3
1998 addi r3,r1,STACK_FRAME_OVERHEAD
1999 lwz r4,_DAR(r1)
2000 diff -urNp linux-3.0.8/arch/powerpc/kernel/module_32.c linux-3.0.8/arch/powerpc/kernel/module_32.c
2001 --- linux-3.0.8/arch/powerpc/kernel/module_32.c 2011-07-21 22:17:23.000000000 -0400
2002 +++ linux-3.0.8/arch/powerpc/kernel/module_32.c 2011-08-23 21:47:55.000000000 -0400
2003 @@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr
2004 me->arch.core_plt_section = i;
2005 }
2006 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
2007 - printk("Module doesn't contain .plt or .init.plt sections.\n");
2008 + printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
2009 return -ENOEXEC;
2010 }
2011
2012 @@ -203,11 +203,16 @@ static uint32_t do_plt_call(void *locati
2013
2014 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
2015 /* Init, or core PLT? */
2016 - if (location >= mod->module_core
2017 - && location < mod->module_core + mod->core_size)
2018 + if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
2019 + (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
2020 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
2021 - else
2022 + else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
2023 + (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
2024 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
2025 + else {
2026 + printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
2027 + return ~0UL;
2028 + }
2029
2030 /* Find this entry, or if that fails, the next avail. entry */
2031 while (entry->jump[0]) {
2032 diff -urNp linux-3.0.8/arch/powerpc/kernel/module.c linux-3.0.8/arch/powerpc/kernel/module.c
2033 --- linux-3.0.8/arch/powerpc/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
2034 +++ linux-3.0.8/arch/powerpc/kernel/module.c 2011-08-23 21:47:55.000000000 -0400
2035 @@ -31,11 +31,24 @@
2036
2037 LIST_HEAD(module_bug_list);
2038
2039 +#ifdef CONFIG_PAX_KERNEXEC
2040 void *module_alloc(unsigned long size)
2041 {
2042 if (size == 0)
2043 return NULL;
2044
2045 + return vmalloc(size);
2046 +}
2047 +
2048 +void *module_alloc_exec(unsigned long size)
2049 +#else
2050 +void *module_alloc(unsigned long size)
2051 +#endif
2052 +
2053 +{
2054 + if (size == 0)
2055 + return NULL;
2056 +
2057 return vmalloc_exec(size);
2058 }
2059
2060 @@ -45,6 +58,13 @@ void module_free(struct module *mod, voi
2061 vfree(module_region);
2062 }
2063
2064 +#ifdef CONFIG_PAX_KERNEXEC
2065 +void module_free_exec(struct module *mod, void *module_region)
2066 +{
2067 + module_free(mod, module_region);
2068 +}
2069 +#endif
2070 +
2071 static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
2072 const Elf_Shdr *sechdrs,
2073 const char *name)
2074 diff -urNp linux-3.0.8/arch/powerpc/kernel/process.c linux-3.0.8/arch/powerpc/kernel/process.c
2075 --- linux-3.0.8/arch/powerpc/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
2076 +++ linux-3.0.8/arch/powerpc/kernel/process.c 2011-08-23 21:48:14.000000000 -0400
2077 @@ -676,8 +676,8 @@ void show_regs(struct pt_regs * regs)
2078 * Lookup NIP late so we have the best change of getting the
2079 * above info out without failing
2080 */
2081 - printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
2082 - printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
2083 + printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
2084 + printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
2085 #endif
2086 show_stack(current, (unsigned long *) regs->gpr[1]);
2087 if (!user_mode(regs))
2088 @@ -1183,10 +1183,10 @@ void show_stack(struct task_struct *tsk,
2089 newsp = stack[0];
2090 ip = stack[STACK_FRAME_LR_SAVE];
2091 if (!firstframe || ip != lr) {
2092 - printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
2093 + printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
2094 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2095 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
2096 - printk(" (%pS)",
2097 + printk(" (%pA)",
2098 (void *)current->ret_stack[curr_frame].ret);
2099 curr_frame--;
2100 }
2101 @@ -1206,7 +1206,7 @@ void show_stack(struct task_struct *tsk,
2102 struct pt_regs *regs = (struct pt_regs *)
2103 (sp + STACK_FRAME_OVERHEAD);
2104 lr = regs->link;
2105 - printk("--- Exception: %lx at %pS\n LR = %pS\n",
2106 + printk("--- Exception: %lx at %pA\n LR = %pA\n",
2107 regs->trap, (void *)regs->nip, (void *)lr);
2108 firstframe = 1;
2109 }
2110 @@ -1281,58 +1281,3 @@ void thread_info_cache_init(void)
2111 }
2112
2113 #endif /* THREAD_SHIFT < PAGE_SHIFT */
2114 -
2115 -unsigned long arch_align_stack(unsigned long sp)
2116 -{
2117 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2118 - sp -= get_random_int() & ~PAGE_MASK;
2119 - return sp & ~0xf;
2120 -}
2121 -
2122 -static inline unsigned long brk_rnd(void)
2123 -{
2124 - unsigned long rnd = 0;
2125 -
2126 - /* 8MB for 32bit, 1GB for 64bit */
2127 - if (is_32bit_task())
2128 - rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
2129 - else
2130 - rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
2131 -
2132 - return rnd << PAGE_SHIFT;
2133 -}
2134 -
2135 -unsigned long arch_randomize_brk(struct mm_struct *mm)
2136 -{
2137 - unsigned long base = mm->brk;
2138 - unsigned long ret;
2139 -
2140 -#ifdef CONFIG_PPC_STD_MMU_64
2141 - /*
2142 - * If we are using 1TB segments and we are allowed to randomise
2143 - * the heap, we can put it above 1TB so it is backed by a 1TB
2144 - * segment. Otherwise the heap will be in the bottom 1TB
2145 - * which always uses 256MB segments and this may result in a
2146 - * performance penalty.
2147 - */
2148 - if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
2149 - base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
2150 -#endif
2151 -
2152 - ret = PAGE_ALIGN(base + brk_rnd());
2153 -
2154 - if (ret < mm->brk)
2155 - return mm->brk;
2156 -
2157 - return ret;
2158 -}
2159 -
2160 -unsigned long randomize_et_dyn(unsigned long base)
2161 -{
2162 - unsigned long ret = PAGE_ALIGN(base + brk_rnd());
2163 -
2164 - if (ret < base)
2165 - return base;
2166 -
2167 - return ret;
2168 -}
2169 diff -urNp linux-3.0.8/arch/powerpc/kernel/signal_32.c linux-3.0.8/arch/powerpc/kernel/signal_32.c
2170 --- linux-3.0.8/arch/powerpc/kernel/signal_32.c 2011-07-21 22:17:23.000000000 -0400
2171 +++ linux-3.0.8/arch/powerpc/kernel/signal_32.c 2011-08-23 21:47:55.000000000 -0400
2172 @@ -859,7 +859,7 @@ int handle_rt_signal32(unsigned long sig
2173 /* Save user registers on the stack */
2174 frame = &rt_sf->uc.uc_mcontext;
2175 addr = frame;
2176 - if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
2177 + if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2178 if (save_user_regs(regs, frame, 0, 1))
2179 goto badframe;
2180 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
2181 diff -urNp linux-3.0.8/arch/powerpc/kernel/signal_64.c linux-3.0.8/arch/powerpc/kernel/signal_64.c
2182 --- linux-3.0.8/arch/powerpc/kernel/signal_64.c 2011-07-21 22:17:23.000000000 -0400
2183 +++ linux-3.0.8/arch/powerpc/kernel/signal_64.c 2011-08-23 21:47:55.000000000 -0400
2184 @@ -430,7 +430,7 @@ int handle_rt_signal64(int signr, struct
2185 current->thread.fpscr.val = 0;
2186
2187 /* Set up to return from userspace. */
2188 - if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
2189 + if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2190 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
2191 } else {
2192 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
2193 diff -urNp linux-3.0.8/arch/powerpc/kernel/traps.c linux-3.0.8/arch/powerpc/kernel/traps.c
2194 --- linux-3.0.8/arch/powerpc/kernel/traps.c 2011-07-21 22:17:23.000000000 -0400
2195 +++ linux-3.0.8/arch/powerpc/kernel/traps.c 2011-08-23 21:48:14.000000000 -0400
2196 @@ -98,6 +98,8 @@ static void pmac_backlight_unblank(void)
2197 static inline void pmac_backlight_unblank(void) { }
2198 #endif
2199
2200 +extern void gr_handle_kernel_exploit(void);
2201 +
2202 int die(const char *str, struct pt_regs *regs, long err)
2203 {
2204 static struct {
2205 @@ -171,6 +173,8 @@ int die(const char *str, struct pt_regs
2206 if (panic_on_oops)
2207 panic("Fatal exception");
2208
2209 + gr_handle_kernel_exploit();
2210 +
2211 oops_exit();
2212 do_exit(err);
2213
2214 diff -urNp linux-3.0.8/arch/powerpc/kernel/vdso.c linux-3.0.8/arch/powerpc/kernel/vdso.c
2215 --- linux-3.0.8/arch/powerpc/kernel/vdso.c 2011-07-21 22:17:23.000000000 -0400
2216 +++ linux-3.0.8/arch/powerpc/kernel/vdso.c 2011-08-23 21:47:55.000000000 -0400
2217 @@ -36,6 +36,7 @@
2218 #include <asm/firmware.h>
2219 #include <asm/vdso.h>
2220 #include <asm/vdso_datapage.h>
2221 +#include <asm/mman.h>
2222
2223 #include "setup.h"
2224
2225 @@ -220,7 +221,7 @@ int arch_setup_additional_pages(struct l
2226 vdso_base = VDSO32_MBASE;
2227 #endif
2228
2229 - current->mm->context.vdso_base = 0;
2230 + current->mm->context.vdso_base = ~0UL;
2231
2232 /* vDSO has a problem and was disabled, just don't "enable" it for the
2233 * process
2234 @@ -240,7 +241,7 @@ int arch_setup_additional_pages(struct l
2235 vdso_base = get_unmapped_area(NULL, vdso_base,
2236 (vdso_pages << PAGE_SHIFT) +
2237 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
2238 - 0, 0);
2239 + 0, MAP_PRIVATE | MAP_EXECUTABLE);
2240 if (IS_ERR_VALUE(vdso_base)) {
2241 rc = vdso_base;
2242 goto fail_mmapsem;
2243 diff -urNp linux-3.0.8/arch/powerpc/lib/usercopy_64.c linux-3.0.8/arch/powerpc/lib/usercopy_64.c
2244 --- linux-3.0.8/arch/powerpc/lib/usercopy_64.c 2011-07-21 22:17:23.000000000 -0400
2245 +++ linux-3.0.8/arch/powerpc/lib/usercopy_64.c 2011-08-23 21:47:55.000000000 -0400
2246 @@ -9,22 +9,6 @@
2247 #include <linux/module.h>
2248 #include <asm/uaccess.h>
2249
2250 -unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
2251 -{
2252 - if (likely(access_ok(VERIFY_READ, from, n)))
2253 - n = __copy_from_user(to, from, n);
2254 - else
2255 - memset(to, 0, n);
2256 - return n;
2257 -}
2258 -
2259 -unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
2260 -{
2261 - if (likely(access_ok(VERIFY_WRITE, to, n)))
2262 - n = __copy_to_user(to, from, n);
2263 - return n;
2264 -}
2265 -
2266 unsigned long copy_in_user(void __user *to, const void __user *from,
2267 unsigned long n)
2268 {
2269 @@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *
2270 return n;
2271 }
2272
2273 -EXPORT_SYMBOL(copy_from_user);
2274 -EXPORT_SYMBOL(copy_to_user);
2275 EXPORT_SYMBOL(copy_in_user);
2276
2277 diff -urNp linux-3.0.8/arch/powerpc/mm/fault.c linux-3.0.8/arch/powerpc/mm/fault.c
2278 --- linux-3.0.8/arch/powerpc/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
2279 +++ linux-3.0.8/arch/powerpc/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
2280 @@ -32,6 +32,10 @@
2281 #include <linux/perf_event.h>
2282 #include <linux/magic.h>
2283 #include <linux/ratelimit.h>
2284 +#include <linux/slab.h>
2285 +#include <linux/pagemap.h>
2286 +#include <linux/compiler.h>
2287 +#include <linux/unistd.h>
2288
2289 #include <asm/firmware.h>
2290 #include <asm/page.h>
2291 @@ -43,6 +47,7 @@
2292 #include <asm/tlbflush.h>
2293 #include <asm/siginfo.h>
2294 #include <mm/mmu_decl.h>
2295 +#include <asm/ptrace.h>
2296
2297 #ifdef CONFIG_KPROBES
2298 static inline int notify_page_fault(struct pt_regs *regs)
2299 @@ -66,6 +71,33 @@ static inline int notify_page_fault(stru
2300 }
2301 #endif
2302
2303 +#ifdef CONFIG_PAX_PAGEEXEC
2304 +/*
2305 + * PaX: decide what to do with offenders (regs->nip = fault address)
2306 + *
2307 + * returns 1 when task should be killed
2308 + */
2309 +static int pax_handle_fetch_fault(struct pt_regs *regs)
2310 +{
2311 + return 1;
2312 +}
2313 +
2314 +void pax_report_insns(void *pc, void *sp)
2315 +{
2316 + unsigned long i;
2317 +
2318 + printk(KERN_ERR "PAX: bytes at PC: ");
2319 + for (i = 0; i < 5; i++) {
2320 + unsigned int c;
2321 + if (get_user(c, (unsigned int __user *)pc+i))
2322 + printk(KERN_CONT "???????? ");
2323 + else
2324 + printk(KERN_CONT "%08x ", c);
2325 + }
2326 + printk("\n");
2327 +}
2328 +#endif
2329 +
2330 /*
2331 * Check whether the instruction at regs->nip is a store using
2332 * an update addressing form which will update r1.
2333 @@ -136,7 +168,7 @@ int __kprobes do_page_fault(struct pt_re
2334 * indicate errors in DSISR but can validly be set in SRR1.
2335 */
2336 if (trap == 0x400)
2337 - error_code &= 0x48200000;
2338 + error_code &= 0x58200000;
2339 else
2340 is_write = error_code & DSISR_ISSTORE;
2341 #else
2342 @@ -259,7 +291,7 @@ good_area:
2343 * "undefined". Of those that can be set, this is the only
2344 * one which seems bad.
2345 */
2346 - if (error_code & 0x10000000)
2347 + if (error_code & DSISR_GUARDED)
2348 /* Guarded storage error. */
2349 goto bad_area;
2350 #endif /* CONFIG_8xx */
2351 @@ -274,7 +306,7 @@ good_area:
2352 * processors use the same I/D cache coherency mechanism
2353 * as embedded.
2354 */
2355 - if (error_code & DSISR_PROTFAULT)
2356 + if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
2357 goto bad_area;
2358 #endif /* CONFIG_PPC_STD_MMU */
2359
2360 @@ -343,6 +375,23 @@ bad_area:
2361 bad_area_nosemaphore:
2362 /* User mode accesses cause a SIGSEGV */
2363 if (user_mode(regs)) {
2364 +
2365 +#ifdef CONFIG_PAX_PAGEEXEC
2366 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
2367 +#ifdef CONFIG_PPC_STD_MMU
2368 + if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
2369 +#else
2370 + if (is_exec && regs->nip == address) {
2371 +#endif
2372 + switch (pax_handle_fetch_fault(regs)) {
2373 + }
2374 +
2375 + pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
2376 + do_group_exit(SIGKILL);
2377 + }
2378 + }
2379 +#endif
2380 +
2381 _exception(SIGSEGV, regs, code, address);
2382 return 0;
2383 }
2384 diff -urNp linux-3.0.8/arch/powerpc/mm/mmap_64.c linux-3.0.8/arch/powerpc/mm/mmap_64.c
2385 --- linux-3.0.8/arch/powerpc/mm/mmap_64.c 2011-07-21 22:17:23.000000000 -0400
2386 +++ linux-3.0.8/arch/powerpc/mm/mmap_64.c 2011-08-23 21:47:55.000000000 -0400
2387 @@ -99,10 +99,22 @@ void arch_pick_mmap_layout(struct mm_str
2388 */
2389 if (mmap_is_legacy()) {
2390 mm->mmap_base = TASK_UNMAPPED_BASE;
2391 +
2392 +#ifdef CONFIG_PAX_RANDMMAP
2393 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2394 + mm->mmap_base += mm->delta_mmap;
2395 +#endif
2396 +
2397 mm->get_unmapped_area = arch_get_unmapped_area;
2398 mm->unmap_area = arch_unmap_area;
2399 } else {
2400 mm->mmap_base = mmap_base();
2401 +
2402 +#ifdef CONFIG_PAX_RANDMMAP
2403 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2404 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2405 +#endif
2406 +
2407 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
2408 mm->unmap_area = arch_unmap_area_topdown;
2409 }
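As with the s390 hunk further down, the legacy layout shifts mmap_base up by mm->delta_mmap while the top-down layout pulls it down by delta_mmap + delta_stack. The deltas are populated elsewhere in the patch from the per-architecture PAX_DELTA_*_LEN constants; the toy program below only illustrates what those constants mean — a number of random bits that, once page-aligned, becomes the base offset. The derivation itself is an assumption, not a quote of the patch.

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SHIFT		12
#define PAX_DELTA_MMAP_LEN	28	/* 64-bit value from the powerpc elf.h hunk above */

int main(void)
{
	/* Assumed shape of the derivation: N random bits, shifted into whole pages. */
	unsigned long delta_mmap = ((unsigned long)random() &
				    ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;

	printf("mmap_base would move by 0x%lx bytes (range spans %lu GiB)\n",
	       delta_mmap, ((1UL << PAX_DELTA_MMAP_LEN) << PAGE_SHIFT) >> 30);
	return 0;
}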
2410 diff -urNp linux-3.0.8/arch/powerpc/mm/slice.c linux-3.0.8/arch/powerpc/mm/slice.c
2411 --- linux-3.0.8/arch/powerpc/mm/slice.c 2011-07-21 22:17:23.000000000 -0400
2412 +++ linux-3.0.8/arch/powerpc/mm/slice.c 2011-08-23 21:47:55.000000000 -0400
2413 @@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_
2414 if ((mm->task_size - len) < addr)
2415 return 0;
2416 vma = find_vma(mm, addr);
2417 - return (!vma || (addr + len) <= vma->vm_start);
2418 + return check_heap_stack_gap(vma, addr, len);
2419 }
2420
2421 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
2422 @@ -256,7 +256,7 @@ full_search:
2423 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
2424 continue;
2425 }
2426 - if (!vma || addr + len <= vma->vm_start) {
2427 + if (check_heap_stack_gap(vma, addr, len)) {
2428 /*
2429 * Remember the place where we stopped the search:
2430 */
2431 @@ -313,10 +313,14 @@ static unsigned long slice_find_area_top
2432 }
2433 }
2434
2435 - addr = mm->mmap_base;
2436 - while (addr > len) {
2437 + if (mm->mmap_base < len)
2438 + addr = -ENOMEM;
2439 + else
2440 + addr = mm->mmap_base - len;
2441 +
2442 + while (!IS_ERR_VALUE(addr)) {
2443 /* Go down by chunk size */
2444 - addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
2445 + addr = _ALIGN_DOWN(addr, 1ul << pshift);
2446
2447 /* Check for hit with different page size */
2448 mask = slice_range_to_mask(addr, len);
2449 @@ -336,7 +340,7 @@ static unsigned long slice_find_area_top
2450 * return with success:
2451 */
2452 vma = find_vma(mm, addr);
2453 - if (!vma || (addr + len) <= vma->vm_start) {
2454 + if (check_heap_stack_gap(vma, addr, len)) {
2455 /* remember the address as a hint for next time */
2456 if (use_cache)
2457 mm->free_area_cache = addr;
2458 @@ -348,7 +352,7 @@ static unsigned long slice_find_area_top
2459 mm->cached_hole_size = vma->vm_start - addr;
2460
2461 /* try just below the current vma->vm_start */
2462 - addr = vma->vm_start;
2463 + addr = skip_heap_stack_gap(vma, len);
2464 }
2465
2466 /*
2467 @@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(un
2468 if (fixed && addr > (mm->task_size - len))
2469 return -EINVAL;
2470
2471 +#ifdef CONFIG_PAX_RANDMMAP
2472 + if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
2473 + addr = 0;
2474 +#endif
2475 +
2476 /* If hint, make sure it matches our alignment restrictions */
2477 if (!fixed && addr) {
2478 addr = _ALIGN_UP(addr, 1ul << pshift);
2479 diff -urNp linux-3.0.8/arch/s390/include/asm/elf.h linux-3.0.8/arch/s390/include/asm/elf.h
2480 --- linux-3.0.8/arch/s390/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
2481 +++ linux-3.0.8/arch/s390/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
2482 @@ -162,8 +162,14 @@ extern unsigned int vdso_enabled;
2483 the loader. We need to make sure that it is out of the way of the program
2484 that it will "exec", and that there is sufficient room for the brk. */
2485
2486 -extern unsigned long randomize_et_dyn(unsigned long base);
2487 -#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
2488 +#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
2489 +
2490 +#ifdef CONFIG_PAX_ASLR
2491 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
2492 +
2493 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
2494 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
2495 +#endif
2496
2497 /* This yields a mask that user programs can use to figure out what
2498 instruction set this CPU supports. */
2499 @@ -210,7 +216,4 @@ struct linux_binprm;
2500 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
2501 int arch_setup_additional_pages(struct linux_binprm *, int);
2502
2503 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2504 -#define arch_randomize_brk arch_randomize_brk
2505 -
2506 #endif
2507 diff -urNp linux-3.0.8/arch/s390/include/asm/system.h linux-3.0.8/arch/s390/include/asm/system.h
2508 --- linux-3.0.8/arch/s390/include/asm/system.h 2011-07-21 22:17:23.000000000 -0400
2509 +++ linux-3.0.8/arch/s390/include/asm/system.h 2011-08-23 21:47:55.000000000 -0400
2510 @@ -255,7 +255,7 @@ extern void (*_machine_restart)(char *co
2511 extern void (*_machine_halt)(void);
2512 extern void (*_machine_power_off)(void);
2513
2514 -extern unsigned long arch_align_stack(unsigned long sp);
2515 +#define arch_align_stack(x) ((x) & ~0xfUL)
2516
2517 static inline int tprot(unsigned long addr)
2518 {
2519 diff -urNp linux-3.0.8/arch/s390/include/asm/uaccess.h linux-3.0.8/arch/s390/include/asm/uaccess.h
2520 --- linux-3.0.8/arch/s390/include/asm/uaccess.h 2011-07-21 22:17:23.000000000 -0400
2521 +++ linux-3.0.8/arch/s390/include/asm/uaccess.h 2011-08-23 21:47:55.000000000 -0400
2522 @@ -235,6 +235,10 @@ static inline unsigned long __must_check
2523 copy_to_user(void __user *to, const void *from, unsigned long n)
2524 {
2525 might_fault();
2526 +
2527 + if ((long)n < 0)
2528 + return n;
2529 +
2530 if (access_ok(VERIFY_WRITE, to, n))
2531 n = __copy_to_user(to, from, n);
2532 return n;
2533 @@ -260,6 +264,9 @@ copy_to_user(void __user *to, const void
2534 static inline unsigned long __must_check
2535 __copy_from_user(void *to, const void __user *from, unsigned long n)
2536 {
2537 + if ((long)n < 0)
2538 + return n;
2539 +
2540 if (__builtin_constant_p(n) && (n <= 256))
2541 return uaccess.copy_from_user_small(n, from, to);
2542 else
2543 @@ -294,6 +301,10 @@ copy_from_user(void *to, const void __us
2544 unsigned int sz = __compiletime_object_size(to);
2545
2546 might_fault();
2547 +
2548 + if ((long)n < 0)
2549 + return n;
2550 +
2551 if (unlikely(sz != -1 && sz < n)) {
2552 copy_from_user_overflow();
2553 return n;
2554 diff -urNp linux-3.0.8/arch/s390/kernel/module.c linux-3.0.8/arch/s390/kernel/module.c
2555 --- linux-3.0.8/arch/s390/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
2556 +++ linux-3.0.8/arch/s390/kernel/module.c 2011-08-23 21:47:55.000000000 -0400
2557 @@ -168,11 +168,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr,
2558
2559 /* Increase core size by size of got & plt and set start
2560 offsets for got and plt. */
2561 - me->core_size = ALIGN(me->core_size, 4);
2562 - me->arch.got_offset = me->core_size;
2563 - me->core_size += me->arch.got_size;
2564 - me->arch.plt_offset = me->core_size;
2565 - me->core_size += me->arch.plt_size;
2566 + me->core_size_rw = ALIGN(me->core_size_rw, 4);
2567 + me->arch.got_offset = me->core_size_rw;
2568 + me->core_size_rw += me->arch.got_size;
2569 + me->arch.plt_offset = me->core_size_rx;
2570 + me->core_size_rx += me->arch.plt_size;
2571 return 0;
2572 }
2573
2574 @@ -258,7 +258,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2575 if (info->got_initialized == 0) {
2576 Elf_Addr *gotent;
2577
2578 - gotent = me->module_core + me->arch.got_offset +
2579 + gotent = me->module_core_rw + me->arch.got_offset +
2580 info->got_offset;
2581 *gotent = val;
2582 info->got_initialized = 1;
2583 @@ -282,7 +282,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2584 else if (r_type == R_390_GOTENT ||
2585 r_type == R_390_GOTPLTENT)
2586 *(unsigned int *) loc =
2587 - (val + (Elf_Addr) me->module_core - loc) >> 1;
2588 + (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
2589 else if (r_type == R_390_GOT64 ||
2590 r_type == R_390_GOTPLT64)
2591 *(unsigned long *) loc = val;
2592 @@ -296,7 +296,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2593 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
2594 if (info->plt_initialized == 0) {
2595 unsigned int *ip;
2596 - ip = me->module_core + me->arch.plt_offset +
2597 + ip = me->module_core_rx + me->arch.plt_offset +
2598 info->plt_offset;
2599 #ifndef CONFIG_64BIT
2600 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
2601 @@ -321,7 +321,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2602 val - loc + 0xffffUL < 0x1ffffeUL) ||
2603 (r_type == R_390_PLT32DBL &&
2604 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
2605 - val = (Elf_Addr) me->module_core +
2606 + val = (Elf_Addr) me->module_core_rx +
2607 me->arch.plt_offset +
2608 info->plt_offset;
2609 val += rela->r_addend - loc;
2610 @@ -343,7 +343,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2611 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
2612 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
2613 val = val + rela->r_addend -
2614 - ((Elf_Addr) me->module_core + me->arch.got_offset);
2615 + ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
2616 if (r_type == R_390_GOTOFF16)
2617 *(unsigned short *) loc = val;
2618 else if (r_type == R_390_GOTOFF32)
2619 @@ -353,7 +353,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2620 break;
2621 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
2622 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
2623 - val = (Elf_Addr) me->module_core + me->arch.got_offset +
2624 + val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
2625 rela->r_addend - loc;
2626 if (r_type == R_390_GOTPC)
2627 *(unsigned int *) loc = val;
2628 diff -urNp linux-3.0.8/arch/s390/kernel/process.c linux-3.0.8/arch/s390/kernel/process.c
2629 --- linux-3.0.8/arch/s390/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
2630 +++ linux-3.0.8/arch/s390/kernel/process.c 2011-08-23 21:47:55.000000000 -0400
2631 @@ -319,39 +319,3 @@ unsigned long get_wchan(struct task_stru
2632 }
2633 return 0;
2634 }
2635 -
2636 -unsigned long arch_align_stack(unsigned long sp)
2637 -{
2638 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2639 - sp -= get_random_int() & ~PAGE_MASK;
2640 - return sp & ~0xf;
2641 -}
2642 -
2643 -static inline unsigned long brk_rnd(void)
2644 -{
2645 - /* 8MB for 32bit, 1GB for 64bit */
2646 - if (is_32bit_task())
2647 - return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
2648 - else
2649 - return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
2650 -}
2651 -
2652 -unsigned long arch_randomize_brk(struct mm_struct *mm)
2653 -{
2654 - unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
2655 -
2656 - if (ret < mm->brk)
2657 - return mm->brk;
2658 - return ret;
2659 -}
2660 -
2661 -unsigned long randomize_et_dyn(unsigned long base)
2662 -{
2663 - unsigned long ret = PAGE_ALIGN(base + brk_rnd());
2664 -
2665 - if (!(current->flags & PF_RANDOMIZE))
2666 - return base;
2667 - if (ret < base)
2668 - return base;
2669 - return ret;
2670 -}
2671 diff -urNp linux-3.0.8/arch/s390/kernel/setup.c linux-3.0.8/arch/s390/kernel/setup.c
2672 --- linux-3.0.8/arch/s390/kernel/setup.c 2011-07-21 22:17:23.000000000 -0400
2673 +++ linux-3.0.8/arch/s390/kernel/setup.c 2011-08-23 21:47:55.000000000 -0400
2674 @@ -271,7 +271,7 @@ static int __init early_parse_mem(char *
2675 }
2676 early_param("mem", early_parse_mem);
2677
2678 -unsigned int user_mode = HOME_SPACE_MODE;
2679 +unsigned int user_mode = SECONDARY_SPACE_MODE;
2680 EXPORT_SYMBOL_GPL(user_mode);
2681
2682 static int set_amode_and_uaccess(unsigned long user_amode,
2683 diff -urNp linux-3.0.8/arch/s390/mm/mmap.c linux-3.0.8/arch/s390/mm/mmap.c
2684 --- linux-3.0.8/arch/s390/mm/mmap.c 2011-07-21 22:17:23.000000000 -0400
2685 +++ linux-3.0.8/arch/s390/mm/mmap.c 2011-08-23 21:47:55.000000000 -0400
2686 @@ -91,10 +91,22 @@ void arch_pick_mmap_layout(struct mm_str
2687 */
2688 if (mmap_is_legacy()) {
2689 mm->mmap_base = TASK_UNMAPPED_BASE;
2690 +
2691 +#ifdef CONFIG_PAX_RANDMMAP
2692 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2693 + mm->mmap_base += mm->delta_mmap;
2694 +#endif
2695 +
2696 mm->get_unmapped_area = arch_get_unmapped_area;
2697 mm->unmap_area = arch_unmap_area;
2698 } else {
2699 mm->mmap_base = mmap_base();
2700 +
2701 +#ifdef CONFIG_PAX_RANDMMAP
2702 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2703 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2704 +#endif
2705 +
2706 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
2707 mm->unmap_area = arch_unmap_area_topdown;
2708 }
2709 @@ -166,10 +178,22 @@ void arch_pick_mmap_layout(struct mm_str
2710 */
2711 if (mmap_is_legacy()) {
2712 mm->mmap_base = TASK_UNMAPPED_BASE;
2713 +
2714 +#ifdef CONFIG_PAX_RANDMMAP
2715 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2716 + mm->mmap_base += mm->delta_mmap;
2717 +#endif
2718 +
2719 mm->get_unmapped_area = s390_get_unmapped_area;
2720 mm->unmap_area = arch_unmap_area;
2721 } else {
2722 mm->mmap_base = mmap_base();
2723 +
2724 +#ifdef CONFIG_PAX_RANDMMAP
2725 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2726 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2727 +#endif
2728 +
2729 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
2730 mm->unmap_area = arch_unmap_area_topdown;
2731 }
2732 diff -urNp linux-3.0.8/arch/score/include/asm/system.h linux-3.0.8/arch/score/include/asm/system.h
2733 --- linux-3.0.8/arch/score/include/asm/system.h 2011-07-21 22:17:23.000000000 -0400
2734 +++ linux-3.0.8/arch/score/include/asm/system.h 2011-08-23 21:47:55.000000000 -0400
2735 @@ -17,7 +17,7 @@ do { \
2736 #define finish_arch_switch(prev) do {} while (0)
2737
2738 typedef void (*vi_handler_t)(void);
2739 -extern unsigned long arch_align_stack(unsigned long sp);
2740 +#define arch_align_stack(x) (x)
2741
2742 #define mb() barrier()
2743 #define rmb() barrier()
2744 diff -urNp linux-3.0.8/arch/score/kernel/process.c linux-3.0.8/arch/score/kernel/process.c
2745 --- linux-3.0.8/arch/score/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
2746 +++ linux-3.0.8/arch/score/kernel/process.c 2011-08-23 21:47:55.000000000 -0400
2747 @@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_stru
2748
2749 return task_pt_regs(task)->cp0_epc;
2750 }
2751 -
2752 -unsigned long arch_align_stack(unsigned long sp)
2753 -{
2754 - return sp;
2755 -}
2756 diff -urNp linux-3.0.8/arch/sh/mm/mmap.c linux-3.0.8/arch/sh/mm/mmap.c
2757 --- linux-3.0.8/arch/sh/mm/mmap.c 2011-07-21 22:17:23.000000000 -0400
2758 +++ linux-3.0.8/arch/sh/mm/mmap.c 2011-08-23 21:47:55.000000000 -0400
2759 @@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(str
2760 addr = PAGE_ALIGN(addr);
2761
2762 vma = find_vma(mm, addr);
2763 - if (TASK_SIZE - len >= addr &&
2764 - (!vma || addr + len <= vma->vm_start))
2765 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
2766 return addr;
2767 }
2768
2769 @@ -106,7 +105,7 @@ full_search:
2770 }
2771 return -ENOMEM;
2772 }
2773 - if (likely(!vma || addr + len <= vma->vm_start)) {
2774 + if (likely(check_heap_stack_gap(vma, addr, len))) {
2775 /*
2776 * Remember the place where we stopped the search:
2777 */
2778 @@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct fi
2779 addr = PAGE_ALIGN(addr);
2780
2781 vma = find_vma(mm, addr);
2782 - if (TASK_SIZE - len >= addr &&
2783 - (!vma || addr + len <= vma->vm_start))
2784 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
2785 return addr;
2786 }
2787
2788 @@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct fi
2789 /* make sure it can fit in the remaining address space */
2790 if (likely(addr > len)) {
2791 vma = find_vma(mm, addr-len);
2792 - if (!vma || addr <= vma->vm_start) {
2793 + if (check_heap_stack_gap(vma, addr - len, len)) {
2794 /* remember the address as a hint for next time */
2795 return (mm->free_area_cache = addr-len);
2796 }
2797 @@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct fi
2798 if (unlikely(mm->mmap_base < len))
2799 goto bottomup;
2800
2801 - addr = mm->mmap_base-len;
2802 - if (do_colour_align)
2803 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
2804 + addr = mm->mmap_base - len;
2805
2806 do {
2807 + if (do_colour_align)
2808 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
2809 /*
2810 * Lookup failure means no vma is above this address,
2811 * else if new region fits below vma->vm_start,
2812 * return with success:
2813 */
2814 vma = find_vma(mm, addr);
2815 - if (likely(!vma || addr+len <= vma->vm_start)) {
2816 + if (likely(check_heap_stack_gap(vma, addr, len))) {
2817 /* remember the address as a hint for next time */
2818 return (mm->free_area_cache = addr);
2819 }
2820 @@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct fi
2821 mm->cached_hole_size = vma->vm_start - addr;
2822
2823 /* try just below the current vma->vm_start */
2824 - addr = vma->vm_start-len;
2825 - if (do_colour_align)
2826 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
2827 - } while (likely(len < vma->vm_start));
2828 + addr = skip_heap_stack_gap(vma, len);
2829 + } while (!IS_ERR_VALUE(addr));
2830
2831 bottomup:
2832 /*
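The reworked top-down loop above no longer steps to vma->vm_start - len by hand; it asks skip_heap_stack_gap() for the next candidate and stops once that returns an error value (hence the IS_ERR_VALUE() test). Like check_heap_stack_gap() in the parisc section, the real helper is defined in the common part of the patch; this is only a kernel-style sketch of its likely shape, with the gap size assumed.

#include <linux/mm.h>
#include <linux/err.h>

/* Illustrative sketch only; gap size and exact behaviour are assumptions. */
static unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma,
					 unsigned long len)
{
	unsigned long gap = (vma->vm_flags & VM_GROWSDOWN) ?
			    (64UL << PAGE_SHIFT) : 0UL;

	if (vma->vm_start < len + gap)
		return -ENOMEM;			/* nothing can fit below this VMA */
	return vma->vm_start - len - gap;	/* next candidate for the search */
}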
2833 diff -urNp linux-3.0.8/arch/sparc/include/asm/atomic_64.h linux-3.0.8/arch/sparc/include/asm/atomic_64.h
2834 --- linux-3.0.8/arch/sparc/include/asm/atomic_64.h 2011-07-21 22:17:23.000000000 -0400
2835 +++ linux-3.0.8/arch/sparc/include/asm/atomic_64.h 2011-08-23 21:48:14.000000000 -0400
2836 @@ -14,18 +14,40 @@
2837 #define ATOMIC64_INIT(i) { (i) }
2838
2839 #define atomic_read(v) (*(volatile int *)&(v)->counter)
2840 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
2841 +{
2842 + return v->counter;
2843 +}
2844 #define atomic64_read(v) (*(volatile long *)&(v)->counter)
2845 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
2846 +{
2847 + return v->counter;
2848 +}
2849
2850 #define atomic_set(v, i) (((v)->counter) = i)
2851 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
2852 +{
2853 + v->counter = i;
2854 +}
2855 #define atomic64_set(v, i) (((v)->counter) = i)
2856 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
2857 +{
2858 + v->counter = i;
2859 +}
2860
2861 extern void atomic_add(int, atomic_t *);
2862 +extern void atomic_add_unchecked(int, atomic_unchecked_t *);
2863 extern void atomic64_add(long, atomic64_t *);
2864 +extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
2865 extern void atomic_sub(int, atomic_t *);
2866 +extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
2867 extern void atomic64_sub(long, atomic64_t *);
2868 +extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
2869
2870 extern int atomic_add_ret(int, atomic_t *);
2871 +extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
2872 extern long atomic64_add_ret(long, atomic64_t *);
2873 +extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
2874 extern int atomic_sub_ret(int, atomic_t *);
2875 extern long atomic64_sub_ret(long, atomic64_t *);
2876
2877 @@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomi
2878 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
2879
2880 #define atomic_inc_return(v) atomic_add_ret(1, v)
2881 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
2882 +{
2883 + return atomic_add_ret_unchecked(1, v);
2884 +}
2885 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
2886 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
2887 +{
2888 + return atomic64_add_ret_unchecked(1, v);
2889 +}
2890
2891 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
2892 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
2893
2894 #define atomic_add_return(i, v) atomic_add_ret(i, v)
2895 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
2896 +{
2897 + return atomic_add_ret_unchecked(i, v);
2898 +}
2899 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
2900 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
2901 +{
2902 + return atomic64_add_ret_unchecked(i, v);
2903 +}
2904
2905 /*
2906 * atomic_inc_and_test - increment and test
2907 @@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomi
2908 * other cases.
2909 */
2910 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
2911 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
2912 +{
2913 + return atomic_inc_return_unchecked(v) == 0;
2914 +}
2915 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
2916
2917 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
2918 @@ -59,30 +101,65 @@ extern long atomic64_sub_ret(long, atomi
2919 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
2920
2921 #define atomic_inc(v) atomic_add(1, v)
2922 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
2923 +{
2924 + atomic_add_unchecked(1, v);
2925 +}
2926 #define atomic64_inc(v) atomic64_add(1, v)
2927 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
2928 +{
2929 + atomic64_add_unchecked(1, v);
2930 +}
2931
2932 #define atomic_dec(v) atomic_sub(1, v)
2933 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
2934 +{
2935 + atomic_sub_unchecked(1, v);
2936 +}
2937 #define atomic64_dec(v) atomic64_sub(1, v)
2938 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
2939 +{
2940 + atomic64_sub_unchecked(1, v);
2941 +}
2942
2943 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
2944 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
2945
2946 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
2947 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
2948 +{
2949 + return cmpxchg(&v->counter, old, new);
2950 +}
2951 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
2952 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
2953 +{
2954 + return xchg(&v->counter, new);
2955 +}
2956
2957 static inline int atomic_add_unless(atomic_t *v, int a, int u)
2958 {
2959 - int c, old;
2960 + int c, old, new;
2961 c = atomic_read(v);
2962 for (;;) {
2963 - if (unlikely(c == (u)))
2964 + if (unlikely(c == u))
2965 break;
2966 - old = atomic_cmpxchg((v), c, c + (a));
2967 +
2968 + asm volatile("addcc %2, %0, %0\n"
2969 +
2970 +#ifdef CONFIG_PAX_REFCOUNT
2971 + "tvs %%icc, 6\n"
2972 +#endif
2973 +
2974 + : "=r" (new)
2975 + : "0" (c), "ir" (a)
2976 + : "cc");
2977 +
2978 + old = atomic_cmpxchg(v, c, new);
2979 if (likely(old == c))
2980 break;
2981 c = old;
2982 }
2983 - return c != (u);
2984 + return c != u;
2985 }
2986
2987 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
2988 @@ -90,20 +167,35 @@ static inline int atomic_add_unless(atom
2989 #define atomic64_cmpxchg(v, o, n) \
2990 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
2991 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
2992 +static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
2993 +{
2994 + return xchg(&v->counter, new);
2995 +}
2996
2997 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
2998 {
2999 - long c, old;
3000 + long c, old, new;
3001 c = atomic64_read(v);
3002 for (;;) {
3003 - if (unlikely(c == (u)))
3004 + if (unlikely(c == u))
3005 break;
3006 - old = atomic64_cmpxchg((v), c, c + (a));
3007 +
3008 + asm volatile("addcc %2, %0, %0\n"
3009 +
3010 +#ifdef CONFIG_PAX_REFCOUNT
3011 + "tvs %%xcc, 6\n"
3012 +#endif
3013 +
3014 + : "=r" (new)
3015 + : "0" (c), "ir" (a)
3016 + : "cc");
3017 +
3018 + old = atomic64_cmpxchg(v, c, new);
3019 if (likely(old == c))
3020 break;
3021 c = old;
3022 }
3023 - return c != (u);
3024 + return c != u;
3025 }
3026
3027 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
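
The checked atomics above pair each add/sub with a conditional overflow trap (addcc/subcc followed by tvs %icc or %xcc, 6 under CONFIG_PAX_REFCOUNT), while the *_unchecked variants keep the plain wrapping behaviour for counters that are allowed to overflow. A minimal userspace sketch of that split, with hypothetical names and __builtin_add_overflow standing in for the trap:

	/* Illustrative sketch only: the "checked" increment aborts on signed
	 * overflow (the kernel instead raises trap 6, handled by
	 * pax_report_refcount_overflow()), the "unchecked" one wraps. */
	#include <stdio.h>
	#include <stdlib.h>

	typedef struct { volatile int counter; } atomic_t;
	typedef struct { volatile int counter; } atomic_unchecked_t;

	static void atomic_inc_checked(atomic_t *v)
	{
		int old = v->counter, next;
		if (__builtin_add_overflow(old, 1, &next))
			abort();		/* kernel: tvs -> refcount overflow report */
		v->counter = next;
	}

	static void atomic_inc_unchecked(atomic_unchecked_t *v)
	{
		v->counter++;			/* plain add, no overflow trap */
	}

	int main(void)
	{
		atomic_t a = { 0x7fffffff };		/* INT_MAX */
		atomic_unchecked_t b = { 0x7fffffff };

		atomic_inc_unchecked(&b);		/* wraps silently */
		printf("unchecked wrapped to %d\n", b.counter);
		atomic_inc_checked(&a);			/* overflow detected */
		return 0;
	}
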
3028 diff -urNp linux-3.0.8/arch/sparc/include/asm/cache.h linux-3.0.8/arch/sparc/include/asm/cache.h
3029 --- linux-3.0.8/arch/sparc/include/asm/cache.h 2011-07-21 22:17:23.000000000 -0400
3030 +++ linux-3.0.8/arch/sparc/include/asm/cache.h 2011-08-23 21:47:55.000000000 -0400
3031 @@ -10,7 +10,7 @@
3032 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
3033
3034 #define L1_CACHE_SHIFT 5
3035 -#define L1_CACHE_BYTES 32
3036 +#define L1_CACHE_BYTES 32UL
3037
3038 #ifdef CONFIG_SPARC32
3039 #define SMP_CACHE_BYTES_SHIFT 5
3040 diff -urNp linux-3.0.8/arch/sparc/include/asm/elf_32.h linux-3.0.8/arch/sparc/include/asm/elf_32.h
3041 --- linux-3.0.8/arch/sparc/include/asm/elf_32.h 2011-07-21 22:17:23.000000000 -0400
3042 +++ linux-3.0.8/arch/sparc/include/asm/elf_32.h 2011-08-23 21:47:55.000000000 -0400
3043 @@ -114,6 +114,13 @@ typedef struct {
3044
3045 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
3046
3047 +#ifdef CONFIG_PAX_ASLR
3048 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
3049 +
3050 +#define PAX_DELTA_MMAP_LEN 16
3051 +#define PAX_DELTA_STACK_LEN 16
3052 +#endif
3053 +
3054 /* This yields a mask that user programs can use to figure out what
3055 instruction set this cpu supports. This can NOT be done in userspace
3056 on Sparc. */
3057 diff -urNp linux-3.0.8/arch/sparc/include/asm/elf_64.h linux-3.0.8/arch/sparc/include/asm/elf_64.h
3058 --- linux-3.0.8/arch/sparc/include/asm/elf_64.h 2011-10-24 08:05:21.000000000 -0400
3059 +++ linux-3.0.8/arch/sparc/include/asm/elf_64.h 2011-08-23 21:47:55.000000000 -0400
3060 @@ -180,6 +180,13 @@ typedef struct {
3061 #define ELF_ET_DYN_BASE 0x0000010000000000UL
3062 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
3063
3064 +#ifdef CONFIG_PAX_ASLR
3065 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
3066 +
3067 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
3068 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
3069 +#endif
3070 +
3071 extern unsigned long sparc64_elf_hwcap;
3072 #define ELF_HWCAP sparc64_elf_hwcap
3073
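
PAX_DELTA_MMAP_LEN and PAX_DELTA_STACK_LEN give the number of random bits applied to the mmap and stack bases (28/29 for 64-bit tasks, 14/15 under TIF_32BIT). A rough sketch of how such a bit count becomes an offset, assuming the usual mask-and-page-shift derivation; the actual delta_mmap/delta_stack computation lives elsewhere in the patch and is only mirrored here:

	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>

	#define PAGE_SHIFT 13UL			/* sparc64 uses 8 KiB pages */

	/* hypothetical helper: N random bits, shifted up to page granularity */
	static uint64_t pax_delta(unsigned int len_bits)
	{
		uint64_t mask = (1UL << len_bits) - 1;
		return ((uint64_t)random() & mask) << PAGE_SHIFT;
	}

	int main(void)
	{
		/* 64-bit task: 28 bits of mmap randomization, 29 for the stack */
		printf("mmap delta:  %#lx\n", (unsigned long)pax_delta(28));
		printf("stack delta: %#lx\n", (unsigned long)pax_delta(29));
		return 0;
	}
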
3074 diff -urNp linux-3.0.8/arch/sparc/include/asm/pgtable_32.h linux-3.0.8/arch/sparc/include/asm/pgtable_32.h
3075 --- linux-3.0.8/arch/sparc/include/asm/pgtable_32.h 2011-07-21 22:17:23.000000000 -0400
3076 +++ linux-3.0.8/arch/sparc/include/asm/pgtable_32.h 2011-08-23 21:47:55.000000000 -0400
3077 @@ -45,6 +45,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
3078 BTFIXUPDEF_INT(page_none)
3079 BTFIXUPDEF_INT(page_copy)
3080 BTFIXUPDEF_INT(page_readonly)
3081 +
3082 +#ifdef CONFIG_PAX_PAGEEXEC
3083 +BTFIXUPDEF_INT(page_shared_noexec)
3084 +BTFIXUPDEF_INT(page_copy_noexec)
3085 +BTFIXUPDEF_INT(page_readonly_noexec)
3086 +#endif
3087 +
3088 BTFIXUPDEF_INT(page_kernel)
3089
3090 #define PMD_SHIFT SUN4C_PMD_SHIFT
3091 @@ -66,6 +73,16 @@ extern pgprot_t PAGE_SHARED;
3092 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
3093 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
3094
3095 +#ifdef CONFIG_PAX_PAGEEXEC
3096 +extern pgprot_t PAGE_SHARED_NOEXEC;
3097 +# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
3098 +# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
3099 +#else
3100 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
3101 +# define PAGE_COPY_NOEXEC PAGE_COPY
3102 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
3103 +#endif
3104 +
3105 extern unsigned long page_kernel;
3106
3107 #ifdef MODULE
3108 diff -urNp linux-3.0.8/arch/sparc/include/asm/pgtsrmmu.h linux-3.0.8/arch/sparc/include/asm/pgtsrmmu.h
3109 --- linux-3.0.8/arch/sparc/include/asm/pgtsrmmu.h 2011-07-21 22:17:23.000000000 -0400
3110 +++ linux-3.0.8/arch/sparc/include/asm/pgtsrmmu.h 2011-08-23 21:47:55.000000000 -0400
3111 @@ -115,6 +115,13 @@
3112 SRMMU_EXEC | SRMMU_REF)
3113 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
3114 SRMMU_EXEC | SRMMU_REF)
3115 +
3116 +#ifdef CONFIG_PAX_PAGEEXEC
3117 +#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
3118 +#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3119 +#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3120 +#endif
3121 +
3122 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
3123 SRMMU_DIRTY | SRMMU_REF)
3124
3125 diff -urNp linux-3.0.8/arch/sparc/include/asm/spinlock_64.h linux-3.0.8/arch/sparc/include/asm/spinlock_64.h
3126 --- linux-3.0.8/arch/sparc/include/asm/spinlock_64.h 2011-10-24 08:05:30.000000000 -0400
3127 +++ linux-3.0.8/arch/sparc/include/asm/spinlock_64.h 2011-10-16 21:55:27.000000000 -0400
3128 @@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(
3129
3130 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
3131
3132 -static void inline arch_read_lock(arch_rwlock_t *lock)
3133 +static inline void arch_read_lock(arch_rwlock_t *lock)
3134 {
3135 unsigned long tmp1, tmp2;
3136
3137 __asm__ __volatile__ (
3138 "1: ldsw [%2], %0\n"
3139 " brlz,pn %0, 2f\n"
3140 -"4: add %0, 1, %1\n"
3141 +"4: addcc %0, 1, %1\n"
3142 +
3143 +#ifdef CONFIG_PAX_REFCOUNT
3144 +" tvs %%icc, 6\n"
3145 +#endif
3146 +
3147 " cas [%2], %0, %1\n"
3148 " cmp %0, %1\n"
3149 " bne,pn %%icc, 1b\n"
3150 @@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_r
3151 " .previous"
3152 : "=&r" (tmp1), "=&r" (tmp2)
3153 : "r" (lock)
3154 - : "memory");
3155 + : "memory", "cc");
3156 }
3157
3158 -static int inline arch_read_trylock(arch_rwlock_t *lock)
3159 +static inline int arch_read_trylock(arch_rwlock_t *lock)
3160 {
3161 int tmp1, tmp2;
3162
3163 @@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch
3164 "1: ldsw [%2], %0\n"
3165 " brlz,a,pn %0, 2f\n"
3166 " mov 0, %0\n"
3167 -" add %0, 1, %1\n"
3168 +" addcc %0, 1, %1\n"
3169 +
3170 +#ifdef CONFIG_PAX_REFCOUNT
3171 +" tvs %%icc, 6\n"
3172 +#endif
3173 +
3174 " cas [%2], %0, %1\n"
3175 " cmp %0, %1\n"
3176 " bne,pn %%icc, 1b\n"
3177 @@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch
3178 return tmp1;
3179 }
3180
3181 -static void inline arch_read_unlock(arch_rwlock_t *lock)
3182 +static inline void arch_read_unlock(arch_rwlock_t *lock)
3183 {
3184 unsigned long tmp1, tmp2;
3185
3186 __asm__ __volatile__(
3187 "1: lduw [%2], %0\n"
3188 -" sub %0, 1, %1\n"
3189 +" subcc %0, 1, %1\n"
3190 +
3191 +#ifdef CONFIG_PAX_REFCOUNT
3192 +" tvs %%icc, 6\n"
3193 +#endif
3194 +
3195 " cas [%2], %0, %1\n"
3196 " cmp %0, %1\n"
3197 " bne,pn %%xcc, 1b\n"
3198 @@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch
3199 : "memory");
3200 }
3201
3202 -static void inline arch_write_lock(arch_rwlock_t *lock)
3203 +static inline void arch_write_lock(arch_rwlock_t *lock)
3204 {
3205 unsigned long mask, tmp1, tmp2;
3206
3207 @@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_
3208 : "memory");
3209 }
3210
3211 -static void inline arch_write_unlock(arch_rwlock_t *lock)
3212 +static inline void arch_write_unlock(arch_rwlock_t *lock)
3213 {
3214 __asm__ __volatile__(
3215 " stw %%g0, [%0]"
3216 @@ -186,7 +201,7 @@ static void inline arch_write_unlock(arc
3217 : "memory");
3218 }
3219
3220 -static int inline arch_write_trylock(arch_rwlock_t *lock)
3221 +static inline int arch_write_trylock(arch_rwlock_t *lock)
3222 {
3223 unsigned long mask, tmp1, tmp2, result;
3224
3225 diff -urNp linux-3.0.8/arch/sparc/include/asm/thread_info_32.h linux-3.0.8/arch/sparc/include/asm/thread_info_32.h
3226 --- linux-3.0.8/arch/sparc/include/asm/thread_info_32.h 2011-07-21 22:17:23.000000000 -0400
3227 +++ linux-3.0.8/arch/sparc/include/asm/thread_info_32.h 2011-08-23 21:47:55.000000000 -0400
3228 @@ -50,6 +50,8 @@ struct thread_info {
3229 unsigned long w_saved;
3230
3231 struct restart_block restart_block;
3232 +
3233 + unsigned long lowest_stack;
3234 };
3235
3236 /*
3237 diff -urNp linux-3.0.8/arch/sparc/include/asm/thread_info_64.h linux-3.0.8/arch/sparc/include/asm/thread_info_64.h
3238 --- linux-3.0.8/arch/sparc/include/asm/thread_info_64.h 2011-07-21 22:17:23.000000000 -0400
3239 +++ linux-3.0.8/arch/sparc/include/asm/thread_info_64.h 2011-08-23 21:47:55.000000000 -0400
3240 @@ -63,6 +63,8 @@ struct thread_info {
3241 struct pt_regs *kern_una_regs;
3242 unsigned int kern_una_insn;
3243
3244 + unsigned long lowest_stack;
3245 +
3246 unsigned long fpregs[0] __attribute__ ((aligned(64)));
3247 };
3248
3249 diff -urNp linux-3.0.8/arch/sparc/include/asm/uaccess_32.h linux-3.0.8/arch/sparc/include/asm/uaccess_32.h
3250 --- linux-3.0.8/arch/sparc/include/asm/uaccess_32.h 2011-07-21 22:17:23.000000000 -0400
3251 +++ linux-3.0.8/arch/sparc/include/asm/uaccess_32.h 2011-08-23 21:47:55.000000000 -0400
3252 @@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __
3253
3254 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
3255 {
3256 - if (n && __access_ok((unsigned long) to, n))
3257 + if ((long)n < 0)
3258 + return n;
3259 +
3260 + if (n && __access_ok((unsigned long) to, n)) {
3261 + if (!__builtin_constant_p(n))
3262 + check_object_size(from, n, true);
3263 return __copy_user(to, (__force void __user *) from, n);
3264 - else
3265 + } else
3266 return n;
3267 }
3268
3269 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
3270 {
3271 + if ((long)n < 0)
3272 + return n;
3273 +
3274 + if (!__builtin_constant_p(n))
3275 + check_object_size(from, n, true);
3276 +
3277 return __copy_user(to, (__force void __user *) from, n);
3278 }
3279
3280 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
3281 {
3282 - if (n && __access_ok((unsigned long) from, n))
3283 + if ((long)n < 0)
3284 + return n;
3285 +
3286 + if (n && __access_ok((unsigned long) from, n)) {
3287 + if (!__builtin_constant_p(n))
3288 + check_object_size(to, n, false);
3289 return __copy_user((__force void __user *) to, from, n);
3290 - else
3291 + } else
3292 return n;
3293 }
3294
3295 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
3296 {
3297 + if ((long)n < 0)
3298 + return n;
3299 +
3300 return __copy_user((__force void __user *) to, from, n);
3301 }
3302
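
Both copy directions gain the same two guards: a sign check that rejects absurd lengths up front ((long)n < 0), and, for non-constant sizes, a check_object_size() call that lets PAX_USERCOPY validate the kernel-side buffer bounds. A compressed userspace sketch of that control flow, with check_object_size() stubbed out and memcpy() standing in for __copy_user():

	#include <stdio.h>
	#include <string.h>

	static void check_object_size(const void *ptr, unsigned long n, int to_user)
	{
		/* real hook verifies [ptr, ptr+n) stays within one slab/stack object */
		(void)ptr; (void)n; (void)to_user;
	}

	static unsigned long copy_to_user_sketch(void *to, const void *from, unsigned long n)
	{
		if ((long)n < 0)		/* absurd length: report everything uncopied */
			return n;
		if (!__builtin_constant_p(n))
			check_object_size(from, n, 1);
		memcpy(to, from, n);		/* stand-in for __copy_user() */
		return 0;
	}

	int main(void)
	{
		char src[16] = "hello", dst[16];
		printf("ok:  %lu left\n", copy_to_user_sketch(dst, src, sizeof(src)));
		printf("bad: %lu left\n", copy_to_user_sketch(dst, src, (unsigned long)-1));
		return 0;
	}
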
3303 diff -urNp linux-3.0.8/arch/sparc/include/asm/uaccess_64.h linux-3.0.8/arch/sparc/include/asm/uaccess_64.h
3304 --- linux-3.0.8/arch/sparc/include/asm/uaccess_64.h 2011-07-21 22:17:23.000000000 -0400
3305 +++ linux-3.0.8/arch/sparc/include/asm/uaccess_64.h 2011-08-23 21:47:55.000000000 -0400
3306 @@ -10,6 +10,7 @@
3307 #include <linux/compiler.h>
3308 #include <linux/string.h>
3309 #include <linux/thread_info.h>
3310 +#include <linux/kernel.h>
3311 #include <asm/asi.h>
3312 #include <asm/system.h>
3313 #include <asm/spitfire.h>
3314 @@ -213,8 +214,15 @@ extern unsigned long copy_from_user_fixu
3315 static inline unsigned long __must_check
3316 copy_from_user(void *to, const void __user *from, unsigned long size)
3317 {
3318 - unsigned long ret = ___copy_from_user(to, from, size);
3319 + unsigned long ret;
3320
3321 + if ((long)size < 0 || size > INT_MAX)
3322 + return size;
3323 +
3324 + if (!__builtin_constant_p(size))
3325 + check_object_size(to, size, false);
3326 +
3327 + ret = ___copy_from_user(to, from, size);
3328 if (unlikely(ret))
3329 ret = copy_from_user_fixup(to, from, size);
3330
3331 @@ -230,8 +238,15 @@ extern unsigned long copy_to_user_fixup(
3332 static inline unsigned long __must_check
3333 copy_to_user(void __user *to, const void *from, unsigned long size)
3334 {
3335 - unsigned long ret = ___copy_to_user(to, from, size);
3336 + unsigned long ret;
3337 +
3338 + if ((long)size < 0 || size > INT_MAX)
3339 + return size;
3340 +
3341 + if (!__builtin_constant_p(size))
3342 + check_object_size(from, size, true);
3343
3344 + ret = ___copy_to_user(to, from, size);
3345 if (unlikely(ret))
3346 ret = copy_to_user_fixup(to, from, size);
3347 return ret;
3348 diff -urNp linux-3.0.8/arch/sparc/include/asm/uaccess.h linux-3.0.8/arch/sparc/include/asm/uaccess.h
3349 --- linux-3.0.8/arch/sparc/include/asm/uaccess.h 2011-07-21 22:17:23.000000000 -0400
3350 +++ linux-3.0.8/arch/sparc/include/asm/uaccess.h 2011-08-23 21:47:55.000000000 -0400
3351 @@ -1,5 +1,13 @@
3352 #ifndef ___ASM_SPARC_UACCESS_H
3353 #define ___ASM_SPARC_UACCESS_H
3354 +
3355 +#ifdef __KERNEL__
3356 +#ifndef __ASSEMBLY__
3357 +#include <linux/types.h>
3358 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
3359 +#endif
3360 +#endif
3361 +
3362 #if defined(__sparc__) && defined(__arch64__)
3363 #include <asm/uaccess_64.h>
3364 #else
3365 diff -urNp linux-3.0.8/arch/sparc/kernel/Makefile linux-3.0.8/arch/sparc/kernel/Makefile
3366 --- linux-3.0.8/arch/sparc/kernel/Makefile 2011-10-24 08:05:30.000000000 -0400
3367 +++ linux-3.0.8/arch/sparc/kernel/Makefile 2011-10-16 21:55:27.000000000 -0400
3368 @@ -3,7 +3,7 @@
3369 #
3370
3371 asflags-y := -ansi
3372 -ccflags-y := -Werror
3373 +#ccflags-y := -Werror
3374
3375 extra-y := head_$(BITS).o
3376 extra-y += init_task.o
3377 diff -urNp linux-3.0.8/arch/sparc/kernel/process_32.c linux-3.0.8/arch/sparc/kernel/process_32.c
3378 --- linux-3.0.8/arch/sparc/kernel/process_32.c 2011-07-21 22:17:23.000000000 -0400
3379 +++ linux-3.0.8/arch/sparc/kernel/process_32.c 2011-08-23 21:48:14.000000000 -0400
3380 @@ -204,7 +204,7 @@ void __show_backtrace(unsigned long fp)
3381 rw->ins[4], rw->ins[5],
3382 rw->ins[6],
3383 rw->ins[7]);
3384 - printk("%pS\n", (void *) rw->ins[7]);
3385 + printk("%pA\n", (void *) rw->ins[7]);
3386 rw = (struct reg_window32 *) rw->ins[6];
3387 }
3388 spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
3389 @@ -271,14 +271,14 @@ void show_regs(struct pt_regs *r)
3390
3391 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
3392 r->psr, r->pc, r->npc, r->y, print_tainted());
3393 - printk("PC: <%pS>\n", (void *) r->pc);
3394 + printk("PC: <%pA>\n", (void *) r->pc);
3395 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3396 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
3397 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
3398 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3399 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
3400 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
3401 - printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
3402 + printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
3403
3404 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3405 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
3406 @@ -313,7 +313,7 @@ void show_stack(struct task_struct *tsk,
3407 rw = (struct reg_window32 *) fp;
3408 pc = rw->ins[7];
3409 printk("[%08lx : ", pc);
3410 - printk("%pS ] ", (void *) pc);
3411 + printk("%pA ] ", (void *) pc);
3412 fp = rw->ins[6];
3413 } while (++count < 16);
3414 printk("\n");
3415 diff -urNp linux-3.0.8/arch/sparc/kernel/process_64.c linux-3.0.8/arch/sparc/kernel/process_64.c
3416 --- linux-3.0.8/arch/sparc/kernel/process_64.c 2011-07-21 22:17:23.000000000 -0400
3417 +++ linux-3.0.8/arch/sparc/kernel/process_64.c 2011-08-23 21:48:14.000000000 -0400
3418 @@ -180,14 +180,14 @@ static void show_regwindow(struct pt_reg
3419 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
3420 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
3421 if (regs->tstate & TSTATE_PRIV)
3422 - printk("I7: <%pS>\n", (void *) rwk->ins[7]);
3423 + printk("I7: <%pA>\n", (void *) rwk->ins[7]);
3424 }
3425
3426 void show_regs(struct pt_regs *regs)
3427 {
3428 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
3429 regs->tpc, regs->tnpc, regs->y, print_tainted());
3430 - printk("TPC: <%pS>\n", (void *) regs->tpc);
3431 + printk("TPC: <%pA>\n", (void *) regs->tpc);
3432 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
3433 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
3434 regs->u_regs[3]);
3435 @@ -200,7 +200,7 @@ void show_regs(struct pt_regs *regs)
3436 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
3437 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
3438 regs->u_regs[15]);
3439 - printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
3440 + printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
3441 show_regwindow(regs);
3442 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
3443 }
3444 @@ -285,7 +285,7 @@ void arch_trigger_all_cpu_backtrace(void
3445 ((tp && tp->task) ? tp->task->pid : -1));
3446
3447 if (gp->tstate & TSTATE_PRIV) {
3448 - printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
3449 + printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
3450 (void *) gp->tpc,
3451 (void *) gp->o7,
3452 (void *) gp->i7,
3453 diff -urNp linux-3.0.8/arch/sparc/kernel/sys_sparc_32.c linux-3.0.8/arch/sparc/kernel/sys_sparc_32.c
3454 --- linux-3.0.8/arch/sparc/kernel/sys_sparc_32.c 2011-07-21 22:17:23.000000000 -0400
3455 +++ linux-3.0.8/arch/sparc/kernel/sys_sparc_32.c 2011-08-23 21:47:55.000000000 -0400
3456 @@ -56,7 +56,7 @@ unsigned long arch_get_unmapped_area(str
3457 if (ARCH_SUN4C && len > 0x20000000)
3458 return -ENOMEM;
3459 if (!addr)
3460 - addr = TASK_UNMAPPED_BASE;
3461 + addr = current->mm->mmap_base;
3462
3463 if (flags & MAP_SHARED)
3464 addr = COLOUR_ALIGN(addr);
3465 @@ -71,7 +71,7 @@ unsigned long arch_get_unmapped_area(str
3466 }
3467 if (TASK_SIZE - PAGE_SIZE - len < addr)
3468 return -ENOMEM;
3469 - if (!vmm || addr + len <= vmm->vm_start)
3470 + if (check_heap_stack_gap(vmm, addr, len))
3471 return addr;
3472 addr = vmm->vm_end;
3473 if (flags & MAP_SHARED)
3474 diff -urNp linux-3.0.8/arch/sparc/kernel/sys_sparc_64.c linux-3.0.8/arch/sparc/kernel/sys_sparc_64.c
3475 --- linux-3.0.8/arch/sparc/kernel/sys_sparc_64.c 2011-07-21 22:17:23.000000000 -0400
3476 +++ linux-3.0.8/arch/sparc/kernel/sys_sparc_64.c 2011-08-23 21:47:55.000000000 -0400
3477 @@ -124,7 +124,7 @@ unsigned long arch_get_unmapped_area(str
3478 /* We do not accept a shared mapping if it would violate
3479 * cache aliasing constraints.
3480 */
3481 - if ((flags & MAP_SHARED) &&
3482 + if ((filp || (flags & MAP_SHARED)) &&
3483 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
3484 return -EINVAL;
3485 return addr;
3486 @@ -139,6 +139,10 @@ unsigned long arch_get_unmapped_area(str
3487 if (filp || (flags & MAP_SHARED))
3488 do_color_align = 1;
3489
3490 +#ifdef CONFIG_PAX_RANDMMAP
3491 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
3492 +#endif
3493 +
3494 if (addr) {
3495 if (do_color_align)
3496 addr = COLOUR_ALIGN(addr, pgoff);
3497 @@ -146,15 +150,14 @@ unsigned long arch_get_unmapped_area(str
3498 addr = PAGE_ALIGN(addr);
3499
3500 vma = find_vma(mm, addr);
3501 - if (task_size - len >= addr &&
3502 - (!vma || addr + len <= vma->vm_start))
3503 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
3504 return addr;
3505 }
3506
3507 if (len > mm->cached_hole_size) {
3508 - start_addr = addr = mm->free_area_cache;
3509 + start_addr = addr = mm->free_area_cache;
3510 } else {
3511 - start_addr = addr = TASK_UNMAPPED_BASE;
3512 + start_addr = addr = mm->mmap_base;
3513 mm->cached_hole_size = 0;
3514 }
3515
3516 @@ -174,14 +177,14 @@ full_search:
3517 vma = find_vma(mm, VA_EXCLUDE_END);
3518 }
3519 if (unlikely(task_size < addr)) {
3520 - if (start_addr != TASK_UNMAPPED_BASE) {
3521 - start_addr = addr = TASK_UNMAPPED_BASE;
3522 + if (start_addr != mm->mmap_base) {
3523 + start_addr = addr = mm->mmap_base;
3524 mm->cached_hole_size = 0;
3525 goto full_search;
3526 }
3527 return -ENOMEM;
3528 }
3529 - if (likely(!vma || addr + len <= vma->vm_start)) {
3530 + if (likely(check_heap_stack_gap(vma, addr, len))) {
3531 /*
3532 * Remember the place where we stopped the search:
3533 */
3534 @@ -215,7 +218,7 @@ arch_get_unmapped_area_topdown(struct fi
3535 /* We do not accept a shared mapping if it would violate
3536 * cache aliasing constraints.
3537 */
3538 - if ((flags & MAP_SHARED) &&
3539 + if ((filp || (flags & MAP_SHARED)) &&
3540 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
3541 return -EINVAL;
3542 return addr;
3543 @@ -236,8 +239,7 @@ arch_get_unmapped_area_topdown(struct fi
3544 addr = PAGE_ALIGN(addr);
3545
3546 vma = find_vma(mm, addr);
3547 - if (task_size - len >= addr &&
3548 - (!vma || addr + len <= vma->vm_start))
3549 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
3550 return addr;
3551 }
3552
3553 @@ -258,7 +260,7 @@ arch_get_unmapped_area_topdown(struct fi
3554 /* make sure it can fit in the remaining address space */
3555 if (likely(addr > len)) {
3556 vma = find_vma(mm, addr-len);
3557 - if (!vma || addr <= vma->vm_start) {
3558 + if (check_heap_stack_gap(vma, addr - len, len)) {
3559 /* remember the address as a hint for next time */
3560 return (mm->free_area_cache = addr-len);
3561 }
3562 @@ -267,18 +269,18 @@ arch_get_unmapped_area_topdown(struct fi
3563 if (unlikely(mm->mmap_base < len))
3564 goto bottomup;
3565
3566 - addr = mm->mmap_base-len;
3567 - if (do_color_align)
3568 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3569 + addr = mm->mmap_base - len;
3570
3571 do {
3572 + if (do_color_align)
3573 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3574 /*
3575 * Lookup failure means no vma is above this address,
3576 * else if new region fits below vma->vm_start,
3577 * return with success:
3578 */
3579 vma = find_vma(mm, addr);
3580 - if (likely(!vma || addr+len <= vma->vm_start)) {
3581 + if (likely(check_heap_stack_gap(vma, addr, len))) {
3582 /* remember the address as a hint for next time */
3583 return (mm->free_area_cache = addr);
3584 }
3585 @@ -288,10 +290,8 @@ arch_get_unmapped_area_topdown(struct fi
3586 mm->cached_hole_size = vma->vm_start - addr;
3587
3588 /* try just below the current vma->vm_start */
3589 - addr = vma->vm_start-len;
3590 - if (do_color_align)
3591 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3592 - } while (likely(len < vma->vm_start));
3593 + addr = skip_heap_stack_gap(vma, len);
3594 + } while (!IS_ERR_VALUE(addr));
3595
3596 bottomup:
3597 /*
3598 @@ -390,6 +390,12 @@ void arch_pick_mmap_layout(struct mm_str
3599 gap == RLIM_INFINITY ||
3600 sysctl_legacy_va_layout) {
3601 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
3602 +
3603 +#ifdef CONFIG_PAX_RANDMMAP
3604 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3605 + mm->mmap_base += mm->delta_mmap;
3606 +#endif
3607 +
3608 mm->get_unmapped_area = arch_get_unmapped_area;
3609 mm->unmap_area = arch_unmap_area;
3610 } else {
3611 @@ -402,6 +408,12 @@ void arch_pick_mmap_layout(struct mm_str
3612 gap = (task_size / 6 * 5);
3613
3614 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
3615 +
3616 +#ifdef CONFIG_PAX_RANDMMAP
3617 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3618 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3619 +#endif
3620 +
3621 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3622 mm->unmap_area = arch_unmap_area_topdown;
3623 }
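
Every `!vma || addr + len <= vma->vm_start` test in the area allocators becomes check_heap_stack_gap(vma, addr, len), which additionally refuses candidates that would butt up against a growing-down stack vma. A simplified stand-alone version; the guard-gap size and vma layout here are illustrative assumptions, not the patch's exact implementation:

	#include <stdbool.h>
	#include <stdio.h>

	#define VM_GROWSDOWN	0x0100UL
	#define GUARD_GAP	(64UL << 13)	/* e.g. 64 pages of 8 KiB */

	struct vm_area_struct {
		unsigned long vm_start, vm_end, vm_flags;
	};

	static bool check_heap_stack_gap(const struct vm_area_struct *vma,
					 unsigned long addr, unsigned long len)
	{
		unsigned long gap = 0;

		if (!vma)
			return true;			/* nothing above: fits */
		if (vma->vm_flags & VM_GROWSDOWN)
			gap = GUARD_GAP;		/* keep a hole below the stack */
		return addr + len + gap <= vma->vm_start;
	}

	int main(void)
	{
		struct vm_area_struct stack = { 0x70000000UL, 0x70100000UL, VM_GROWSDOWN };
		printf("%d\n", check_heap_stack_gap(&stack, 0x60000000UL, 0x1000));	/* fits */
		printf("%d\n", check_heap_stack_gap(&stack, 0x6ffff000UL, 0x1000));	/* too close */
		return 0;
	}
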
3624 diff -urNp linux-3.0.8/arch/sparc/kernel/traps_32.c linux-3.0.8/arch/sparc/kernel/traps_32.c
3625 --- linux-3.0.8/arch/sparc/kernel/traps_32.c 2011-07-21 22:17:23.000000000 -0400
3626 +++ linux-3.0.8/arch/sparc/kernel/traps_32.c 2011-08-23 21:48:14.000000000 -0400
3627 @@ -44,6 +44,8 @@ static void instruction_dump(unsigned lo
3628 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
3629 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
3630
3631 +extern void gr_handle_kernel_exploit(void);
3632 +
3633 void die_if_kernel(char *str, struct pt_regs *regs)
3634 {
3635 static int die_counter;
3636 @@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_
3637 count++ < 30 &&
3638 (((unsigned long) rw) >= PAGE_OFFSET) &&
3639 !(((unsigned long) rw) & 0x7)) {
3640 - printk("Caller[%08lx]: %pS\n", rw->ins[7],
3641 + printk("Caller[%08lx]: %pA\n", rw->ins[7],
3642 (void *) rw->ins[7]);
3643 rw = (struct reg_window32 *)rw->ins[6];
3644 }
3645 }
3646 printk("Instruction DUMP:");
3647 instruction_dump ((unsigned long *) regs->pc);
3648 - if(regs->psr & PSR_PS)
3649 + if(regs->psr & PSR_PS) {
3650 + gr_handle_kernel_exploit();
3651 do_exit(SIGKILL);
3652 + }
3653 do_exit(SIGSEGV);
3654 }
3655
3656 diff -urNp linux-3.0.8/arch/sparc/kernel/traps_64.c linux-3.0.8/arch/sparc/kernel/traps_64.c
3657 --- linux-3.0.8/arch/sparc/kernel/traps_64.c 2011-07-21 22:17:23.000000000 -0400
3658 +++ linux-3.0.8/arch/sparc/kernel/traps_64.c 2011-08-23 21:48:14.000000000 -0400
3659 @@ -75,7 +75,7 @@ static void dump_tl1_traplog(struct tl1_
3660 i + 1,
3661 p->trapstack[i].tstate, p->trapstack[i].tpc,
3662 p->trapstack[i].tnpc, p->trapstack[i].tt);
3663 - printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
3664 + printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
3665 }
3666 }
3667
3668 @@ -95,6 +95,12 @@ void bad_trap(struct pt_regs *regs, long
3669
3670 lvl -= 0x100;
3671 if (regs->tstate & TSTATE_PRIV) {
3672 +
3673 +#ifdef CONFIG_PAX_REFCOUNT
3674 + if (lvl == 6)
3675 + pax_report_refcount_overflow(regs);
3676 +#endif
3677 +
3678 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
3679 die_if_kernel(buffer, regs);
3680 }
3681 @@ -113,11 +119,16 @@ void bad_trap(struct pt_regs *regs, long
3682 void bad_trap_tl1(struct pt_regs *regs, long lvl)
3683 {
3684 char buffer[32];
3685 -
3686 +
3687 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
3688 0, lvl, SIGTRAP) == NOTIFY_STOP)
3689 return;
3690
3691 +#ifdef CONFIG_PAX_REFCOUNT
3692 + if (lvl == 6)
3693 + pax_report_refcount_overflow(regs);
3694 +#endif
3695 +
3696 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
3697
3698 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
3699 @@ -1141,7 +1152,7 @@ static void cheetah_log_errors(struct pt
3700 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
3701 printk("%s" "ERROR(%d): ",
3702 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
3703 - printk("TPC<%pS>\n", (void *) regs->tpc);
3704 + printk("TPC<%pA>\n", (void *) regs->tpc);
3705 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
3706 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
3707 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
3708 @@ -1748,7 +1759,7 @@ void cheetah_plus_parity_error(int type,
3709 smp_processor_id(),
3710 (type & 0x1) ? 'I' : 'D',
3711 regs->tpc);
3712 - printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
3713 + printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
3714 panic("Irrecoverable Cheetah+ parity error.");
3715 }
3716
3717 @@ -1756,7 +1767,7 @@ void cheetah_plus_parity_error(int type,
3718 smp_processor_id(),
3719 (type & 0x1) ? 'I' : 'D',
3720 regs->tpc);
3721 - printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
3722 + printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
3723 }
3724
3725 struct sun4v_error_entry {
3726 @@ -1963,9 +1974,9 @@ void sun4v_itlb_error_report(struct pt_r
3727
3728 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
3729 regs->tpc, tl);
3730 - printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
3731 + printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
3732 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
3733 - printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
3734 + printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
3735 (void *) regs->u_regs[UREG_I7]);
3736 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
3737 "pte[%lx] error[%lx]\n",
3738 @@ -1987,9 +1998,9 @@ void sun4v_dtlb_error_report(struct pt_r
3739
3740 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
3741 regs->tpc, tl);
3742 - printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
3743 + printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
3744 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
3745 - printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
3746 + printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
3747 (void *) regs->u_regs[UREG_I7]);
3748 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
3749 "pte[%lx] error[%lx]\n",
3750 @@ -2195,13 +2206,13 @@ void show_stack(struct task_struct *tsk,
3751 fp = (unsigned long)sf->fp + STACK_BIAS;
3752 }
3753
3754 - printk(" [%016lx] %pS\n", pc, (void *) pc);
3755 + printk(" [%016lx] %pA\n", pc, (void *) pc);
3756 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3757 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
3758 int index = tsk->curr_ret_stack;
3759 if (tsk->ret_stack && index >= graph) {
3760 pc = tsk->ret_stack[index - graph].ret;
3761 - printk(" [%016lx] %pS\n", pc, (void *) pc);
3762 + printk(" [%016lx] %pA\n", pc, (void *) pc);
3763 graph++;
3764 }
3765 }
3766 @@ -2226,6 +2237,8 @@ static inline struct reg_window *kernel_
3767 return (struct reg_window *) (fp + STACK_BIAS);
3768 }
3769
3770 +extern void gr_handle_kernel_exploit(void);
3771 +
3772 void die_if_kernel(char *str, struct pt_regs *regs)
3773 {
3774 static int die_counter;
3775 @@ -2254,7 +2267,7 @@ void die_if_kernel(char *str, struct pt_
3776 while (rw &&
3777 count++ < 30 &&
3778 kstack_valid(tp, (unsigned long) rw)) {
3779 - printk("Caller[%016lx]: %pS\n", rw->ins[7],
3780 + printk("Caller[%016lx]: %pA\n", rw->ins[7],
3781 (void *) rw->ins[7]);
3782
3783 rw = kernel_stack_up(rw);
3784 @@ -2267,8 +2280,10 @@ void die_if_kernel(char *str, struct pt_
3785 }
3786 user_instruction_dump ((unsigned int __user *) regs->tpc);
3787 }
3788 - if (regs->tstate & TSTATE_PRIV)
3789 + if (regs->tstate & TSTATE_PRIV) {
3790 + gr_handle_kernel_exploit();
3791 do_exit(SIGKILL);
3792 + }
3793 do_exit(SIGSEGV);
3794 }
3795 EXPORT_SYMBOL(die_if_kernel);
3796 diff -urNp linux-3.0.8/arch/sparc/kernel/unaligned_64.c linux-3.0.8/arch/sparc/kernel/unaligned_64.c
3797 --- linux-3.0.8/arch/sparc/kernel/unaligned_64.c 2011-10-24 08:05:21.000000000 -0400
3798 +++ linux-3.0.8/arch/sparc/kernel/unaligned_64.c 2011-08-23 21:48:14.000000000 -0400
3799 @@ -279,7 +279,7 @@ static void log_unaligned(struct pt_regs
3800 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
3801
3802 if (__ratelimit(&ratelimit)) {
3803 - printk("Kernel unaligned access at TPC[%lx] %pS\n",
3804 + printk("Kernel unaligned access at TPC[%lx] %pA\n",
3805 regs->tpc, (void *) regs->tpc);
3806 }
3807 }
3808 diff -urNp linux-3.0.8/arch/sparc/lib/atomic_64.S linux-3.0.8/arch/sparc/lib/atomic_64.S
3809 --- linux-3.0.8/arch/sparc/lib/atomic_64.S 2011-07-21 22:17:23.000000000 -0400
3810 +++ linux-3.0.8/arch/sparc/lib/atomic_64.S 2011-08-23 21:47:55.000000000 -0400
3811 @@ -18,7 +18,12 @@
3812 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
3813 BACKOFF_SETUP(%o2)
3814 1: lduw [%o1], %g1
3815 - add %g1, %o0, %g7
3816 + addcc %g1, %o0, %g7
3817 +
3818 +#ifdef CONFIG_PAX_REFCOUNT
3819 + tvs %icc, 6
3820 +#endif
3821 +
3822 cas [%o1], %g1, %g7
3823 cmp %g1, %g7
3824 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3825 @@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = at
3826 2: BACKOFF_SPIN(%o2, %o3, 1b)
3827 .size atomic_add, .-atomic_add
3828
3829 + .globl atomic_add_unchecked
3830 + .type atomic_add_unchecked,#function
3831 +atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
3832 + BACKOFF_SETUP(%o2)
3833 +1: lduw [%o1], %g1
3834 + add %g1, %o0, %g7
3835 + cas [%o1], %g1, %g7
3836 + cmp %g1, %g7
3837 + bne,pn %icc, 2f
3838 + nop
3839 + retl
3840 + nop
3841 +2: BACKOFF_SPIN(%o2, %o3, 1b)
3842 + .size atomic_add_unchecked, .-atomic_add_unchecked
3843 +
3844 .globl atomic_sub
3845 .type atomic_sub,#function
3846 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
3847 BACKOFF_SETUP(%o2)
3848 1: lduw [%o1], %g1
3849 - sub %g1, %o0, %g7
3850 + subcc %g1, %o0, %g7
3851 +
3852 +#ifdef CONFIG_PAX_REFCOUNT
3853 + tvs %icc, 6
3854 +#endif
3855 +
3856 cas [%o1], %g1, %g7
3857 cmp %g1, %g7
3858 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3859 @@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = at
3860 2: BACKOFF_SPIN(%o2, %o3, 1b)
3861 .size atomic_sub, .-atomic_sub
3862
3863 + .globl atomic_sub_unchecked
3864 + .type atomic_sub_unchecked,#function
3865 +atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
3866 + BACKOFF_SETUP(%o2)
3867 +1: lduw [%o1], %g1
3868 + sub %g1, %o0, %g7
3869 + cas [%o1], %g1, %g7
3870 + cmp %g1, %g7
3871 + bne,pn %icc, 2f
3872 + nop
3873 + retl
3874 + nop
3875 +2: BACKOFF_SPIN(%o2, %o3, 1b)
3876 + .size atomic_sub_unchecked, .-atomic_sub_unchecked
3877 +
3878 .globl atomic_add_ret
3879 .type atomic_add_ret,#function
3880 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
3881 BACKOFF_SETUP(%o2)
3882 1: lduw [%o1], %g1
3883 - add %g1, %o0, %g7
3884 + addcc %g1, %o0, %g7
3885 +
3886 +#ifdef CONFIG_PAX_REFCOUNT
3887 + tvs %icc, 6
3888 +#endif
3889 +
3890 cas [%o1], %g1, %g7
3891 cmp %g1, %g7
3892 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3893 @@ -58,12 +103,33 @@ atomic_add_ret: /* %o0 = increment, %o1
3894 2: BACKOFF_SPIN(%o2, %o3, 1b)
3895 .size atomic_add_ret, .-atomic_add_ret
3896
3897 + .globl atomic_add_ret_unchecked
3898 + .type atomic_add_ret_unchecked,#function
3899 +atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
3900 + BACKOFF_SETUP(%o2)
3901 +1: lduw [%o1], %g1
3902 + addcc %g1, %o0, %g7
3903 + cas [%o1], %g1, %g7
3904 + cmp %g1, %g7
3905 + bne,pn %icc, 2f
3906 + add %g7, %o0, %g7
3907 + sra %g7, 0, %o0
3908 + retl
3909 + nop
3910 +2: BACKOFF_SPIN(%o2, %o3, 1b)
3911 + .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
3912 +
3913 .globl atomic_sub_ret
3914 .type atomic_sub_ret,#function
3915 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
3916 BACKOFF_SETUP(%o2)
3917 1: lduw [%o1], %g1
3918 - sub %g1, %o0, %g7
3919 + subcc %g1, %o0, %g7
3920 +
3921 +#ifdef CONFIG_PAX_REFCOUNT
3922 + tvs %icc, 6
3923 +#endif
3924 +
3925 cas [%o1], %g1, %g7
3926 cmp %g1, %g7
3927 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3928 @@ -78,7 +144,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1
3929 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
3930 BACKOFF_SETUP(%o2)
3931 1: ldx [%o1], %g1
3932 - add %g1, %o0, %g7
3933 + addcc %g1, %o0, %g7
3934 +
3935 +#ifdef CONFIG_PAX_REFCOUNT
3936 + tvs %xcc, 6
3937 +#endif
3938 +
3939 casx [%o1], %g1, %g7
3940 cmp %g1, %g7
3941 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
3942 @@ -88,12 +159,32 @@ atomic64_add: /* %o0 = increment, %o1 =
3943 2: BACKOFF_SPIN(%o2, %o3, 1b)
3944 .size atomic64_add, .-atomic64_add
3945
3946 + .globl atomic64_add_unchecked
3947 + .type atomic64_add_unchecked,#function
3948 +atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
3949 + BACKOFF_SETUP(%o2)
3950 +1: ldx [%o1], %g1
3951 + addcc %g1, %o0, %g7
3952 + casx [%o1], %g1, %g7
3953 + cmp %g1, %g7
3954 + bne,pn %xcc, 2f
3955 + nop
3956 + retl
3957 + nop
3958 +2: BACKOFF_SPIN(%o2, %o3, 1b)
3959 + .size atomic64_add_unchecked, .-atomic64_add_unchecked
3960 +
3961 .globl atomic64_sub
3962 .type atomic64_sub,#function
3963 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
3964 BACKOFF_SETUP(%o2)
3965 1: ldx [%o1], %g1
3966 - sub %g1, %o0, %g7
3967 + subcc %g1, %o0, %g7
3968 +
3969 +#ifdef CONFIG_PAX_REFCOUNT
3970 + tvs %xcc, 6
3971 +#endif
3972 +
3973 casx [%o1], %g1, %g7
3974 cmp %g1, %g7
3975 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
3976 @@ -103,12 +194,32 @@ atomic64_sub: /* %o0 = decrement, %o1 =
3977 2: BACKOFF_SPIN(%o2, %o3, 1b)
3978 .size atomic64_sub, .-atomic64_sub
3979
3980 + .globl atomic64_sub_unchecked
3981 + .type atomic64_sub_unchecked,#function
3982 +atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
3983 + BACKOFF_SETUP(%o2)
3984 +1: ldx [%o1], %g1
3985 + subcc %g1, %o0, %g7
3986 + casx [%o1], %g1, %g7
3987 + cmp %g1, %g7
3988 + bne,pn %xcc, 2f
3989 + nop
3990 + retl
3991 + nop
3992 +2: BACKOFF_SPIN(%o2, %o3, 1b)
3993 + .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
3994 +
3995 .globl atomic64_add_ret
3996 .type atomic64_add_ret,#function
3997 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
3998 BACKOFF_SETUP(%o2)
3999 1: ldx [%o1], %g1
4000 - add %g1, %o0, %g7
4001 + addcc %g1, %o0, %g7
4002 +
4003 +#ifdef CONFIG_PAX_REFCOUNT
4004 + tvs %xcc, 6
4005 +#endif
4006 +
4007 casx [%o1], %g1, %g7
4008 cmp %g1, %g7
4009 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4010 @@ -118,12 +229,33 @@ atomic64_add_ret: /* %o0 = increment, %o
4011 2: BACKOFF_SPIN(%o2, %o3, 1b)
4012 .size atomic64_add_ret, .-atomic64_add_ret
4013
4014 + .globl atomic64_add_ret_unchecked
4015 + .type atomic64_add_ret_unchecked,#function
4016 +atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4017 + BACKOFF_SETUP(%o2)
4018 +1: ldx [%o1], %g1
4019 + addcc %g1, %o0, %g7
4020 + casx [%o1], %g1, %g7
4021 + cmp %g1, %g7
4022 + bne,pn %xcc, 2f
4023 + add %g7, %o0, %g7
4024 + mov %g7, %o0
4025 + retl
4026 + nop
4027 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4028 + .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
4029 +
4030 .globl atomic64_sub_ret
4031 .type atomic64_sub_ret,#function
4032 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4033 BACKOFF_SETUP(%o2)
4034 1: ldx [%o1], %g1
4035 - sub %g1, %o0, %g7
4036 + subcc %g1, %o0, %g7
4037 +
4038 +#ifdef CONFIG_PAX_REFCOUNT
4039 + tvs %xcc, 6
4040 +#endif
4041 +
4042 casx [%o1], %g1, %g7
4043 cmp %g1, %g7
4044 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4045 diff -urNp linux-3.0.8/arch/sparc/lib/ksyms.c linux-3.0.8/arch/sparc/lib/ksyms.c
4046 --- linux-3.0.8/arch/sparc/lib/ksyms.c 2011-07-21 22:17:23.000000000 -0400
4047 +++ linux-3.0.8/arch/sparc/lib/ksyms.c 2011-08-23 21:48:14.000000000 -0400
4048 @@ -142,12 +142,18 @@ EXPORT_SYMBOL(__downgrade_write);
4049
4050 /* Atomic counter implementation. */
4051 EXPORT_SYMBOL(atomic_add);
4052 +EXPORT_SYMBOL(atomic_add_unchecked);
4053 EXPORT_SYMBOL(atomic_add_ret);
4054 +EXPORT_SYMBOL(atomic_add_ret_unchecked);
4055 EXPORT_SYMBOL(atomic_sub);
4056 +EXPORT_SYMBOL(atomic_sub_unchecked);
4057 EXPORT_SYMBOL(atomic_sub_ret);
4058 EXPORT_SYMBOL(atomic64_add);
4059 +EXPORT_SYMBOL(atomic64_add_unchecked);
4060 EXPORT_SYMBOL(atomic64_add_ret);
4061 +EXPORT_SYMBOL(atomic64_add_ret_unchecked);
4062 EXPORT_SYMBOL(atomic64_sub);
4063 +EXPORT_SYMBOL(atomic64_sub_unchecked);
4064 EXPORT_SYMBOL(atomic64_sub_ret);
4065
4066 /* Atomic bit operations. */
4067 diff -urNp linux-3.0.8/arch/sparc/lib/Makefile linux-3.0.8/arch/sparc/lib/Makefile
4068 --- linux-3.0.8/arch/sparc/lib/Makefile 2011-10-24 08:05:21.000000000 -0400
4069 +++ linux-3.0.8/arch/sparc/lib/Makefile 2011-08-23 21:47:55.000000000 -0400
4070 @@ -2,7 +2,7 @@
4071 #
4072
4073 asflags-y := -ansi -DST_DIV0=0x02
4074 -ccflags-y := -Werror
4075 +#ccflags-y := -Werror
4076
4077 lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
4078 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
4079 diff -urNp linux-3.0.8/arch/sparc/Makefile linux-3.0.8/arch/sparc/Makefile
4080 --- linux-3.0.8/arch/sparc/Makefile 2011-07-21 22:17:23.000000000 -0400
4081 +++ linux-3.0.8/arch/sparc/Makefile 2011-08-23 21:48:14.000000000 -0400
4082 @@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc
4083 # Export what is needed by arch/sparc/boot/Makefile
4084 export VMLINUX_INIT VMLINUX_MAIN
4085 VMLINUX_INIT := $(head-y) $(init-y)
4086 -VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
4087 +VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
4088 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
4089 VMLINUX_MAIN += $(drivers-y) $(net-y)
4090
4091 diff -urNp linux-3.0.8/arch/sparc/mm/fault_32.c linux-3.0.8/arch/sparc/mm/fault_32.c
4092 --- linux-3.0.8/arch/sparc/mm/fault_32.c 2011-07-21 22:17:23.000000000 -0400
4093 +++ linux-3.0.8/arch/sparc/mm/fault_32.c 2011-08-23 21:47:55.000000000 -0400
4094 @@ -22,6 +22,9 @@
4095 #include <linux/interrupt.h>
4096 #include <linux/module.h>
4097 #include <linux/kdebug.h>
4098 +#include <linux/slab.h>
4099 +#include <linux/pagemap.h>
4100 +#include <linux/compiler.h>
4101
4102 #include <asm/system.h>
4103 #include <asm/page.h>
4104 @@ -209,6 +212,268 @@ static unsigned long compute_si_addr(str
4105 return safe_compute_effective_address(regs, insn);
4106 }
4107
4108 +#ifdef CONFIG_PAX_PAGEEXEC
4109 +#ifdef CONFIG_PAX_DLRESOLVE
4110 +static void pax_emuplt_close(struct vm_area_struct *vma)
4111 +{
4112 + vma->vm_mm->call_dl_resolve = 0UL;
4113 +}
4114 +
4115 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4116 +{
4117 + unsigned int *kaddr;
4118 +
4119 + vmf->page = alloc_page(GFP_HIGHUSER);
4120 + if (!vmf->page)
4121 + return VM_FAULT_OOM;
4122 +
4123 + kaddr = kmap(vmf->page);
4124 + memset(kaddr, 0, PAGE_SIZE);
4125 + kaddr[0] = 0x9DE3BFA8U; /* save */
4126 + flush_dcache_page(vmf->page);
4127 + kunmap(vmf->page);
4128 + return VM_FAULT_MAJOR;
4129 +}
4130 +
4131 +static const struct vm_operations_struct pax_vm_ops = {
4132 + .close = pax_emuplt_close,
4133 + .fault = pax_emuplt_fault
4134 +};
4135 +
4136 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
4137 +{
4138 + int ret;
4139 +
4140 + INIT_LIST_HEAD(&vma->anon_vma_chain);
4141 + vma->vm_mm = current->mm;
4142 + vma->vm_start = addr;
4143 + vma->vm_end = addr + PAGE_SIZE;
4144 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
4145 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
4146 + vma->vm_ops = &pax_vm_ops;
4147 +
4148 + ret = insert_vm_struct(current->mm, vma);
4149 + if (ret)
4150 + return ret;
4151 +
4152 + ++current->mm->total_vm;
4153 + return 0;
4154 +}
4155 +#endif
4156 +
4157 +/*
4158 + * PaX: decide what to do with offenders (regs->pc = fault address)
4159 + *
4160 + * returns 1 when task should be killed
4161 + * 2 when patched PLT trampoline was detected
4162 + * 3 when unpatched PLT trampoline was detected
4163 + */
4164 +static int pax_handle_fetch_fault(struct pt_regs *regs)
4165 +{
4166 +
4167 +#ifdef CONFIG_PAX_EMUPLT
4168 + int err;
4169 +
4170 + do { /* PaX: patched PLT emulation #1 */
4171 + unsigned int sethi1, sethi2, jmpl;
4172 +
4173 + err = get_user(sethi1, (unsigned int *)regs->pc);
4174 + err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
4175 + err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
4176 +
4177 + if (err)
4178 + break;
4179 +
4180 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
4181 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
4182 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
4183 + {
4184 + unsigned int addr;
4185 +
4186 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
4187 + addr = regs->u_regs[UREG_G1];
4188 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4189 + regs->pc = addr;
4190 + regs->npc = addr+4;
4191 + return 2;
4192 + }
4193 + } while (0);
4194 +
4195 + { /* PaX: patched PLT emulation #2 */
4196 + unsigned int ba;
4197 +
4198 + err = get_user(ba, (unsigned int *)regs->pc);
4199 +
4200 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
4201 + unsigned int addr;
4202 +
4203 + addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
4204 + regs->pc = addr;
4205 + regs->npc = addr+4;
4206 + return 2;
4207 + }
4208 + }
4209 +
4210 + do { /* PaX: patched PLT emulation #3 */
4211 + unsigned int sethi, jmpl, nop;
4212 +
4213 + err = get_user(sethi, (unsigned int *)regs->pc);
4214 + err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
4215 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
4216 +
4217 + if (err)
4218 + break;
4219 +
4220 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4221 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
4222 + nop == 0x01000000U)
4223 + {
4224 + unsigned int addr;
4225 +
4226 + addr = (sethi & 0x003FFFFFU) << 10;
4227 + regs->u_regs[UREG_G1] = addr;
4228 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4229 + regs->pc = addr;
4230 + regs->npc = addr+4;
4231 + return 2;
4232 + }
4233 + } while (0);
4234 +
4235 + do { /* PaX: unpatched PLT emulation step 1 */
4236 + unsigned int sethi, ba, nop;
4237 +
4238 + err = get_user(sethi, (unsigned int *)regs->pc);
4239 + err |= get_user(ba, (unsigned int *)(regs->pc+4));
4240 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
4241 +
4242 + if (err)
4243 + break;
4244 +
4245 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4246 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
4247 + nop == 0x01000000U)
4248 + {
4249 + unsigned int addr, save, call;
4250 +
4251 + if ((ba & 0xFFC00000U) == 0x30800000U)
4252 + addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
4253 + else
4254 + addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
4255 +
4256 + err = get_user(save, (unsigned int *)addr);
4257 + err |= get_user(call, (unsigned int *)(addr+4));
4258 + err |= get_user(nop, (unsigned int *)(addr+8));
4259 + if (err)
4260 + break;
4261 +
4262 +#ifdef CONFIG_PAX_DLRESOLVE
4263 + if (save == 0x9DE3BFA8U &&
4264 + (call & 0xC0000000U) == 0x40000000U &&
4265 + nop == 0x01000000U)
4266 + {
4267 + struct vm_area_struct *vma;
4268 + unsigned long call_dl_resolve;
4269 +
4270 + down_read(&current->mm->mmap_sem);
4271 + call_dl_resolve = current->mm->call_dl_resolve;
4272 + up_read(&current->mm->mmap_sem);
4273 + if (likely(call_dl_resolve))
4274 + goto emulate;
4275 +
4276 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
4277 +
4278 + down_write(&current->mm->mmap_sem);
4279 + if (current->mm->call_dl_resolve) {
4280 + call_dl_resolve = current->mm->call_dl_resolve;
4281 + up_write(&current->mm->mmap_sem);
4282 + if (vma)
4283 + kmem_cache_free(vm_area_cachep, vma);
4284 + goto emulate;
4285 + }
4286 +
4287 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
4288 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
4289 + up_write(&current->mm->mmap_sem);
4290 + if (vma)
4291 + kmem_cache_free(vm_area_cachep, vma);
4292 + return 1;
4293 + }
4294 +
4295 + if (pax_insert_vma(vma, call_dl_resolve)) {
4296 + up_write(&current->mm->mmap_sem);
4297 + kmem_cache_free(vm_area_cachep, vma);
4298 + return 1;
4299 + }
4300 +
4301 + current->mm->call_dl_resolve = call_dl_resolve;
4302 + up_write(&current->mm->mmap_sem);
4303 +
4304 +emulate:
4305 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4306 + regs->pc = call_dl_resolve;
4307 + regs->npc = addr+4;
4308 + return 3;
4309 + }
4310 +#endif
4311 +
4312 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
4313 + if ((save & 0xFFC00000U) == 0x05000000U &&
4314 + (call & 0xFFFFE000U) == 0x85C0A000U &&
4315 + nop == 0x01000000U)
4316 + {
4317 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4318 + regs->u_regs[UREG_G2] = addr + 4;
4319 + addr = (save & 0x003FFFFFU) << 10;
4320 + addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4321 + regs->pc = addr;
4322 + regs->npc = addr+4;
4323 + return 3;
4324 + }
4325 + }
4326 + } while (0);
4327 +
4328 + do { /* PaX: unpatched PLT emulation step 2 */
4329 + unsigned int save, call, nop;
4330 +
4331 + err = get_user(save, (unsigned int *)(regs->pc-4));
4332 + err |= get_user(call, (unsigned int *)regs->pc);
4333 + err |= get_user(nop, (unsigned int *)(regs->pc+4));
4334 + if (err)
4335 + break;
4336 +
4337 + if (save == 0x9DE3BFA8U &&
4338 + (call & 0xC0000000U) == 0x40000000U &&
4339 + nop == 0x01000000U)
4340 + {
4341 + unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
4342 +
4343 + regs->u_regs[UREG_RETPC] = regs->pc;
4344 + regs->pc = dl_resolve;
4345 + regs->npc = dl_resolve+4;
4346 + return 3;
4347 + }
4348 + } while (0);
4349 +#endif
4350 +
4351 + return 1;
4352 +}
4353 +
4354 +void pax_report_insns(void *pc, void *sp)
4355 +{
4356 + unsigned long i;
4357 +
4358 + printk(KERN_ERR "PAX: bytes at PC: ");
4359 + for (i = 0; i < 8; i++) {
4360 + unsigned int c;
4361 + if (get_user(c, (unsigned int *)pc+i))
4362 + printk(KERN_CONT "???????? ");
4363 + else
4364 + printk(KERN_CONT "%08x ", c);
4365 + }
4366 + printk("\n");
4367 +}
4368 +#endif
4369 +
4370 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
4371 int text_fault)
4372 {
4373 @@ -281,6 +546,24 @@ good_area:
4374 if(!(vma->vm_flags & VM_WRITE))
4375 goto bad_area;
4376 } else {
4377 +
4378 +#ifdef CONFIG_PAX_PAGEEXEC
4379 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
4380 + up_read(&mm->mmap_sem);
4381 + switch (pax_handle_fetch_fault(regs)) {
4382 +
4383 +#ifdef CONFIG_PAX_EMUPLT
4384 + case 2:
4385 + case 3:
4386 + return;
4387 +#endif
4388 +
4389 + }
4390 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
4391 + do_group_exit(SIGKILL);
4392 + }
4393 +#endif
4394 +
4395 /* Allow reads even for write-only mappings */
4396 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
4397 goto bad_area;
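
The patched-PLT patterns above all resolve the branch target the same way: the sethi immediate supplies bits 31..10 and the jmpl/call immediate is sign-extended and added (the `(x | ~mask) ^ sign) + sign` arithmetic in the handlers). A small sketch of that decoding for the 32-bit sethi/jmpl case, using made-up instruction words:

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t sethi_imm(uint32_t insn)	/* sethi %hi(x), %gN: imm22 << 10 */
	{
		return (insn & 0x003FFFFFU) << 10;
	}

	static int32_t simm13(uint32_t insn)		/* low 13 bits, sign-extended */
	{
		uint32_t imm = insn & 0x1FFFU;
		return (int32_t)((imm ^ 0x1000U) - 0x1000U);	/* equivalent to the | ^ + form above */
	}

	int main(void)
	{
		uint32_t sethi = 0x03000000U | (0x12345U & 0x3FFFFFU);	/* sethi %hi(...), %g1 */
		uint32_t jmpl  = 0x81C06000U | 0x068U;			/* jmpl %g1 + 0x68, %g0 */
		uint32_t target = sethi_imm(sethi) + (uint32_t)simm13(jmpl);

		printf("resolved target: %#x\n", (unsigned)target);
		return 0;
	}
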
4398 diff -urNp linux-3.0.8/arch/sparc/mm/fault_64.c linux-3.0.8/arch/sparc/mm/fault_64.c
4399 --- linux-3.0.8/arch/sparc/mm/fault_64.c 2011-07-21 22:17:23.000000000 -0400
4400 +++ linux-3.0.8/arch/sparc/mm/fault_64.c 2011-08-23 21:48:14.000000000 -0400
4401 @@ -21,6 +21,9 @@
4402 #include <linux/kprobes.h>
4403 #include <linux/kdebug.h>
4404 #include <linux/percpu.h>
4405 +#include <linux/slab.h>
4406 +#include <linux/pagemap.h>
4407 +#include <linux/compiler.h>
4408
4409 #include <asm/page.h>
4410 #include <asm/pgtable.h>
4411 @@ -74,7 +77,7 @@ static void __kprobes bad_kernel_pc(stru
4412 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
4413 regs->tpc);
4414 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
4415 - printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
4416 + printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
4417 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
4418 dump_stack();
4419 unhandled_fault(regs->tpc, current, regs);
4420 @@ -272,6 +275,457 @@ static void noinline __kprobes bogus_32b
4421 show_regs(regs);
4422 }
4423
4424 +#ifdef CONFIG_PAX_PAGEEXEC
4425 +#ifdef CONFIG_PAX_DLRESOLVE
4426 +static void pax_emuplt_close(struct vm_area_struct *vma)
4427 +{
4428 + vma->vm_mm->call_dl_resolve = 0UL;
4429 +}
4430 +
4431 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4432 +{
4433 + unsigned int *kaddr;
4434 +
4435 + vmf->page = alloc_page(GFP_HIGHUSER);
4436 + if (!vmf->page)
4437 + return VM_FAULT_OOM;
4438 +
4439 + kaddr = kmap(vmf->page);
4440 + memset(kaddr, 0, PAGE_SIZE);
4441 + kaddr[0] = 0x9DE3BFA8U; /* save */
4442 + flush_dcache_page(vmf->page);
4443 + kunmap(vmf->page);
4444 + return VM_FAULT_MAJOR;
4445 +}
4446 +
4447 +static const struct vm_operations_struct pax_vm_ops = {
4448 + .close = pax_emuplt_close,
4449 + .fault = pax_emuplt_fault
4450 +};
4451 +
4452 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
4453 +{
4454 + int ret;
4455 +
4456 + INIT_LIST_HEAD(&vma->anon_vma_chain);
4457 + vma->vm_mm = current->mm;
4458 + vma->vm_start = addr;
4459 + vma->vm_end = addr + PAGE_SIZE;
4460 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
4461 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
4462 + vma->vm_ops = &pax_vm_ops;
4463 +
4464 + ret = insert_vm_struct(current->mm, vma);
4465 + if (ret)
4466 + return ret;
4467 +
4468 + ++current->mm->total_vm;
4469 + return 0;
4470 +}
4471 +#endif
4472 +
4473 +/*
4474 + * PaX: decide what to do with offenders (regs->tpc = fault address)
4475 + *
4476 + * returns 1 when task should be killed
4477 + * 2 when patched PLT trampoline was detected
4478 + * 3 when unpatched PLT trampoline was detected
4479 + */
4480 +static int pax_handle_fetch_fault(struct pt_regs *regs)
4481 +{
4482 +
4483 +#ifdef CONFIG_PAX_EMUPLT
4484 + int err;
4485 +
4486 + do { /* PaX: patched PLT emulation #1 */
4487 + unsigned int sethi1, sethi2, jmpl;
4488 +
4489 + err = get_user(sethi1, (unsigned int *)regs->tpc);
4490 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
4491 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
4492 +
4493 + if (err)
4494 + break;
4495 +
4496 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
4497 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
4498 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
4499 + {
4500 + unsigned long addr;
4501 +
4502 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
4503 + addr = regs->u_regs[UREG_G1];
4504 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
4505 +
4506 + if (test_thread_flag(TIF_32BIT))
4507 + addr &= 0xFFFFFFFFUL;
4508 +
4509 + regs->tpc = addr;
4510 + regs->tnpc = addr+4;
4511 + return 2;
4512 + }
4513 + } while (0);
4514 +
4515 + { /* PaX: patched PLT emulation #2 */
4516 + unsigned int ba;
4517 +
4518 + err = get_user(ba, (unsigned int *)regs->tpc);
4519 +
4520 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
4521 + unsigned long addr;
4522 +
4523 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
4524 +
4525 + if (test_thread_flag(TIF_32BIT))
4526 + addr &= 0xFFFFFFFFUL;
4527 +
4528 + regs->tpc = addr;
4529 + regs->tnpc = addr+4;
4530 + return 2;
4531 + }
4532 + }
4533 +
4534 + do { /* PaX: patched PLT emulation #3 */
4535 + unsigned int sethi, jmpl, nop;
4536 +
4537 + err = get_user(sethi, (unsigned int *)regs->tpc);
4538 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
4539 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
4540 +
4541 + if (err)
4542 + break;
4543 +
4544 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4545 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
4546 + nop == 0x01000000U)
4547 + {
4548 + unsigned long addr;
4549 +
4550 + addr = (sethi & 0x003FFFFFU) << 10;
4551 + regs->u_regs[UREG_G1] = addr;
4552 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
4553 +
4554 + if (test_thread_flag(TIF_32BIT))
4555 + addr &= 0xFFFFFFFFUL;
4556 +
4557 + regs->tpc = addr;
4558 + regs->tnpc = addr+4;
4559 + return 2;
4560 + }
4561 + } while (0);
4562 +
4563 + do { /* PaX: patched PLT emulation #4 */
4564 + unsigned int sethi, mov1, call, mov2;
4565 +
4566 + err = get_user(sethi, (unsigned int *)regs->tpc);
4567 + err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
4568 + err |= get_user(call, (unsigned int *)(regs->tpc+8));
4569 + err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
4570 +
4571 + if (err)
4572 + break;
4573 +
4574 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4575 + mov1 == 0x8210000FU &&
4576 + (call & 0xC0000000U) == 0x40000000U &&
4577 + mov2 == 0x9E100001U)
4578 + {
4579 + unsigned long addr;
4580 +
4581 + regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
4582 + addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
4583 +
4584 + if (test_thread_flag(TIF_32BIT))
4585 + addr &= 0xFFFFFFFFUL;
4586 +
4587 + regs->tpc = addr;
4588 + regs->tnpc = addr+4;
4589 + return 2;
4590 + }
4591 + } while (0);
4592 +
4593 + do { /* PaX: patched PLT emulation #5 */
4594 + unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
4595 +
4596 + err = get_user(sethi, (unsigned int *)regs->tpc);
4597 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
4598 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
4599 + err |= get_user(or1, (unsigned int *)(regs->tpc+12));
4600 + err |= get_user(or2, (unsigned int *)(regs->tpc+16));
4601 + err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
4602 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
4603 + err |= get_user(nop, (unsigned int *)(regs->tpc+28));
4604 +
4605 + if (err)
4606 + break;
4607 +
4608 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4609 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
4610 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
4611 + (or1 & 0xFFFFE000U) == 0x82106000U &&
4612 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
4613 + sllx == 0x83287020U &&
4614 + jmpl == 0x81C04005U &&
4615 + nop == 0x01000000U)
4616 + {
4617 + unsigned long addr;
4618 +
4619 + regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
4620 + regs->u_regs[UREG_G1] <<= 32;
4621 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
4622 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
4623 + regs->tpc = addr;
4624 + regs->tnpc = addr+4;
4625 + return 2;
4626 + }
4627 + } while (0);
4628 +
4629 + do { /* PaX: patched PLT emulation #6 */
4630 + unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
4631 +
4632 + err = get_user(sethi, (unsigned int *)regs->tpc);
4633 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
4634 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
4635 + err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
4636 + err |= get_user(or, (unsigned int *)(regs->tpc+16));
4637 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
4638 + err |= get_user(nop, (unsigned int *)(regs->tpc+24));
4639 +
4640 + if (err)
4641 + break;
4642 +
4643 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4644 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
4645 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
4646 + sllx == 0x83287020U &&
4647 + (or & 0xFFFFE000U) == 0x8A116000U &&
4648 + jmpl == 0x81C04005U &&
4649 + nop == 0x01000000U)
4650 + {
4651 + unsigned long addr;
4652 +
4653 + regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
4654 + regs->u_regs[UREG_G1] <<= 32;
4655 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
4656 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
4657 + regs->tpc = addr;
4658 + regs->tnpc = addr+4;
4659 + return 2;
4660 + }
4661 + } while (0);
4662 +
4663 + do { /* PaX: unpatched PLT emulation step 1 */
4664 + unsigned int sethi, ba, nop;
4665 +
4666 + err = get_user(sethi, (unsigned int *)regs->tpc);
4667 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
4668 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
4669 +
4670 + if (err)
4671 + break;
4672 +
4673 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4674 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
4675 + nop == 0x01000000U)
4676 + {
4677 + unsigned long addr;
4678 + unsigned int save, call;
4679 + unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
4680 +
4681 + if ((ba & 0xFFC00000U) == 0x30800000U)
4682 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
4683 + else
4684 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
4685 +
4686 + if (test_thread_flag(TIF_32BIT))
4687 + addr &= 0xFFFFFFFFUL;
4688 +
4689 + err = get_user(save, (unsigned int *)addr);
4690 + err |= get_user(call, (unsigned int *)(addr+4));
4691 + err |= get_user(nop, (unsigned int *)(addr+8));
4692 + if (err)
4693 + break;
4694 +
4695 +#ifdef CONFIG_PAX_DLRESOLVE
4696 + if (save == 0x9DE3BFA8U &&
4697 + (call & 0xC0000000U) == 0x40000000U &&
4698 + nop == 0x01000000U)
4699 + {
4700 + struct vm_area_struct *vma;
4701 + unsigned long call_dl_resolve;
4702 +
4703 + down_read(&current->mm->mmap_sem);
4704 + call_dl_resolve = current->mm->call_dl_resolve;
4705 + up_read(&current->mm->mmap_sem);
4706 + if (likely(call_dl_resolve))
4707 + goto emulate;
4708 +
4709 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
4710 +
4711 + down_write(&current->mm->mmap_sem);
4712 + if (current->mm->call_dl_resolve) {
4713 + call_dl_resolve = current->mm->call_dl_resolve;
4714 + up_write(&current->mm->mmap_sem);
4715 + if (vma)
4716 + kmem_cache_free(vm_area_cachep, vma);
4717 + goto emulate;
4718 + }
4719 +
4720 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
4721 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
4722 + up_write(&current->mm->mmap_sem);
4723 + if (vma)
4724 + kmem_cache_free(vm_area_cachep, vma);
4725 + return 1;
4726 + }
4727 +
4728 + if (pax_insert_vma(vma, call_dl_resolve)) {
4729 + up_write(&current->mm->mmap_sem);
4730 + kmem_cache_free(vm_area_cachep, vma);
4731 + return 1;
4732 + }
4733 +
4734 + current->mm->call_dl_resolve = call_dl_resolve;
4735 + up_write(&current->mm->mmap_sem);
4736 +
4737 +emulate:
4738 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4739 + regs->tpc = call_dl_resolve;
4740 + regs->tnpc = addr+4;
4741 + return 3;
4742 + }
4743 +#endif
4744 +
4745 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
4746 + if ((save & 0xFFC00000U) == 0x05000000U &&
4747 + (call & 0xFFFFE000U) == 0x85C0A000U &&
4748 + nop == 0x01000000U)
4749 + {
4750 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4751 + regs->u_regs[UREG_G2] = addr + 4;
4752 + addr = (save & 0x003FFFFFU) << 10;
4753 + addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
4754 +
4755 + if (test_thread_flag(TIF_32BIT))
4756 + addr &= 0xFFFFFFFFUL;
4757 +
4758 + regs->tpc = addr;
4759 + regs->tnpc = addr+4;
4760 + return 3;
4761 + }
4762 +
4763 + /* PaX: 64-bit PLT stub */
4764 + err = get_user(sethi1, (unsigned int *)addr);
4765 + err |= get_user(sethi2, (unsigned int *)(addr+4));
4766 + err |= get_user(or1, (unsigned int *)(addr+8));
4767 + err |= get_user(or2, (unsigned int *)(addr+12));
4768 + err |= get_user(sllx, (unsigned int *)(addr+16));
4769 + err |= get_user(add, (unsigned int *)(addr+20));
4770 + err |= get_user(jmpl, (unsigned int *)(addr+24));
4771 + err |= get_user(nop, (unsigned int *)(addr+28));
4772 + if (err)
4773 + break;
4774 +
4775 + if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
4776 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
4777 + (or1 & 0xFFFFE000U) == 0x88112000U &&
4778 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
4779 + sllx == 0x89293020U &&
4780 + add == 0x8A010005U &&
4781 + jmpl == 0x89C14000U &&
4782 + nop == 0x01000000U)
4783 + {
4784 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4785 + regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
4786 + regs->u_regs[UREG_G4] <<= 32;
4787 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
4788 + regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
4789 + regs->u_regs[UREG_G4] = addr + 24;
4790 + addr = regs->u_regs[UREG_G5];
4791 + regs->tpc = addr;
4792 + regs->tnpc = addr+4;
4793 + return 3;
4794 + }
4795 + }
4796 + } while (0);
4797 +
4798 +#ifdef CONFIG_PAX_DLRESOLVE
4799 + do { /* PaX: unpatched PLT emulation step 2 */
4800 + unsigned int save, call, nop;
4801 +
4802 + err = get_user(save, (unsigned int *)(regs->tpc-4));
4803 + err |= get_user(call, (unsigned int *)regs->tpc);
4804 + err |= get_user(nop, (unsigned int *)(regs->tpc+4));
4805 + if (err)
4806 + break;
4807 +
4808 + if (save == 0x9DE3BFA8U &&
4809 + (call & 0xC0000000U) == 0x40000000U &&
4810 + nop == 0x01000000U)
4811 + {
4812 + unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
4813 +
4814 + if (test_thread_flag(TIF_32BIT))
4815 + dl_resolve &= 0xFFFFFFFFUL;
4816 +
4817 + regs->u_regs[UREG_RETPC] = regs->tpc;
4818 + regs->tpc = dl_resolve;
4819 + regs->tnpc = dl_resolve+4;
4820 + return 3;
4821 + }
4822 + } while (0);
4823 +#endif
4824 +
4825 + do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
4826 + unsigned int sethi, ba, nop;
4827 +
4828 + err = get_user(sethi, (unsigned int *)regs->tpc);
4829 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
4830 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
4831 +
4832 + if (err)
4833 + break;
4834 +
4835 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4836 + (ba & 0xFFF00000U) == 0x30600000U &&
4837 + nop == 0x01000000U)
4838 + {
4839 + unsigned long addr;
4840 +
4841 + addr = (sethi & 0x003FFFFFU) << 10;
4842 + regs->u_regs[UREG_G1] = addr;
4843 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
4844 +
4845 + if (test_thread_flag(TIF_32BIT))
4846 + addr &= 0xFFFFFFFFUL;
4847 +
4848 + regs->tpc = addr;
4849 + regs->tnpc = addr+4;
4850 + return 2;
4851 + }
4852 + } while (0);
4853 +
4854 +#endif
4855 +
4856 + return 1;
4857 +}
4858 +
4859 +void pax_report_insns(void *pc, void *sp)
4860 +{
4861 + unsigned long i;
4862 +
4863 + printk(KERN_ERR "PAX: bytes at PC: ");
4864 + for (i = 0; i < 8; i++) {
4865 + unsigned int c;
4866 + if (get_user(c, (unsigned int *)pc+i))
4867 + printk(KERN_CONT "???????? ");
4868 + else
4869 + printk(KERN_CONT "%08x ", c);
4870 + }
4871 + printk("\n");
4872 +}
4873 +#endif
4874 +
4875 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
4876 {
4877 struct mm_struct *mm = current->mm;
4878 @@ -340,6 +794,29 @@ asmlinkage void __kprobes do_sparc64_fau
4879 if (!vma)
4880 goto bad_area;
4881
4882 +#ifdef CONFIG_PAX_PAGEEXEC
4883 + /* PaX: detect ITLB misses on non-exec pages */
4884 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
4885 + !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
4886 + {
4887 + if (address != regs->tpc)
4888 + goto good_area;
4889 +
4890 + up_read(&mm->mmap_sem);
4891 + switch (pax_handle_fetch_fault(regs)) {
4892 +
4893 +#ifdef CONFIG_PAX_EMUPLT
4894 + case 2:
4895 + case 3:
4896 + return;
4897 +#endif
4898 +
4899 + }
4900 + pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
4901 + do_group_exit(SIGKILL);
4902 + }
4903 +#endif
4904 +
4905 /* Pure DTLB misses do not tell us whether the fault causing
4906 * load/store/atomic was a write or not, it only says that there
4907 * was no match. So in such a case we (carefully) read the
4908 diff -urNp linux-3.0.8/arch/sparc/mm/hugetlbpage.c linux-3.0.8/arch/sparc/mm/hugetlbpage.c
4909 --- linux-3.0.8/arch/sparc/mm/hugetlbpage.c 2011-07-21 22:17:23.000000000 -0400
4910 +++ linux-3.0.8/arch/sparc/mm/hugetlbpage.c 2011-08-23 21:47:55.000000000 -0400
4911 @@ -68,7 +68,7 @@ full_search:
4912 }
4913 return -ENOMEM;
4914 }
4915 - if (likely(!vma || addr + len <= vma->vm_start)) {
4916 + if (likely(check_heap_stack_gap(vma, addr, len))) {
4917 /*
4918 * Remember the place where we stopped the search:
4919 */
4920 @@ -107,7 +107,7 @@ hugetlb_get_unmapped_area_topdown(struct
4921 /* make sure it can fit in the remaining address space */
4922 if (likely(addr > len)) {
4923 vma = find_vma(mm, addr-len);
4924 - if (!vma || addr <= vma->vm_start) {
4925 + if (check_heap_stack_gap(vma, addr - len, len)) {
4926 /* remember the address as a hint for next time */
4927 return (mm->free_area_cache = addr-len);
4928 }
4929 @@ -116,16 +116,17 @@ hugetlb_get_unmapped_area_topdown(struct
4930 if (unlikely(mm->mmap_base < len))
4931 goto bottomup;
4932
4933 - addr = (mm->mmap_base-len) & HPAGE_MASK;
4934 + addr = mm->mmap_base - len;
4935
4936 do {
4937 + addr &= HPAGE_MASK;
4938 /*
4939 * Lookup failure means no vma is above this address,
4940 * else if new region fits below vma->vm_start,
4941 * return with success:
4942 */
4943 vma = find_vma(mm, addr);
4944 - if (likely(!vma || addr+len <= vma->vm_start)) {
4945 + if (likely(check_heap_stack_gap(vma, addr, len))) {
4946 /* remember the address as a hint for next time */
4947 return (mm->free_area_cache = addr);
4948 }
4949 @@ -135,8 +136,8 @@ hugetlb_get_unmapped_area_topdown(struct
4950 mm->cached_hole_size = vma->vm_start - addr;
4951
4952 /* try just below the current vma->vm_start */
4953 - addr = (vma->vm_start-len) & HPAGE_MASK;
4954 - } while (likely(len < vma->vm_start));
4955 + addr = skip_heap_stack_gap(vma, len);
4956 + } while (!IS_ERR_VALUE(addr));
4957
4958 bottomup:
4959 /*
4960 @@ -182,8 +183,7 @@ hugetlb_get_unmapped_area(struct file *f
4961 if (addr) {
4962 addr = ALIGN(addr, HPAGE_SIZE);
4963 vma = find_vma(mm, addr);
4964 - if (task_size - len >= addr &&
4965 - (!vma || addr + len <= vma->vm_start))
4966 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
4967 return addr;
4968 }
4969 if (mm->get_unmapped_area == arch_get_unmapped_area)
4970 diff -urNp linux-3.0.8/arch/sparc/mm/init_32.c linux-3.0.8/arch/sparc/mm/init_32.c
4971 --- linux-3.0.8/arch/sparc/mm/init_32.c 2011-07-21 22:17:23.000000000 -0400
4972 +++ linux-3.0.8/arch/sparc/mm/init_32.c 2011-08-23 21:47:55.000000000 -0400
4973 @@ -316,6 +316,9 @@ extern void device_scan(void);
4974 pgprot_t PAGE_SHARED __read_mostly;
4975 EXPORT_SYMBOL(PAGE_SHARED);
4976
4977 +pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
4978 +EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
4979 +
4980 void __init paging_init(void)
4981 {
4982 switch(sparc_cpu_model) {
4983 @@ -344,17 +347,17 @@ void __init paging_init(void)
4984
4985 /* Initialize the protection map with non-constant, MMU dependent values. */
4986 protection_map[0] = PAGE_NONE;
4987 - protection_map[1] = PAGE_READONLY;
4988 - protection_map[2] = PAGE_COPY;
4989 - protection_map[3] = PAGE_COPY;
4990 + protection_map[1] = PAGE_READONLY_NOEXEC;
4991 + protection_map[2] = PAGE_COPY_NOEXEC;
4992 + protection_map[3] = PAGE_COPY_NOEXEC;
4993 protection_map[4] = PAGE_READONLY;
4994 protection_map[5] = PAGE_READONLY;
4995 protection_map[6] = PAGE_COPY;
4996 protection_map[7] = PAGE_COPY;
4997 protection_map[8] = PAGE_NONE;
4998 - protection_map[9] = PAGE_READONLY;
4999 - protection_map[10] = PAGE_SHARED;
5000 - protection_map[11] = PAGE_SHARED;
5001 + protection_map[9] = PAGE_READONLY_NOEXEC;
5002 + protection_map[10] = PAGE_SHARED_NOEXEC;
5003 + protection_map[11] = PAGE_SHARED_NOEXEC;
5004 protection_map[12] = PAGE_READONLY;
5005 protection_map[13] = PAGE_READONLY;
5006 protection_map[14] = PAGE_SHARED;
5007 diff -urNp linux-3.0.8/arch/sparc/mm/Makefile linux-3.0.8/arch/sparc/mm/Makefile
5008 --- linux-3.0.8/arch/sparc/mm/Makefile 2011-07-21 22:17:23.000000000 -0400
5009 +++ linux-3.0.8/arch/sparc/mm/Makefile 2011-08-23 21:47:55.000000000 -0400
5010 @@ -2,7 +2,7 @@
5011 #
5012
5013 asflags-y := -ansi
5014 -ccflags-y := -Werror
5015 +#ccflags-y := -Werror
5016
5017 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o
5018 obj-y += fault_$(BITS).o
5019 diff -urNp linux-3.0.8/arch/sparc/mm/srmmu.c linux-3.0.8/arch/sparc/mm/srmmu.c
5020 --- linux-3.0.8/arch/sparc/mm/srmmu.c 2011-07-21 22:17:23.000000000 -0400
5021 +++ linux-3.0.8/arch/sparc/mm/srmmu.c 2011-08-23 21:47:55.000000000 -0400
5022 @@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
5023 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
5024 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
5025 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
5026 +
5027 +#ifdef CONFIG_PAX_PAGEEXEC
5028 + PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
5029 + BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
5030 + BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
5031 +#endif
5032 +
5033 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
5034 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
5035
5036 diff -urNp linux-3.0.8/arch/um/include/asm/kmap_types.h linux-3.0.8/arch/um/include/asm/kmap_types.h
5037 --- linux-3.0.8/arch/um/include/asm/kmap_types.h 2011-07-21 22:17:23.000000000 -0400
5038 +++ linux-3.0.8/arch/um/include/asm/kmap_types.h 2011-08-23 21:47:55.000000000 -0400
5039 @@ -23,6 +23,7 @@ enum km_type {
5040 KM_IRQ1,
5041 KM_SOFTIRQ0,
5042 KM_SOFTIRQ1,
5043 + KM_CLEARPAGE,
5044 KM_TYPE_NR
5045 };
5046
5047 diff -urNp linux-3.0.8/arch/um/include/asm/page.h linux-3.0.8/arch/um/include/asm/page.h
5048 --- linux-3.0.8/arch/um/include/asm/page.h 2011-07-21 22:17:23.000000000 -0400
5049 +++ linux-3.0.8/arch/um/include/asm/page.h 2011-08-23 21:47:55.000000000 -0400
5050 @@ -14,6 +14,9 @@
5051 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
5052 #define PAGE_MASK (~(PAGE_SIZE-1))
5053
5054 +#define ktla_ktva(addr) (addr)
5055 +#define ktva_ktla(addr) (addr)
5056 +
5057 #ifndef __ASSEMBLY__
5058
5059 struct page;
5060 diff -urNp linux-3.0.8/arch/um/kernel/process.c linux-3.0.8/arch/um/kernel/process.c
5061 --- linux-3.0.8/arch/um/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
5062 +++ linux-3.0.8/arch/um/kernel/process.c 2011-08-23 21:47:55.000000000 -0400
5063 @@ -404,22 +404,6 @@ int singlestepping(void * t)
5064 return 2;
5065 }
5066
5067 -/*
5068 - * Only x86 and x86_64 have an arch_align_stack().
5069 - * All other arches have "#define arch_align_stack(x) (x)"
5070 - * in their asm/system.h
5071 - * As this is included in UML from asm-um/system-generic.h,
5072 - * we can use it to behave as the subarch does.
5073 - */
5074 -#ifndef arch_align_stack
5075 -unsigned long arch_align_stack(unsigned long sp)
5076 -{
5077 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
5078 - sp -= get_random_int() % 8192;
5079 - return sp & ~0xf;
5080 -}
5081 -#endif
5082 -
5083 unsigned long get_wchan(struct task_struct *p)
5084 {
5085 unsigned long stack_page, sp, ip;
5086 diff -urNp linux-3.0.8/arch/um/Makefile linux-3.0.8/arch/um/Makefile
5087 --- linux-3.0.8/arch/um/Makefile 2011-07-21 22:17:23.000000000 -0400
5088 +++ linux-3.0.8/arch/um/Makefile 2011-10-20 04:46:01.000000000 -0400
5089 @@ -49,6 +49,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINE
5090 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
5091 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64
5092
5093 +ifdef CONSTIFY_PLUGIN
5094 +USER_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5095 +endif
5096 +
5097 include $(srctree)/$(ARCH_DIR)/Makefile-$(SUBARCH)
5098
5099 #This will adjust *FLAGS accordingly to the platform.
5100 diff -urNp linux-3.0.8/arch/um/sys-i386/shared/sysdep/system.h linux-3.0.8/arch/um/sys-i386/shared/sysdep/system.h
5101 --- linux-3.0.8/arch/um/sys-i386/shared/sysdep/system.h 2011-07-21 22:17:23.000000000 -0400
5102 +++ linux-3.0.8/arch/um/sys-i386/shared/sysdep/system.h 2011-10-20 04:46:01.000000000 -0400
5103 @@ -17,7 +17,7 @@
5104 # define AT_VECTOR_SIZE_ARCH 1
5105 #endif
5106
5107 -extern unsigned long arch_align_stack(unsigned long sp);
5108 +#define arch_align_stack(x) ((x) & ~0xfUL)
5109
5110 void default_idle(void);
5111
5112 diff -urNp linux-3.0.8/arch/um/sys-i386/syscalls.c linux-3.0.8/arch/um/sys-i386/syscalls.c
5113 --- linux-3.0.8/arch/um/sys-i386/syscalls.c 2011-07-21 22:17:23.000000000 -0400
5114 +++ linux-3.0.8/arch/um/sys-i386/syscalls.c 2011-08-23 21:47:55.000000000 -0400
5115 @@ -11,6 +11,21 @@
5116 #include "asm/uaccess.h"
5117 #include "asm/unistd.h"
5118
5119 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
5120 +{
5121 + unsigned long pax_task_size = TASK_SIZE;
5122 +
5123 +#ifdef CONFIG_PAX_SEGMEXEC
5124 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
5125 + pax_task_size = SEGMEXEC_TASK_SIZE;
5126 +#endif
5127 +
5128 + if (len > pax_task_size || addr > pax_task_size - len)
5129 + return -EINVAL;
5130 +
5131 + return 0;
5132 +}
5133 +
5134 /*
5135 * The prototype on i386 is:
5136 *
5137 diff -urNp linux-3.0.8/arch/um/sys-x86_64/shared/sysdep/system.h linux-3.0.8/arch/um/sys-x86_64/shared/sysdep/system.h
5138 --- linux-3.0.8/arch/um/sys-x86_64/shared/sysdep/system.h 2011-07-21 22:17:23.000000000 -0400
5139 +++ linux-3.0.8/arch/um/sys-x86_64/shared/sysdep/system.h 2011-10-20 04:46:01.000000000 -0400
5140 @@ -17,7 +17,7 @@
5141 # define AT_VECTOR_SIZE_ARCH 1
5142 #endif
5143
5144 -extern unsigned long arch_align_stack(unsigned long sp);
5145 +#define arch_align_stack(x) ((x) & ~0xfUL)
5146
5147 void default_idle(void);
5148
5149 diff -urNp linux-3.0.8/arch/x86/boot/bitops.h linux-3.0.8/arch/x86/boot/bitops.h
5150 --- linux-3.0.8/arch/x86/boot/bitops.h 2011-07-21 22:17:23.000000000 -0400
5151 +++ linux-3.0.8/arch/x86/boot/bitops.h 2011-08-23 21:47:55.000000000 -0400
5152 @@ -26,7 +26,7 @@ static inline int variable_test_bit(int
5153 u8 v;
5154 const u32 *p = (const u32 *)addr;
5155
5156 - asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5157 + asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5158 return v;
5159 }
5160
5161 @@ -37,7 +37,7 @@ static inline int variable_test_bit(int
5162
5163 static inline void set_bit(int nr, void *addr)
5164 {
5165 - asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5166 + asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5167 }
5168
5169 #endif /* BOOT_BITOPS_H */
5170 diff -urNp linux-3.0.8/arch/x86/boot/boot.h linux-3.0.8/arch/x86/boot/boot.h
5171 --- linux-3.0.8/arch/x86/boot/boot.h 2011-07-21 22:17:23.000000000 -0400
5172 +++ linux-3.0.8/arch/x86/boot/boot.h 2011-08-23 21:47:55.000000000 -0400
5173 @@ -85,7 +85,7 @@ static inline void io_delay(void)
5174 static inline u16 ds(void)
5175 {
5176 u16 seg;
5177 - asm("movw %%ds,%0" : "=rm" (seg));
5178 + asm volatile("movw %%ds,%0" : "=rm" (seg));
5179 return seg;
5180 }
5181
5182 @@ -181,7 +181,7 @@ static inline void wrgs32(u32 v, addr_t
5183 static inline int memcmp(const void *s1, const void *s2, size_t len)
5184 {
5185 u8 diff;
5186 - asm("repe; cmpsb; setnz %0"
5187 + asm volatile("repe; cmpsb; setnz %0"
5188 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
5189 return diff;
5190 }
5191 diff -urNp linux-3.0.8/arch/x86/boot/compressed/head_32.S linux-3.0.8/arch/x86/boot/compressed/head_32.S
5192 --- linux-3.0.8/arch/x86/boot/compressed/head_32.S 2011-07-21 22:17:23.000000000 -0400
5193 +++ linux-3.0.8/arch/x86/boot/compressed/head_32.S 2011-08-23 21:47:55.000000000 -0400
5194 @@ -76,7 +76,7 @@ ENTRY(startup_32)
5195 notl %eax
5196 andl %eax, %ebx
5197 #else
5198 - movl $LOAD_PHYSICAL_ADDR, %ebx
5199 + movl $____LOAD_PHYSICAL_ADDR, %ebx
5200 #endif
5201
5202 /* Target address to relocate to for decompression */
5203 @@ -162,7 +162,7 @@ relocated:
5204 * and where it was actually loaded.
5205 */
5206 movl %ebp, %ebx
5207 - subl $LOAD_PHYSICAL_ADDR, %ebx
5208 + subl $____LOAD_PHYSICAL_ADDR, %ebx
5209 jz 2f /* Nothing to be done if loaded at compiled addr. */
5210 /*
5211 * Process relocations.
5212 @@ -170,8 +170,7 @@ relocated:
5213
5214 1: subl $4, %edi
5215 movl (%edi), %ecx
5216 - testl %ecx, %ecx
5217 - jz 2f
5218 + jecxz 2f
5219 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
5220 jmp 1b
5221 2:
5222 diff -urNp linux-3.0.8/arch/x86/boot/compressed/head_64.S linux-3.0.8/arch/x86/boot/compressed/head_64.S
5223 --- linux-3.0.8/arch/x86/boot/compressed/head_64.S 2011-07-21 22:17:23.000000000 -0400
5224 +++ linux-3.0.8/arch/x86/boot/compressed/head_64.S 2011-08-23 21:47:55.000000000 -0400
5225 @@ -91,7 +91,7 @@ ENTRY(startup_32)
5226 notl %eax
5227 andl %eax, %ebx
5228 #else
5229 - movl $LOAD_PHYSICAL_ADDR, %ebx
5230 + movl $____LOAD_PHYSICAL_ADDR, %ebx
5231 #endif
5232
5233 /* Target address to relocate to for decompression */
5234 @@ -233,7 +233,7 @@ ENTRY(startup_64)
5235 notq %rax
5236 andq %rax, %rbp
5237 #else
5238 - movq $LOAD_PHYSICAL_ADDR, %rbp
5239 + movq $____LOAD_PHYSICAL_ADDR, %rbp
5240 #endif
5241
5242 /* Target address to relocate to for decompression */
5243 diff -urNp linux-3.0.8/arch/x86/boot/compressed/Makefile linux-3.0.8/arch/x86/boot/compressed/Makefile
5244 --- linux-3.0.8/arch/x86/boot/compressed/Makefile 2011-07-21 22:17:23.000000000 -0400
5245 +++ linux-3.0.8/arch/x86/boot/compressed/Makefile 2011-08-23 21:47:55.000000000 -0400
5246 @@ -14,6 +14,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=smal
5247 KBUILD_CFLAGS += $(cflags-y)
5248 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
5249 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
5250 +ifdef CONSTIFY_PLUGIN
5251 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5252 +endif
5253
5254 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
5255 GCOV_PROFILE := n
5256 diff -urNp linux-3.0.8/arch/x86/boot/compressed/misc.c linux-3.0.8/arch/x86/boot/compressed/misc.c
5257 --- linux-3.0.8/arch/x86/boot/compressed/misc.c 2011-07-21 22:17:23.000000000 -0400
5258 +++ linux-3.0.8/arch/x86/boot/compressed/misc.c 2011-08-23 21:47:55.000000000 -0400
5259 @@ -310,7 +310,7 @@ static void parse_elf(void *output)
5260 case PT_LOAD:
5261 #ifdef CONFIG_RELOCATABLE
5262 dest = output;
5263 - dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
5264 + dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
5265 #else
5266 dest = (void *)(phdr->p_paddr);
5267 #endif
5268 @@ -363,7 +363,7 @@ asmlinkage void decompress_kernel(void *
5269 error("Destination address too large");
5270 #endif
5271 #ifndef CONFIG_RELOCATABLE
5272 - if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
5273 + if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
5274 error("Wrong destination address");
5275 #endif
5276
5277 diff -urNp linux-3.0.8/arch/x86/boot/compressed/relocs.c linux-3.0.8/arch/x86/boot/compressed/relocs.c
5278 --- linux-3.0.8/arch/x86/boot/compressed/relocs.c 2011-07-21 22:17:23.000000000 -0400
5279 +++ linux-3.0.8/arch/x86/boot/compressed/relocs.c 2011-08-23 21:47:55.000000000 -0400
5280 @@ -13,8 +13,11 @@
5281
5282 static void die(char *fmt, ...);
5283
5284 +#include "../../../../include/generated/autoconf.h"
5285 +
5286 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
5287 static Elf32_Ehdr ehdr;
5288 +static Elf32_Phdr *phdr;
5289 static unsigned long reloc_count, reloc_idx;
5290 static unsigned long *relocs;
5291
5292 @@ -270,9 +273,39 @@ static void read_ehdr(FILE *fp)
5293 }
5294 }
5295
5296 +static void read_phdrs(FILE *fp)
5297 +{
5298 + unsigned int i;
5299 +
5300 + phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
5301 + if (!phdr) {
5302 + die("Unable to allocate %d program headers\n",
5303 + ehdr.e_phnum);
5304 + }
5305 + if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
5306 + die("Seek to %d failed: %s\n",
5307 + ehdr.e_phoff, strerror(errno));
5308 + }
5309 + if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
5310 + die("Cannot read ELF program headers: %s\n",
5311 + strerror(errno));
5312 + }
5313 + for(i = 0; i < ehdr.e_phnum; i++) {
5314 + phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
5315 + phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
5316 + phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
5317 + phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
5318 + phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
5319 + phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
5320 + phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
5321 + phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
5322 + }
5323 +
5324 +}
5325 +
5326 static void read_shdrs(FILE *fp)
5327 {
5328 - int i;
5329 + unsigned int i;
5330 Elf32_Shdr shdr;
5331
5332 secs = calloc(ehdr.e_shnum, sizeof(struct section));
5333 @@ -307,7 +340,7 @@ static void read_shdrs(FILE *fp)
5334
5335 static void read_strtabs(FILE *fp)
5336 {
5337 - int i;
5338 + unsigned int i;
5339 for (i = 0; i < ehdr.e_shnum; i++) {
5340 struct section *sec = &secs[i];
5341 if (sec->shdr.sh_type != SHT_STRTAB) {
5342 @@ -332,7 +365,7 @@ static void read_strtabs(FILE *fp)
5343
5344 static void read_symtabs(FILE *fp)
5345 {
5346 - int i,j;
5347 + unsigned int i,j;
5348 for (i = 0; i < ehdr.e_shnum; i++) {
5349 struct section *sec = &secs[i];
5350 if (sec->shdr.sh_type != SHT_SYMTAB) {
5351 @@ -365,7 +398,9 @@ static void read_symtabs(FILE *fp)
5352
5353 static void read_relocs(FILE *fp)
5354 {
5355 - int i,j;
5356 + unsigned int i,j;
5357 + uint32_t base;
5358 +
5359 for (i = 0; i < ehdr.e_shnum; i++) {
5360 struct section *sec = &secs[i];
5361 if (sec->shdr.sh_type != SHT_REL) {
5362 @@ -385,9 +420,18 @@ static void read_relocs(FILE *fp)
5363 die("Cannot read symbol table: %s\n",
5364 strerror(errno));
5365 }
5366 + base = 0;
5367 + for (j = 0; j < ehdr.e_phnum; j++) {
5368 + if (phdr[j].p_type != PT_LOAD )
5369 + continue;
5370 + if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
5371 + continue;
5372 + base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
5373 + break;
5374 + }
5375 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
5376 Elf32_Rel *rel = &sec->reltab[j];
5377 - rel->r_offset = elf32_to_cpu(rel->r_offset);
5378 + rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
5379 rel->r_info = elf32_to_cpu(rel->r_info);
5380 }
5381 }
5382 @@ -396,14 +440,14 @@ static void read_relocs(FILE *fp)
5383
5384 static void print_absolute_symbols(void)
5385 {
5386 - int i;
5387 + unsigned int i;
5388 printf("Absolute symbols\n");
5389 printf(" Num: Value Size Type Bind Visibility Name\n");
5390 for (i = 0; i < ehdr.e_shnum; i++) {
5391 struct section *sec = &secs[i];
5392 char *sym_strtab;
5393 Elf32_Sym *sh_symtab;
5394 - int j;
5395 + unsigned int j;
5396
5397 if (sec->shdr.sh_type != SHT_SYMTAB) {
5398 continue;
5399 @@ -431,14 +475,14 @@ static void print_absolute_symbols(void)
5400
5401 static void print_absolute_relocs(void)
5402 {
5403 - int i, printed = 0;
5404 + unsigned int i, printed = 0;
5405
5406 for (i = 0; i < ehdr.e_shnum; i++) {
5407 struct section *sec = &secs[i];
5408 struct section *sec_applies, *sec_symtab;
5409 char *sym_strtab;
5410 Elf32_Sym *sh_symtab;
5411 - int j;
5412 + unsigned int j;
5413 if (sec->shdr.sh_type != SHT_REL) {
5414 continue;
5415 }
5416 @@ -499,13 +543,13 @@ static void print_absolute_relocs(void)
5417
5418 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
5419 {
5420 - int i;
5421 + unsigned int i;
5422 /* Walk through the relocations */
5423 for (i = 0; i < ehdr.e_shnum; i++) {
5424 char *sym_strtab;
5425 Elf32_Sym *sh_symtab;
5426 struct section *sec_applies, *sec_symtab;
5427 - int j;
5428 + unsigned int j;
5429 struct section *sec = &secs[i];
5430
5431 if (sec->shdr.sh_type != SHT_REL) {
5432 @@ -530,6 +574,22 @@ static void walk_relocs(void (*visit)(El
5433 !is_rel_reloc(sym_name(sym_strtab, sym))) {
5434 continue;
5435 }
5436 + /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
5437 + if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
5438 + continue;
5439 +
5440 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
5441 + /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
5442 + if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
5443 + continue;
5444 + if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
5445 + continue;
5446 + if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
5447 + continue;
5448 + if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
5449 + continue;
5450 +#endif
5451 +
5452 switch (r_type) {
5453 case R_386_NONE:
5454 case R_386_PC32:
5455 @@ -571,7 +631,7 @@ static int cmp_relocs(const void *va, co
5456
5457 static void emit_relocs(int as_text)
5458 {
5459 - int i;
5460 + unsigned int i;
5461 /* Count how many relocations I have and allocate space for them. */
5462 reloc_count = 0;
5463 walk_relocs(count_reloc);
5464 @@ -665,6 +725,7 @@ int main(int argc, char **argv)
5465 fname, strerror(errno));
5466 }
5467 read_ehdr(fp);
5468 + read_phdrs(fp);
5469 read_shdrs(fp);
5470 read_strtabs(fp);
5471 read_symtabs(fp);
5472 diff -urNp linux-3.0.8/arch/x86/boot/cpucheck.c linux-3.0.8/arch/x86/boot/cpucheck.c
5473 --- linux-3.0.8/arch/x86/boot/cpucheck.c 2011-07-21 22:17:23.000000000 -0400
5474 +++ linux-3.0.8/arch/x86/boot/cpucheck.c 2011-08-23 21:47:55.000000000 -0400
5475 @@ -74,7 +74,7 @@ static int has_fpu(void)
5476 u16 fcw = -1, fsw = -1;
5477 u32 cr0;
5478
5479 - asm("movl %%cr0,%0" : "=r" (cr0));
5480 + asm volatile("movl %%cr0,%0" : "=r" (cr0));
5481 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
5482 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
5483 asm volatile("movl %0,%%cr0" : : "r" (cr0));
5484 @@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
5485 {
5486 u32 f0, f1;
5487
5488 - asm("pushfl ; "
5489 + asm volatile("pushfl ; "
5490 "pushfl ; "
5491 "popl %0 ; "
5492 "movl %0,%1 ; "
5493 @@ -115,7 +115,7 @@ static void get_flags(void)
5494 set_bit(X86_FEATURE_FPU, cpu.flags);
5495
5496 if (has_eflag(X86_EFLAGS_ID)) {
5497 - asm("cpuid"
5498 + asm volatile("cpuid"
5499 : "=a" (max_intel_level),
5500 "=b" (cpu_vendor[0]),
5501 "=d" (cpu_vendor[1]),
5502 @@ -124,7 +124,7 @@ static void get_flags(void)
5503
5504 if (max_intel_level >= 0x00000001 &&
5505 max_intel_level <= 0x0000ffff) {
5506 - asm("cpuid"
5507 + asm volatile("cpuid"
5508 : "=a" (tfms),
5509 "=c" (cpu.flags[4]),
5510 "=d" (cpu.flags[0])
5511 @@ -136,7 +136,7 @@ static void get_flags(void)
5512 cpu.model += ((tfms >> 16) & 0xf) << 4;
5513 }
5514
5515 - asm("cpuid"
5516 + asm volatile("cpuid"
5517 : "=a" (max_amd_level)
5518 : "a" (0x80000000)
5519 : "ebx", "ecx", "edx");
5520 @@ -144,7 +144,7 @@ static void get_flags(void)
5521 if (max_amd_level >= 0x80000001 &&
5522 max_amd_level <= 0x8000ffff) {
5523 u32 eax = 0x80000001;
5524 - asm("cpuid"
5525 + asm volatile("cpuid"
5526 : "+a" (eax),
5527 "=c" (cpu.flags[6]),
5528 "=d" (cpu.flags[1])
5529 @@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *r
5530 u32 ecx = MSR_K7_HWCR;
5531 u32 eax, edx;
5532
5533 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5534 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5535 eax &= ~(1 << 15);
5536 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5537 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5538
5539 get_flags(); /* Make sure it really did something */
5540 err = check_flags();
5541 @@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *r
5542 u32 ecx = MSR_VIA_FCR;
5543 u32 eax, edx;
5544
5545 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5546 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5547 eax |= (1<<1)|(1<<7);
5548 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5549 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5550
5551 set_bit(X86_FEATURE_CX8, cpu.flags);
5552 err = check_flags();
5553 @@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *r
5554 u32 eax, edx;
5555 u32 level = 1;
5556
5557 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5558 - asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
5559 - asm("cpuid"
5560 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5561 + asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
5562 + asm volatile("cpuid"
5563 : "+a" (level), "=d" (cpu.flags[0])
5564 : : "ecx", "ebx");
5565 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5566 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5567
5568 err = check_flags();
5569 }
5570 diff -urNp linux-3.0.8/arch/x86/boot/header.S linux-3.0.8/arch/x86/boot/header.S
5571 --- linux-3.0.8/arch/x86/boot/header.S 2011-07-21 22:17:23.000000000 -0400
5572 +++ linux-3.0.8/arch/x86/boot/header.S 2011-08-23 21:47:55.000000000 -0400
5573 @@ -224,7 +224,7 @@ setup_data: .quad 0 # 64-bit physical
5574 # single linked list of
5575 # struct setup_data
5576
5577 -pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
5578 +pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
5579
5580 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
5581 #define VO_INIT_SIZE (VO__end - VO__text)
5582 diff -urNp linux-3.0.8/arch/x86/boot/Makefile linux-3.0.8/arch/x86/boot/Makefile
5583 --- linux-3.0.8/arch/x86/boot/Makefile 2011-07-21 22:17:23.000000000 -0400
5584 +++ linux-3.0.8/arch/x86/boot/Makefile 2011-08-23 21:47:55.000000000 -0400
5585 @@ -69,6 +69,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os
5586 $(call cc-option, -fno-stack-protector) \
5587 $(call cc-option, -mpreferred-stack-boundary=2)
5588 KBUILD_CFLAGS += $(call cc-option, -m32)
5589 +ifdef CONSTIFY_PLUGIN
5590 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5591 +endif
5592 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
5593 GCOV_PROFILE := n
5594
5595 diff -urNp linux-3.0.8/arch/x86/boot/memory.c linux-3.0.8/arch/x86/boot/memory.c
5596 --- linux-3.0.8/arch/x86/boot/memory.c 2011-07-21 22:17:23.000000000 -0400
5597 +++ linux-3.0.8/arch/x86/boot/memory.c 2011-08-23 21:47:55.000000000 -0400
5598 @@ -19,7 +19,7 @@
5599
5600 static int detect_memory_e820(void)
5601 {
5602 - int count = 0;
5603 + unsigned int count = 0;
5604 struct biosregs ireg, oreg;
5605 struct e820entry *desc = boot_params.e820_map;
5606 static struct e820entry buf; /* static so it is zeroed */
5607 diff -urNp linux-3.0.8/arch/x86/boot/video.c linux-3.0.8/arch/x86/boot/video.c
5608 --- linux-3.0.8/arch/x86/boot/video.c 2011-07-21 22:17:23.000000000 -0400
5609 +++ linux-3.0.8/arch/x86/boot/video.c 2011-08-23 21:47:55.000000000 -0400
5610 @@ -96,7 +96,7 @@ static void store_mode_params(void)
5611 static unsigned int get_entry(void)
5612 {
5613 char entry_buf[4];
5614 - int i, len = 0;
5615 + unsigned int i, len = 0;
5616 int key;
5617 unsigned int v;
5618
5619 diff -urNp linux-3.0.8/arch/x86/boot/video-vesa.c linux-3.0.8/arch/x86/boot/video-vesa.c
5620 --- linux-3.0.8/arch/x86/boot/video-vesa.c 2011-07-21 22:17:23.000000000 -0400
5621 +++ linux-3.0.8/arch/x86/boot/video-vesa.c 2011-08-23 21:47:55.000000000 -0400
5622 @@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
5623
5624 boot_params.screen_info.vesapm_seg = oreg.es;
5625 boot_params.screen_info.vesapm_off = oreg.di;
5626 + boot_params.screen_info.vesapm_size = oreg.cx;
5627 }
5628
5629 /*
5630 diff -urNp linux-3.0.8/arch/x86/crypto/aes-x86_64-asm_64.S linux-3.0.8/arch/x86/crypto/aes-x86_64-asm_64.S
5631 --- linux-3.0.8/arch/x86/crypto/aes-x86_64-asm_64.S 2011-07-21 22:17:23.000000000 -0400
5632 +++ linux-3.0.8/arch/x86/crypto/aes-x86_64-asm_64.S 2011-10-06 04:17:55.000000000 -0400
5633 @@ -8,6 +8,8 @@
5634 * including this sentence is retained in full.
5635 */
5636
5637 +#include <asm/alternative-asm.h>
5638 +
5639 .extern crypto_ft_tab
5640 .extern crypto_it_tab
5641 .extern crypto_fl_tab
5642 @@ -71,6 +73,8 @@ FUNC: movq r1,r2; \
5643 je B192; \
5644 leaq 32(r9),r9;
5645
5646 +#define ret pax_force_retaddr; ret
5647 +
5648 #define epilogue(r1,r2,r3,r4,r5,r6,r7,r8,r9) \
5649 movq r1,r2; \
5650 movq r3,r4; \
5651 diff -urNp linux-3.0.8/arch/x86/crypto/salsa20-x86_64-asm_64.S linux-3.0.8/arch/x86/crypto/salsa20-x86_64-asm_64.S
5652 --- linux-3.0.8/arch/x86/crypto/salsa20-x86_64-asm_64.S 2011-07-21 22:17:23.000000000 -0400
5653 +++ linux-3.0.8/arch/x86/crypto/salsa20-x86_64-asm_64.S 2011-10-06 04:17:55.000000000 -0400
5654 @@ -1,3 +1,5 @@
5655 +#include <asm/alternative-asm.h>
5656 +
5657 # enter ECRYPT_encrypt_bytes
5658 .text
5659 .p2align 5
5660 @@ -790,6 +792,7 @@ ECRYPT_encrypt_bytes:
5661 add %r11,%rsp
5662 mov %rdi,%rax
5663 mov %rsi,%rdx
5664 + pax_force_retaddr
5665 ret
5666 # bytesatleast65:
5667 ._bytesatleast65:
5668 @@ -891,6 +894,7 @@ ECRYPT_keysetup:
5669 add %r11,%rsp
5670 mov %rdi,%rax
5671 mov %rsi,%rdx
5672 + pax_force_retaddr
5673 ret
5674 # enter ECRYPT_ivsetup
5675 .text
5676 @@ -917,4 +921,5 @@ ECRYPT_ivsetup:
5677 add %r11,%rsp
5678 mov %rdi,%rax
5679 mov %rsi,%rdx
5680 + pax_force_retaddr
5681 ret
5682 diff -urNp linux-3.0.8/arch/x86/crypto/twofish-x86_64-asm_64.S linux-3.0.8/arch/x86/crypto/twofish-x86_64-asm_64.S
5683 --- linux-3.0.8/arch/x86/crypto/twofish-x86_64-asm_64.S 2011-07-21 22:17:23.000000000 -0400
5684 +++ linux-3.0.8/arch/x86/crypto/twofish-x86_64-asm_64.S 2011-10-06 04:17:55.000000000 -0400
5685 @@ -21,6 +21,7 @@
5686 .text
5687
5688 #include <asm/asm-offsets.h>
5689 +#include <asm/alternative-asm.h>
5690
5691 #define a_offset 0
5692 #define b_offset 4
5693 @@ -269,6 +270,7 @@ twofish_enc_blk:
5694
5695 popq R1
5696 movq $1,%rax
5697 + pax_force_retaddr
5698 ret
5699
5700 twofish_dec_blk:
5701 @@ -321,4 +323,5 @@ twofish_dec_blk:
5702
5703 popq R1
5704 movq $1,%rax
5705 + pax_force_retaddr
5706 ret
5707 diff -urNp linux-3.0.8/arch/x86/ia32/ia32_aout.c linux-3.0.8/arch/x86/ia32/ia32_aout.c
5708 --- linux-3.0.8/arch/x86/ia32/ia32_aout.c 2011-07-21 22:17:23.000000000 -0400
5709 +++ linux-3.0.8/arch/x86/ia32/ia32_aout.c 2011-08-23 21:48:14.000000000 -0400
5710 @@ -162,6 +162,8 @@ static int aout_core_dump(long signr, st
5711 unsigned long dump_start, dump_size;
5712 struct user32 dump;
5713
5714 + memset(&dump, 0, sizeof(dump));
5715 +
5716 fs = get_fs();
5717 set_fs(KERNEL_DS);
5718 has_dumped = 1;
5719 diff -urNp linux-3.0.8/arch/x86/ia32/ia32entry.S linux-3.0.8/arch/x86/ia32/ia32entry.S
5720 --- linux-3.0.8/arch/x86/ia32/ia32entry.S 2011-07-21 22:17:23.000000000 -0400
5721 +++ linux-3.0.8/arch/x86/ia32/ia32entry.S 2011-10-11 10:44:33.000000000 -0400
5722 @@ -13,7 +13,9 @@
5723 #include <asm/thread_info.h>
5724 #include <asm/segment.h>
5725 #include <asm/irqflags.h>
5726 +#include <asm/pgtable.h>
5727 #include <linux/linkage.h>
5728 +#include <asm/alternative-asm.h>
5729
5730 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
5731 #include <linux/elf-em.h>
5732 @@ -95,6 +97,29 @@ ENTRY(native_irq_enable_sysexit)
5733 ENDPROC(native_irq_enable_sysexit)
5734 #endif
5735
5736 + .macro pax_enter_kernel_user
5737 +#ifdef CONFIG_PAX_MEMORY_UDEREF
5738 + call pax_enter_kernel_user
5739 +#endif
5740 + .endm
5741 +
5742 + .macro pax_exit_kernel_user
5743 +#ifdef CONFIG_PAX_MEMORY_UDEREF
5744 + call pax_exit_kernel_user
5745 +#endif
5746 +#ifdef CONFIG_PAX_RANDKSTACK
5747 + pushq %rax
5748 + call pax_randomize_kstack
5749 + popq %rax
5750 +#endif
5751 + .endm
5752 +
5753 + .macro pax_erase_kstack
5754 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
5755 + call pax_erase_kstack
5756 +#endif
5757 + .endm
5758 +
5759 /*
5760 * 32bit SYSENTER instruction entry.
5761 *
5762 @@ -121,7 +146,7 @@ ENTRY(ia32_sysenter_target)
5763 CFI_REGISTER rsp,rbp
5764 SWAPGS_UNSAFE_STACK
5765 movq PER_CPU_VAR(kernel_stack), %rsp
5766 - addq $(KERNEL_STACK_OFFSET),%rsp
5767 + pax_enter_kernel_user
5768 /*
5769 * No need to follow this irqs on/off section: the syscall
5770 * disabled irqs, here we enable it straight after entry:
5771 @@ -134,7 +159,8 @@ ENTRY(ia32_sysenter_target)
5772 CFI_REL_OFFSET rsp,0
5773 pushfq_cfi
5774 /*CFI_REL_OFFSET rflags,0*/
5775 - movl 8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d
5776 + GET_THREAD_INFO(%r10)
5777 + movl TI_sysenter_return(%r10), %r10d
5778 CFI_REGISTER rip,r10
5779 pushq_cfi $__USER32_CS
5780 /*CFI_REL_OFFSET cs,0*/
5781 @@ -146,6 +172,12 @@ ENTRY(ia32_sysenter_target)
5782 SAVE_ARGS 0,0,1
5783 /* no need to do an access_ok check here because rbp has been
5784 32bit zero extended */
5785 +
5786 +#ifdef CONFIG_PAX_MEMORY_UDEREF
5787 + mov $PAX_USER_SHADOW_BASE,%r10
5788 + add %r10,%rbp
5789 +#endif
5790 +
5791 1: movl (%rbp),%ebp
5792 .section __ex_table,"a"
5793 .quad 1b,ia32_badarg
5794 @@ -168,6 +200,8 @@ sysenter_dispatch:
5795 testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
5796 jnz sysexit_audit
5797 sysexit_from_sys_call:
5798 + pax_exit_kernel_user
5799 + pax_erase_kstack
5800 andl $~TS_COMPAT,TI_status(%r10)
5801 /* clear IF, that popfq doesn't enable interrupts early */
5802 andl $~0x200,EFLAGS-R11(%rsp)
5803 @@ -194,6 +228,9 @@ sysexit_from_sys_call:
5804 movl %eax,%esi /* 2nd arg: syscall number */
5805 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
5806 call audit_syscall_entry
5807 +
5808 + pax_erase_kstack
5809 +
5810 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
5811 cmpq $(IA32_NR_syscalls-1),%rax
5812 ja ia32_badsys
5813 @@ -246,6 +283,9 @@ sysenter_tracesys:
5814 movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
5815 movq %rsp,%rdi /* &pt_regs -> arg1 */
5816 call syscall_trace_enter
5817 +
5818 + pax_erase_kstack
5819 +
5820 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
5821 RESTORE_REST
5822 cmpq $(IA32_NR_syscalls-1),%rax
5823 @@ -277,19 +317,24 @@ ENDPROC(ia32_sysenter_target)
5824 ENTRY(ia32_cstar_target)
5825 CFI_STARTPROC32 simple
5826 CFI_SIGNAL_FRAME
5827 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
5828 + CFI_DEF_CFA rsp,0
5829 CFI_REGISTER rip,rcx
5830 /*CFI_REGISTER rflags,r11*/
5831 SWAPGS_UNSAFE_STACK
5832 movl %esp,%r8d
5833 CFI_REGISTER rsp,r8
5834 movq PER_CPU_VAR(kernel_stack),%rsp
5835 +
5836 +#ifdef CONFIG_PAX_MEMORY_UDEREF
5837 + pax_enter_kernel_user
5838 +#endif
5839 +
5840 /*
5841 * No need to follow this irqs on/off section: the syscall
5842 * disabled irqs and here we enable it straight after entry:
5843 */
5844 ENABLE_INTERRUPTS(CLBR_NONE)
5845 - SAVE_ARGS 8,1,1
5846 + SAVE_ARGS 8*6,1,1
5847 movl %eax,%eax /* zero extension */
5848 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
5849 movq %rcx,RIP-ARGOFFSET(%rsp)
5850 @@ -305,6 +350,12 @@ ENTRY(ia32_cstar_target)
5851 /* no need to do an access_ok check here because r8 has been
5852 32bit zero extended */
5853 /* hardware stack frame is complete now */
5854 +
5855 +#ifdef CONFIG_PAX_MEMORY_UDEREF
5856 + mov $PAX_USER_SHADOW_BASE,%r10
5857 + add %r10,%r8
5858 +#endif
5859 +
5860 1: movl (%r8),%r9d
5861 .section __ex_table,"a"
5862 .quad 1b,ia32_badarg
5863 @@ -327,6 +378,8 @@ cstar_dispatch:
5864 testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
5865 jnz sysretl_audit
5866 sysretl_from_sys_call:
5867 + pax_exit_kernel_user
5868 + pax_erase_kstack
5869 andl $~TS_COMPAT,TI_status(%r10)
5870 RESTORE_ARGS 1,-ARG_SKIP,1,1,1
5871 movl RIP-ARGOFFSET(%rsp),%ecx
5872 @@ -364,6 +417,9 @@ cstar_tracesys:
5873 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
5874 movq %rsp,%rdi /* &pt_regs -> arg1 */
5875 call syscall_trace_enter
5876 +
5877 + pax_erase_kstack
5878 +
5879 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
5880 RESTORE_REST
5881 xchgl %ebp,%r9d
5882 @@ -409,6 +465,7 @@ ENTRY(ia32_syscall)
5883 CFI_REL_OFFSET rip,RIP-RIP
5884 PARAVIRT_ADJUST_EXCEPTION_FRAME
5885 SWAPGS
5886 + pax_enter_kernel_user
5887 /*
5888 * No need to follow this irqs on/off section: the syscall
5889 * disabled irqs and here we enable it straight after entry:
5890 @@ -441,6 +498,9 @@ ia32_tracesys:
5891 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
5892 movq %rsp,%rdi /* &pt_regs -> arg1 */
5893 call syscall_trace_enter
5894 +
5895 + pax_erase_kstack
5896 +
5897 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
5898 RESTORE_REST
5899 cmpq $(IA32_NR_syscalls-1),%rax
5900 @@ -455,6 +515,7 @@ ia32_badsys:
5901
5902 quiet_ni_syscall:
5903 movq $-ENOSYS,%rax
5904 + pax_force_retaddr
5905 ret
5906 CFI_ENDPROC
5907
5908 diff -urNp linux-3.0.8/arch/x86/ia32/ia32_signal.c linux-3.0.8/arch/x86/ia32/ia32_signal.c
5909 --- linux-3.0.8/arch/x86/ia32/ia32_signal.c 2011-07-21 22:17:23.000000000 -0400
5910 +++ linux-3.0.8/arch/x86/ia32/ia32_signal.c 2011-10-06 04:17:55.000000000 -0400
5911 @@ -167,7 +167,7 @@ asmlinkage long sys32_sigaltstack(const
5912 }
5913 seg = get_fs();
5914 set_fs(KERNEL_DS);
5915 - ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss, regs->sp);
5916 + ret = do_sigaltstack(uss_ptr ? (const stack_t __force_user *)&uss : NULL, (stack_t __force_user *)&uoss, regs->sp);
5917 set_fs(seg);
5918 if (ret >= 0 && uoss_ptr) {
5919 if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t)))
5920 @@ -374,7 +374,7 @@ static int ia32_setup_sigcontext(struct
5921 */
5922 static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
5923 size_t frame_size,
5924 - void **fpstate)
5925 + void __user **fpstate)
5926 {
5927 unsigned long sp;
5928
5929 @@ -395,7 +395,7 @@ static void __user *get_sigframe(struct
5930
5931 if (used_math()) {
5932 sp = sp - sig_xstate_ia32_size;
5933 - *fpstate = (struct _fpstate_ia32 *) sp;
5934 + *fpstate = (struct _fpstate_ia32 __user *) sp;
5935 if (save_i387_xstate_ia32(*fpstate) < 0)
5936 return (void __user *) -1L;
5937 }
5938 @@ -403,7 +403,7 @@ static void __user *get_sigframe(struct
5939 sp -= frame_size;
5940 /* Align the stack pointer according to the i386 ABI,
5941 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
5942 - sp = ((sp + 4) & -16ul) - 4;
5943 + sp = ((sp - 12) & -16ul) - 4;
5944 return (void __user *) sp;
5945 }
5946
5947 @@ -461,7 +461,7 @@ int ia32_setup_frame(int sig, struct k_s
5948 * These are actually not used anymore, but left because some
5949 * gdb versions depend on them as a marker.
5950 */
5951 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
5952 + put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
5953 } put_user_catch(err);
5954
5955 if (err)
5956 @@ -503,7 +503,7 @@ int ia32_setup_rt_frame(int sig, struct
5957 0xb8,
5958 __NR_ia32_rt_sigreturn,
5959 0x80cd,
5960 - 0,
5961 + 0
5962 };
5963
5964 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
5965 @@ -533,16 +533,18 @@ int ia32_setup_rt_frame(int sig, struct
5966
5967 if (ka->sa.sa_flags & SA_RESTORER)
5968 restorer = ka->sa.sa_restorer;
5969 + else if (current->mm->context.vdso)
5970 + /* Return stub is in 32bit vsyscall page */
5971 + restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
5972 else
5973 - restorer = VDSO32_SYMBOL(current->mm->context.vdso,
5974 - rt_sigreturn);
5975 + restorer = &frame->retcode;
5976 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
5977
5978 /*
5979 * Not actually used anymore, but left because some gdb
5980 * versions need it.
5981 */
5982 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
5983 + put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
5984 } put_user_catch(err);
5985
5986 if (err)
5987 diff -urNp linux-3.0.8/arch/x86/ia32/sys_ia32.c linux-3.0.8/arch/x86/ia32/sys_ia32.c
5988 --- linux-3.0.8/arch/x86/ia32/sys_ia32.c 2011-07-21 22:17:23.000000000 -0400
5989 +++ linux-3.0.8/arch/x86/ia32/sys_ia32.c 2011-10-06 04:17:55.000000000 -0400
5990 @@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsign
5991 */
5992 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
5993 {
5994 - typeof(ubuf->st_uid) uid = 0;
5995 - typeof(ubuf->st_gid) gid = 0;
5996 + typeof(((struct stat64 *)0)->st_uid) uid = 0;
5997 + typeof(((struct stat64 *)0)->st_gid) gid = 0;
5998 SET_UID(uid, stat->uid);
5999 SET_GID(gid, stat->gid);
6000 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
6001 @@ -308,8 +308,8 @@ asmlinkage long sys32_rt_sigprocmask(int
6002 }
6003 set_fs(KERNEL_DS);
6004 ret = sys_rt_sigprocmask(how,
6005 - set ? (sigset_t __user *)&s : NULL,
6006 - oset ? (sigset_t __user *)&s : NULL,
6007 + set ? (sigset_t __force_user *)&s : NULL,
6008 + oset ? (sigset_t __force_user *)&s : NULL,
6009 sigsetsize);
6010 set_fs(old_fs);
6011 if (ret)
6012 @@ -332,7 +332,7 @@ asmlinkage long sys32_alarm(unsigned int
6013 return alarm_setitimer(seconds);
6014 }
6015
6016 -asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int *stat_addr,
6017 +asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int __user *stat_addr,
6018 int options)
6019 {
6020 return compat_sys_wait4(pid, stat_addr, options, NULL);
6021 @@ -353,7 +353,7 @@ asmlinkage long sys32_sched_rr_get_inter
6022 mm_segment_t old_fs = get_fs();
6023
6024 set_fs(KERNEL_DS);
6025 - ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
6026 + ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
6027 set_fs(old_fs);
6028 if (put_compat_timespec(&t, interval))
6029 return -EFAULT;
6030 @@ -369,7 +369,7 @@ asmlinkage long sys32_rt_sigpending(comp
6031 mm_segment_t old_fs = get_fs();
6032
6033 set_fs(KERNEL_DS);
6034 - ret = sys_rt_sigpending((sigset_t __user *)&s, sigsetsize);
6035 + ret = sys_rt_sigpending((sigset_t __force_user *)&s, sigsetsize);
6036 set_fs(old_fs);
6037 if (!ret) {
6038 switch (_NSIG_WORDS) {
6039 @@ -394,7 +394,7 @@ asmlinkage long sys32_rt_sigqueueinfo(in
6040 if (copy_siginfo_from_user32(&info, uinfo))
6041 return -EFAULT;
6042 set_fs(KERNEL_DS);
6043 - ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *)&info);
6044 + ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force_user *)&info);
6045 set_fs(old_fs);
6046 return ret;
6047 }
6048 @@ -439,7 +439,7 @@ asmlinkage long sys32_sendfile(int out_f
6049 return -EFAULT;
6050
6051 set_fs(KERNEL_DS);
6052 - ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL,
6053 + ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __force_user *)&of : NULL,
6054 count);
6055 set_fs(old_fs);
6056
6057 diff -urNp linux-3.0.8/arch/x86/include/asm/alternative-asm.h linux-3.0.8/arch/x86/include/asm/alternative-asm.h
6058 --- linux-3.0.8/arch/x86/include/asm/alternative-asm.h 2011-07-21 22:17:23.000000000 -0400
6059 +++ linux-3.0.8/arch/x86/include/asm/alternative-asm.h 2011-10-07 19:07:23.000000000 -0400
6060 @@ -15,6 +15,20 @@
6061 .endm
6062 #endif
6063
6064 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
6065 + .macro pax_force_retaddr rip=0
6066 + btsq $63,\rip(%rsp)
6067 + .endm
6068 + .macro pax_force_fptr ptr
6069 + btsq $63,\ptr
6070 + .endm
6071 +#else
6072 + .macro pax_force_retaddr rip=0
6073 + .endm
6074 + .macro pax_force_fptr ptr
6075 + .endm
6076 +#endif
6077 +
6078 .macro altinstruction_entry orig alt feature orig_len alt_len
6079 .align 8
6080 .quad \orig
6081 diff -urNp linux-3.0.8/arch/x86/include/asm/alternative.h linux-3.0.8/arch/x86/include/asm/alternative.h
6082 --- linux-3.0.8/arch/x86/include/asm/alternative.h 2011-07-21 22:17:23.000000000 -0400
6083 +++ linux-3.0.8/arch/x86/include/asm/alternative.h 2011-08-23 21:47:55.000000000 -0400
6084 @@ -93,7 +93,7 @@ static inline int alternatives_text_rese
6085 ".section .discard,\"aw\",@progbits\n" \
6086 " .byte 0xff + (664f-663f) - (662b-661b)\n" /* rlen <= slen */ \
6087 ".previous\n" \
6088 - ".section .altinstr_replacement, \"ax\"\n" \
6089 + ".section .altinstr_replacement, \"a\"\n" \
6090 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
6091 ".previous"
6092
6093 diff -urNp linux-3.0.8/arch/x86/include/asm/apic.h linux-3.0.8/arch/x86/include/asm/apic.h
6094 --- linux-3.0.8/arch/x86/include/asm/apic.h 2011-07-21 22:17:23.000000000 -0400
6095 +++ linux-3.0.8/arch/x86/include/asm/apic.h 2011-08-23 21:48:14.000000000 -0400
6096 @@ -45,7 +45,7 @@ static inline void generic_apic_probe(vo
6097
6098 #ifdef CONFIG_X86_LOCAL_APIC
6099
6100 -extern unsigned int apic_verbosity;
6101 +extern int apic_verbosity;
6102 extern int local_apic_timer_c2_ok;
6103
6104 extern int disable_apic;
6105 diff -urNp linux-3.0.8/arch/x86/include/asm/apm.h linux-3.0.8/arch/x86/include/asm/apm.h
6106 --- linux-3.0.8/arch/x86/include/asm/apm.h 2011-07-21 22:17:23.000000000 -0400
6107 +++ linux-3.0.8/arch/x86/include/asm/apm.h 2011-08-23 21:47:55.000000000 -0400
6108 @@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32
6109 __asm__ __volatile__(APM_DO_ZERO_SEGS
6110 "pushl %%edi\n\t"
6111 "pushl %%ebp\n\t"
6112 - "lcall *%%cs:apm_bios_entry\n\t"
6113 + "lcall *%%ss:apm_bios_entry\n\t"
6114 "setc %%al\n\t"
6115 "popl %%ebp\n\t"
6116 "popl %%edi\n\t"
6117 @@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_as
6118 __asm__ __volatile__(APM_DO_ZERO_SEGS
6119 "pushl %%edi\n\t"
6120 "pushl %%ebp\n\t"
6121 - "lcall *%%cs:apm_bios_entry\n\t"
6122 + "lcall *%%ss:apm_bios_entry\n\t"
6123 "setc %%bl\n\t"
6124 "popl %%ebp\n\t"
6125 "popl %%edi\n\t"
6126 diff -urNp linux-3.0.8/arch/x86/include/asm/atomic64_32.h linux-3.0.8/arch/x86/include/asm/atomic64_32.h
6127 --- linux-3.0.8/arch/x86/include/asm/atomic64_32.h 2011-07-21 22:17:23.000000000 -0400
6128 +++ linux-3.0.8/arch/x86/include/asm/atomic64_32.h 2011-08-23 21:47:55.000000000 -0400
6129 @@ -12,6 +12,14 @@ typedef struct {
6130 u64 __aligned(8) counter;
6131 } atomic64_t;
6132
6133 +#ifdef CONFIG_PAX_REFCOUNT
6134 +typedef struct {
6135 + u64 __aligned(8) counter;
6136 +} atomic64_unchecked_t;
6137 +#else
6138 +typedef atomic64_t atomic64_unchecked_t;
6139 +#endif
6140 +
6141 #define ATOMIC64_INIT(val) { (val) }
6142
6143 #ifdef CONFIG_X86_CMPXCHG64
6144 @@ -38,6 +46,21 @@ static inline long long atomic64_cmpxchg
6145 }
6146
6147 /**
6148 + * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
6149 + * @p: pointer to type atomic64_unchecked_t
6150 + * @o: expected value
6151 + * @n: new value
6152 + *
6153 + * Atomically sets @v to @n if it was equal to @o and returns
6154 + * the old value.
6155 + */
6156 +
6157 +static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
6158 +{
6159 + return cmpxchg64(&v->counter, o, n);
6160 +}
6161 +
6162 +/**
6163 * atomic64_xchg - xchg atomic64 variable
6164 * @v: pointer to type atomic64_t
6165 * @n: value to assign
6166 @@ -77,6 +100,24 @@ static inline void atomic64_set(atomic64
6167 }
6168
6169 /**
6170 + * atomic64_set_unchecked - set atomic64 variable
6171 + * @v: pointer to type atomic64_unchecked_t
6172 + * @n: value to assign
6173 + *
6174 + * Atomically sets the value of @v to @n.
6175 + */
6176 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
6177 +{
6178 + unsigned high = (unsigned)(i >> 32);
6179 + unsigned low = (unsigned)i;
6180 + asm volatile(ATOMIC64_ALTERNATIVE(set)
6181 + : "+b" (low), "+c" (high)
6182 + : "S" (v)
6183 + : "eax", "edx", "memory"
6184 + );
6185 +}
6186 +
6187 +/**
6188 * atomic64_read - read atomic64 variable
6189 * @v: pointer to type atomic64_t
6190 *
6191 @@ -93,6 +134,22 @@ static inline long long atomic64_read(at
6192 }
6193
6194 /**
6195 + * atomic64_read_unchecked - read atomic64 variable
6196 + * @v: pointer to type atomic64_unchecked_t
6197 + *
6198 + * Atomically reads the value of @v and returns it.
6199 + */
6200 +static inline long long atomic64_read_unchecked(atomic64_unchecked_t *v)
6201 +{
6202 + long long r;
6203 + asm volatile(ATOMIC64_ALTERNATIVE(read_unchecked)
6204 + : "=A" (r), "+c" (v)
6205 + : : "memory"
6206 + );
6207 + return r;
6208 + }
6209 +
6210 +/**
6211 * atomic64_add_return - add and return
6212 * @i: integer value to add
6213 * @v: pointer to type atomic64_t
6214 @@ -108,6 +165,22 @@ static inline long long atomic64_add_ret
6215 return i;
6216 }
6217
6218 +/**
6219 + * atomic64_add_return_unchecked - add and return
6220 + * @i: integer value to add
6221 + * @v: pointer to type atomic64_unchecked_t
6222 + *
6223 + * Atomically adds @i to @v and returns @i + *@v
6224 + */
6225 +static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
6226 +{
6227 + asm volatile(ATOMIC64_ALTERNATIVE(add_return_unchecked)
6228 + : "+A" (i), "+c" (v)
6229 + : : "memory"
6230 + );
6231 + return i;
6232 +}
6233 +
6234 /*
6235 * Other variants with different arithmetic operators:
6236 */
6237 @@ -131,6 +204,17 @@ static inline long long atomic64_inc_ret
6238 return a;
6239 }
6240
6241 +static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
6242 +{
6243 + long long a;
6244 + asm volatile(ATOMIC64_ALTERNATIVE(inc_return_unchecked)
6245 + : "=A" (a)
6246 + : "S" (v)
6247 + : "memory", "ecx"
6248 + );
6249 + return a;
6250 +}
6251 +
6252 static inline long long atomic64_dec_return(atomic64_t *v)
6253 {
6254 long long a;
6255 @@ -159,6 +243,22 @@ static inline long long atomic64_add(lon
6256 }
6257
6258 /**
6259 + * atomic64_add_unchecked - add integer to atomic64 variable
6260 + * @i: integer value to add
6261 + * @v: pointer to type atomic64_unchecked_t
6262 + *
6263 + * Atomically adds @i to @v.
6264 + */
6265 +static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
6266 +{
6267 + asm volatile(ATOMIC64_ALTERNATIVE_(add_unchecked, add_return_unchecked)
6268 + : "+A" (i), "+c" (v)
6269 + : : "memory"
6270 + );
6271 + return i;
6272 +}
6273 +
6274 +/**
6275 * atomic64_sub - subtract the atomic64 variable
6276 * @i: integer value to subtract
6277 * @v: pointer to type atomic64_t
6278 diff -urNp linux-3.0.8/arch/x86/include/asm/atomic64_64.h linux-3.0.8/arch/x86/include/asm/atomic64_64.h
6279 --- linux-3.0.8/arch/x86/include/asm/atomic64_64.h 2011-07-21 22:17:23.000000000 -0400
6280 +++ linux-3.0.8/arch/x86/include/asm/atomic64_64.h 2011-08-23 21:47:55.000000000 -0400
6281 @@ -18,7 +18,19 @@
6282 */
6283 static inline long atomic64_read(const atomic64_t *v)
6284 {
6285 - return (*(volatile long *)&(v)->counter);
6286 + return (*(volatile const long *)&(v)->counter);
6287 +}
6288 +
6289 +/**
6290 + * atomic64_read_unchecked - read atomic64 variable
6291 + * @v: pointer of type atomic64_unchecked_t
6292 + *
6293 + * Atomically reads the value of @v.
6294 + * Doesn't imply a read memory barrier.
6295 + */
6296 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
6297 +{
6298 + return (*(volatile const long *)&(v)->counter);
6299 }
6300
6301 /**
6302 @@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64
6303 }
6304
6305 /**
6306 + * atomic64_set_unchecked - set atomic64 variable
6307 + * @v: pointer to type atomic64_unchecked_t
6308 + * @i: required value
6309 + *
6310 + * Atomically sets the value of @v to @i.
6311 + */
6312 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
6313 +{
6314 + v->counter = i;
6315 +}
6316 +
6317 +/**
6318 * atomic64_add - add integer to atomic64 variable
6319 * @i: integer value to add
6320 * @v: pointer to type atomic64_t
6321 @@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64
6322 */
6323 static inline void atomic64_add(long i, atomic64_t *v)
6324 {
6325 + asm volatile(LOCK_PREFIX "addq %1,%0\n"
6326 +
6327 +#ifdef CONFIG_PAX_REFCOUNT
6328 + "jno 0f\n"
6329 + LOCK_PREFIX "subq %1,%0\n"
6330 + "int $4\n0:\n"
6331 + _ASM_EXTABLE(0b, 0b)
6332 +#endif
6333 +
6334 + : "=m" (v->counter)
6335 + : "er" (i), "m" (v->counter));
6336 +}
6337 +
6338 +/**
6339 + * atomic64_add_unchecked - add integer to atomic64 variable
6340 + * @i: integer value to add
6341 + * @v: pointer to type atomic64_unchecked_t
6342 + *
6343 + * Atomically adds @i to @v.
6344 + */
6345 +static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
6346 +{
6347 asm volatile(LOCK_PREFIX "addq %1,%0"
6348 : "=m" (v->counter)
6349 : "er" (i), "m" (v->counter));
6350 @@ -56,7 +102,29 @@ static inline void atomic64_add(long i,
6351 */
6352 static inline void atomic64_sub(long i, atomic64_t *v)
6353 {
6354 - asm volatile(LOCK_PREFIX "subq %1,%0"
6355 + asm volatile(LOCK_PREFIX "subq %1,%0\n"
6356 +
6357 +#ifdef CONFIG_PAX_REFCOUNT
6358 + "jno 0f\n"
6359 + LOCK_PREFIX "addq %1,%0\n"
6360 + "int $4\n0:\n"
6361 + _ASM_EXTABLE(0b, 0b)
6362 +#endif
6363 +
6364 + : "=m" (v->counter)
6365 + : "er" (i), "m" (v->counter));
6366 +}
6367 +
6368 +/**
6369 + * atomic64_sub_unchecked - subtract the atomic64 variable
6370 + * @i: integer value to subtract
6371 + * @v: pointer to type atomic64_unchecked_t
6372 + *
6373 + * Atomically subtracts @i from @v.
6374 + */
6375 +static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
6376 +{
6377 + asm volatile(LOCK_PREFIX "subq %1,%0\n"
6378 : "=m" (v->counter)
6379 : "er" (i), "m" (v->counter));
6380 }
6381 @@ -74,7 +142,16 @@ static inline int atomic64_sub_and_test(
6382 {
6383 unsigned char c;
6384
6385 - asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
6386 + asm volatile(LOCK_PREFIX "subq %2,%0\n"
6387 +
6388 +#ifdef CONFIG_PAX_REFCOUNT
6389 + "jno 0f\n"
6390 + LOCK_PREFIX "addq %2,%0\n"
6391 + "int $4\n0:\n"
6392 + _ASM_EXTABLE(0b, 0b)
6393 +#endif
6394 +
6395 + "sete %1\n"
6396 : "=m" (v->counter), "=qm" (c)
6397 : "er" (i), "m" (v->counter) : "memory");
6398 return c;
6399 @@ -88,6 +165,27 @@ static inline int atomic64_sub_and_test(
6400 */
6401 static inline void atomic64_inc(atomic64_t *v)
6402 {
6403 + asm volatile(LOCK_PREFIX "incq %0\n"
6404 +
6405 +#ifdef CONFIG_PAX_REFCOUNT
6406 + "jno 0f\n"
6407 + LOCK_PREFIX "decq %0\n"
6408 + "int $4\n0:\n"
6409 + _ASM_EXTABLE(0b, 0b)
6410 +#endif
6411 +
6412 + : "=m" (v->counter)
6413 + : "m" (v->counter));
6414 +}
6415 +
6416 +/**
6417 + * atomic64_inc_unchecked - increment atomic64 variable
6418 + * @v: pointer to type atomic64_unchecked_t
6419 + *
6420 + * Atomically increments @v by 1.
6421 + */
6422 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
6423 +{
6424 asm volatile(LOCK_PREFIX "incq %0"
6425 : "=m" (v->counter)
6426 : "m" (v->counter));
6427 @@ -101,7 +199,28 @@ static inline void atomic64_inc(atomic64
6428 */
6429 static inline void atomic64_dec(atomic64_t *v)
6430 {
6431 - asm volatile(LOCK_PREFIX "decq %0"
6432 + asm volatile(LOCK_PREFIX "decq %0\n"
6433 +
6434 +#ifdef CONFIG_PAX_REFCOUNT
6435 + "jno 0f\n"
6436 + LOCK_PREFIX "incq %0\n"
6437 + "int $4\n0:\n"
6438 + _ASM_EXTABLE(0b, 0b)
6439 +#endif
6440 +
6441 + : "=m" (v->counter)
6442 + : "m" (v->counter));
6443 +}
6444 +
6445 +/**
6446 + * atomic64_dec_unchecked - decrement atomic64 variable
6447 + * @v: pointer to type atomic64_unchecked_t
6448 + *
6449 + * Atomically decrements @v by 1.
6450 + */
6451 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
6452 +{
6453 + asm volatile(LOCK_PREFIX "decq %0\n"
6454 : "=m" (v->counter)
6455 : "m" (v->counter));
6456 }
6457 @@ -118,7 +237,16 @@ static inline int atomic64_dec_and_test(
6458 {
6459 unsigned char c;
6460
6461 - asm volatile(LOCK_PREFIX "decq %0; sete %1"
6462 + asm volatile(LOCK_PREFIX "decq %0\n"
6463 +
6464 +#ifdef CONFIG_PAX_REFCOUNT
6465 + "jno 0f\n"
6466 + LOCK_PREFIX "incq %0\n"
6467 + "int $4\n0:\n"
6468 + _ASM_EXTABLE(0b, 0b)
6469 +#endif
6470 +
6471 + "sete %1\n"
6472 : "=m" (v->counter), "=qm" (c)
6473 : "m" (v->counter) : "memory");
6474 return c != 0;
6475 @@ -136,7 +264,16 @@ static inline int atomic64_inc_and_test(
6476 {
6477 unsigned char c;
6478
6479 - asm volatile(LOCK_PREFIX "incq %0; sete %1"
6480 + asm volatile(LOCK_PREFIX "incq %0\n"
6481 +
6482 +#ifdef CONFIG_PAX_REFCOUNT
6483 + "jno 0f\n"
6484 + LOCK_PREFIX "decq %0\n"
6485 + "int $4\n0:\n"
6486 + _ASM_EXTABLE(0b, 0b)
6487 +#endif
6488 +
6489 + "sete %1\n"
6490 : "=m" (v->counter), "=qm" (c)
6491 : "m" (v->counter) : "memory");
6492 return c != 0;
6493 @@ -155,7 +292,16 @@ static inline int atomic64_add_negative(
6494 {
6495 unsigned char c;
6496
6497 - asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
6498 + asm volatile(LOCK_PREFIX "addq %2,%0\n"
6499 +
6500 +#ifdef CONFIG_PAX_REFCOUNT
6501 + "jno 0f\n"
6502 + LOCK_PREFIX "subq %2,%0\n"
6503 + "int $4\n0:\n"
6504 + _ASM_EXTABLE(0b, 0b)
6505 +#endif
6506 +
6507 + "sets %1\n"
6508 : "=m" (v->counter), "=qm" (c)
6509 : "er" (i), "m" (v->counter) : "memory");
6510 return c;
6511 @@ -171,7 +317,31 @@ static inline int atomic64_add_negative(
6512 static inline long atomic64_add_return(long i, atomic64_t *v)
6513 {
6514 long __i = i;
6515 - asm volatile(LOCK_PREFIX "xaddq %0, %1;"
6516 + asm volatile(LOCK_PREFIX "xaddq %0, %1\n"
6517 +
6518 +#ifdef CONFIG_PAX_REFCOUNT
6519 + "jno 0f\n"
6520 + "movq %0, %1\n"
6521 + "int $4\n0:\n"
6522 + _ASM_EXTABLE(0b, 0b)
6523 +#endif
6524 +
6525 + : "+r" (i), "+m" (v->counter)
6526 + : : "memory");
6527 + return i + __i;
6528 +}
6529 +
6530 +/**
6531 + * atomic64_add_return_unchecked - add and return
6532 + * @i: integer value to add
6533 + * @v: pointer to type atomic64_unchecked_t
6534 + *
6535 + * Atomically adds @i to @v and returns @i + @v
6536 + */
6537 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
6538 +{
6539 + long __i = i;
6540 + asm volatile(LOCK_PREFIX "xaddq %0, %1"
6541 : "+r" (i), "+m" (v->counter)
6542 : : "memory");
6543 return i + __i;
6544 @@ -183,6 +353,10 @@ static inline long atomic64_sub_return(l
6545 }
6546
6547 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
6548 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
6549 +{
6550 + return atomic64_add_return_unchecked(1, v);
6551 +}
6552 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
6553
6554 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
6555 @@ -190,6 +364,11 @@ static inline long atomic64_cmpxchg(atom
6556 return cmpxchg(&v->counter, old, new);
6557 }
6558
6559 +static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
6560 +{
6561 + return cmpxchg(&v->counter, old, new);
6562 +}
6563 +
6564 static inline long atomic64_xchg(atomic64_t *v, long new)
6565 {
6566 return xchg(&v->counter, new);
6567 @@ -206,17 +385,30 @@ static inline long atomic64_xchg(atomic6
6568 */
6569 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
6570 {
6571 - long c, old;
6572 + long c, old, new;
6573 c = atomic64_read(v);
6574 for (;;) {
6575 - if (unlikely(c == (u)))
6576 + if (unlikely(c == u))
6577 break;
6578 - old = atomic64_cmpxchg((v), c, c + (a));
6579 +
6580 + asm volatile("add %2,%0\n"
6581 +
6582 +#ifdef CONFIG_PAX_REFCOUNT
6583 + "jno 0f\n"
6584 + "sub %2,%0\n"
6585 + "int $4\n0:\n"
6586 + _ASM_EXTABLE(0b, 0b)
6587 +#endif
6588 +
6589 + : "=r" (new)
6590 + : "0" (c), "ir" (a));
6591 +
6592 + old = atomic64_cmpxchg(v, c, new);
6593 if (likely(old == c))
6594 break;
6595 c = old;
6596 }
6597 - return c != (u);
6598 + return c != u;
6599 }
6600
6601 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
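
A minimal user-space sketch of the check-and-undo idea used by the CONFIG_PAX_REFCOUNT hunks above (not part of the patch; helper name is illustrative): perform the locked operation, test the overflow flag with jno, and on overflow roll the operation back. The kernel raises int $4 and relies on the exception-table entry to reach the refcount handler; the sketch reports through a flag instead so it can run as a normal x86-64 program under GCC or Clang.

#include <limits.h>
#include <stdio.h>

/* Sketch only (x86-64, GCC/Clang inline asm): mirrors the "jno ... undo"
 * sequence used under CONFIG_PAX_REFCOUNT, but reports overflow through a
 * flag instead of "int $4" plus an exception-table fixup. */
static int checked_add(long *counter, long i)
{
	int overflowed = 0;

	asm volatile("lock addq %2, %0\n\t"
		     "jno 0f\n\t"		/* no signed overflow: done */
		     "lock subq %2, %0\n\t"	/* undo the add */
		     "movl $1, %1\n"		/* report instead of trapping */
		     "0:"
		     : "+m" (*counter), "+r" (overflowed)
		     : "er" (i)
		     : "cc", "memory");
	return overflowed;
}

int main(void)
{
	long c = LONG_MAX;	/* the next increment overflows */
	int ov = checked_add(&c, 1);

	printf("overflowed=%d counter=%ld\n", ov, c);	/* 1, LONG_MAX */
	return 0;
}
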
6602 diff -urNp linux-3.0.8/arch/x86/include/asm/atomic.h linux-3.0.8/arch/x86/include/asm/atomic.h
6603 --- linux-3.0.8/arch/x86/include/asm/atomic.h 2011-07-21 22:17:23.000000000 -0400
6604 +++ linux-3.0.8/arch/x86/include/asm/atomic.h 2011-08-23 21:47:55.000000000 -0400
6605 @@ -22,7 +22,18 @@
6606 */
6607 static inline int atomic_read(const atomic_t *v)
6608 {
6609 - return (*(volatile int *)&(v)->counter);
6610 + return (*(volatile const int *)&(v)->counter);
6611 +}
6612 +
6613 +/**
6614 + * atomic_read_unchecked - read atomic variable
6615 + * @v: pointer of type atomic_unchecked_t
6616 + *
6617 + * Atomically reads the value of @v.
6618 + */
6619 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
6620 +{
6621 + return (*(volatile const int *)&(v)->counter);
6622 }
6623
6624 /**
6625 @@ -38,6 +49,18 @@ static inline void atomic_set(atomic_t *
6626 }
6627
6628 /**
6629 + * atomic_set_unchecked - set atomic variable
6630 + * @v: pointer of type atomic_unchecked_t
6631 + * @i: required value
6632 + *
6633 + * Atomically sets the value of @v to @i.
6634 + */
6635 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
6636 +{
6637 + v->counter = i;
6638 +}
6639 +
6640 +/**
6641 * atomic_add - add integer to atomic variable
6642 * @i: integer value to add
6643 * @v: pointer of type atomic_t
6644 @@ -46,7 +69,29 @@ static inline void atomic_set(atomic_t *
6645 */
6646 static inline void atomic_add(int i, atomic_t *v)
6647 {
6648 - asm volatile(LOCK_PREFIX "addl %1,%0"
6649 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
6650 +
6651 +#ifdef CONFIG_PAX_REFCOUNT
6652 + "jno 0f\n"
6653 + LOCK_PREFIX "subl %1,%0\n"
6654 + "int $4\n0:\n"
6655 + _ASM_EXTABLE(0b, 0b)
6656 +#endif
6657 +
6658 + : "+m" (v->counter)
6659 + : "ir" (i));
6660 +}
6661 +
6662 +/**
6663 + * atomic_add_unchecked - add integer to atomic variable
6664 + * @i: integer value to add
6665 + * @v: pointer of type atomic_unchecked_t
6666 + *
6667 + * Atomically adds @i to @v.
6668 + */
6669 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
6670 +{
6671 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
6672 : "+m" (v->counter)
6673 : "ir" (i));
6674 }
6675 @@ -60,7 +105,29 @@ static inline void atomic_add(int i, ato
6676 */
6677 static inline void atomic_sub(int i, atomic_t *v)
6678 {
6679 - asm volatile(LOCK_PREFIX "subl %1,%0"
6680 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
6681 +
6682 +#ifdef CONFIG_PAX_REFCOUNT
6683 + "jno 0f\n"
6684 + LOCK_PREFIX "addl %1,%0\n"
6685 + "int $4\n0:\n"
6686 + _ASM_EXTABLE(0b, 0b)
6687 +#endif
6688 +
6689 + : "+m" (v->counter)
6690 + : "ir" (i));
6691 +}
6692 +
6693 +/**
6694 + * atomic_sub_unchecked - subtract integer from atomic variable
6695 + * @i: integer value to subtract
6696 + * @v: pointer of type atomic_unchecked_t
6697 + *
6698 + * Atomically subtracts @i from @v.
6699 + */
6700 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
6701 +{
6702 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
6703 : "+m" (v->counter)
6704 : "ir" (i));
6705 }
6706 @@ -78,7 +145,16 @@ static inline int atomic_sub_and_test(in
6707 {
6708 unsigned char c;
6709
6710 - asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
6711 + asm volatile(LOCK_PREFIX "subl %2,%0\n"
6712 +
6713 +#ifdef CONFIG_PAX_REFCOUNT
6714 + "jno 0f\n"
6715 + LOCK_PREFIX "addl %2,%0\n"
6716 + "int $4\n0:\n"
6717 + _ASM_EXTABLE(0b, 0b)
6718 +#endif
6719 +
6720 + "sete %1\n"
6721 : "+m" (v->counter), "=qm" (c)
6722 : "ir" (i) : "memory");
6723 return c;
6724 @@ -92,7 +168,27 @@ static inline int atomic_sub_and_test(in
6725 */
6726 static inline void atomic_inc(atomic_t *v)
6727 {
6728 - asm volatile(LOCK_PREFIX "incl %0"
6729 + asm volatile(LOCK_PREFIX "incl %0\n"
6730 +
6731 +#ifdef CONFIG_PAX_REFCOUNT
6732 + "jno 0f\n"
6733 + LOCK_PREFIX "decl %0\n"
6734 + "int $4\n0:\n"
6735 + _ASM_EXTABLE(0b, 0b)
6736 +#endif
6737 +
6738 + : "+m" (v->counter));
6739 +}
6740 +
6741 +/**
6742 + * atomic_inc_unchecked - increment atomic variable
6743 + * @v: pointer of type atomic_unchecked_t
6744 + *
6745 + * Atomically increments @v by 1.
6746 + */
6747 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
6748 +{
6749 + asm volatile(LOCK_PREFIX "incl %0\n"
6750 : "+m" (v->counter));
6751 }
6752
6753 @@ -104,7 +200,27 @@ static inline void atomic_inc(atomic_t *
6754 */
6755 static inline void atomic_dec(atomic_t *v)
6756 {
6757 - asm volatile(LOCK_PREFIX "decl %0"
6758 + asm volatile(LOCK_PREFIX "decl %0\n"
6759 +
6760 +#ifdef CONFIG_PAX_REFCOUNT
6761 + "jno 0f\n"
6762 + LOCK_PREFIX "incl %0\n"
6763 + "int $4\n0:\n"
6764 + _ASM_EXTABLE(0b, 0b)
6765 +#endif
6766 +
6767 + : "+m" (v->counter));
6768 +}
6769 +
6770 +/**
6771 + * atomic_dec_unchecked - decrement atomic variable
6772 + * @v: pointer of type atomic_unchecked_t
6773 + *
6774 + * Atomically decrements @v by 1.
6775 + */
6776 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
6777 +{
6778 + asm volatile(LOCK_PREFIX "decl %0\n"
6779 : "+m" (v->counter));
6780 }
6781
6782 @@ -120,7 +236,16 @@ static inline int atomic_dec_and_test(at
6783 {
6784 unsigned char c;
6785
6786 - asm volatile(LOCK_PREFIX "decl %0; sete %1"
6787 + asm volatile(LOCK_PREFIX "decl %0\n"
6788 +
6789 +#ifdef CONFIG_PAX_REFCOUNT
6790 + "jno 0f\n"
6791 + LOCK_PREFIX "incl %0\n"
6792 + "int $4\n0:\n"
6793 + _ASM_EXTABLE(0b, 0b)
6794 +#endif
6795 +
6796 + "sete %1\n"
6797 : "+m" (v->counter), "=qm" (c)
6798 : : "memory");
6799 return c != 0;
6800 @@ -138,7 +263,35 @@ static inline int atomic_inc_and_test(at
6801 {
6802 unsigned char c;
6803
6804 - asm volatile(LOCK_PREFIX "incl %0; sete %1"
6805 + asm volatile(LOCK_PREFIX "incl %0\n"
6806 +
6807 +#ifdef CONFIG_PAX_REFCOUNT
6808 + "jno 0f\n"
6809 + LOCK_PREFIX "decl %0\n"
6810 + "int $4\n0:\n"
6811 + _ASM_EXTABLE(0b, 0b)
6812 +#endif
6813 +
6814 + "sete %1\n"
6815 + : "+m" (v->counter), "=qm" (c)
6816 + : : "memory");
6817 + return c != 0;
6818 +}
6819 +
6820 +/**
6821 + * atomic_inc_and_test_unchecked - increment and test
6822 + * @v: pointer of type atomic_unchecked_t
6823 + *
6824 + * Atomically increments @v by 1
6825 + * and returns true if the result is zero, or false for all
6826 + * other cases.
6827 + */
6828 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
6829 +{
6830 + unsigned char c;
6831 +
6832 + asm volatile(LOCK_PREFIX "incl %0\n"
6833 + "sete %1\n"
6834 : "+m" (v->counter), "=qm" (c)
6835 : : "memory");
6836 return c != 0;
6837 @@ -157,7 +310,16 @@ static inline int atomic_add_negative(in
6838 {
6839 unsigned char c;
6840
6841 - asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
6842 + asm volatile(LOCK_PREFIX "addl %2,%0\n"
6843 +
6844 +#ifdef CONFIG_PAX_REFCOUNT
6845 + "jno 0f\n"
6846 + LOCK_PREFIX "subl %2,%0\n"
6847 + "int $4\n0:\n"
6848 + _ASM_EXTABLE(0b, 0b)
6849 +#endif
6850 +
6851 + "sets %1\n"
6852 : "+m" (v->counter), "=qm" (c)
6853 : "ir" (i) : "memory");
6854 return c;
6855 @@ -180,6 +342,46 @@ static inline int atomic_add_return(int
6856 #endif
6857 /* Modern 486+ processor */
6858 __i = i;
6859 + asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
6860 +
6861 +#ifdef CONFIG_PAX_REFCOUNT
6862 + "jno 0f\n"
6863 + "movl %0, %1\n"
6864 + "int $4\n0:\n"
6865 + _ASM_EXTABLE(0b, 0b)
6866 +#endif
6867 +
6868 + : "+r" (i), "+m" (v->counter)
6869 + : : "memory");
6870 + return i + __i;
6871 +
6872 +#ifdef CONFIG_M386
6873 +no_xadd: /* Legacy 386 processor */
6874 + local_irq_save(flags);
6875 + __i = atomic_read(v);
6876 + atomic_set(v, i + __i);
6877 + local_irq_restore(flags);
6878 + return i + __i;
6879 +#endif
6880 +}
6881 +
6882 +/**
6883 + * atomic_add_return_unchecked - add integer and return
6884 + * @v: pointer of type atomic_unchecked_t
6885 + * @i: integer value to add
6886 + *
6887 + * Atomically adds @i to @v and returns @i + @v
6888 + */
6889 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
6890 +{
6891 + int __i;
6892 +#ifdef CONFIG_M386
6893 + unsigned long flags;
6894 + if (unlikely(boot_cpu_data.x86 <= 3))
6895 + goto no_xadd;
6896 +#endif
6897 + /* Modern 486+ processor */
6898 + __i = i;
6899 asm volatile(LOCK_PREFIX "xaddl %0, %1"
6900 : "+r" (i), "+m" (v->counter)
6901 : : "memory");
6902 @@ -208,6 +410,10 @@ static inline int atomic_sub_return(int
6903 }
6904
6905 #define atomic_inc_return(v) (atomic_add_return(1, v))
6906 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
6907 +{
6908 + return atomic_add_return_unchecked(1, v);
6909 +}
6910 #define atomic_dec_return(v) (atomic_sub_return(1, v))
6911
6912 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
6913 @@ -215,11 +421,21 @@ static inline int atomic_cmpxchg(atomic_
6914 return cmpxchg(&v->counter, old, new);
6915 }
6916
6917 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
6918 +{
6919 + return cmpxchg(&v->counter, old, new);
6920 +}
6921 +
6922 static inline int atomic_xchg(atomic_t *v, int new)
6923 {
6924 return xchg(&v->counter, new);
6925 }
6926
6927 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
6928 +{
6929 + return xchg(&v->counter, new);
6930 +}
6931 +
6932 /**
6933 * atomic_add_unless - add unless the number is already a given value
6934 * @v: pointer of type atomic_t
6935 @@ -231,21 +447,77 @@ static inline int atomic_xchg(atomic_t *
6936 */
6937 static inline int atomic_add_unless(atomic_t *v, int a, int u)
6938 {
6939 - int c, old;
6940 + int c, old, new;
6941 c = atomic_read(v);
6942 for (;;) {
6943 - if (unlikely(c == (u)))
6944 + if (unlikely(c == u))
6945 break;
6946 - old = atomic_cmpxchg((v), c, c + (a));
6947 +
6948 + asm volatile("addl %2,%0\n"
6949 +
6950 +#ifdef CONFIG_PAX_REFCOUNT
6951 + "jno 0f\n"
6952 + "subl %2,%0\n"
6953 + "int $4\n0:\n"
6954 + _ASM_EXTABLE(0b, 0b)
6955 +#endif
6956 +
6957 + : "=r" (new)
6958 + : "0" (c), "ir" (a));
6959 +
6960 + old = atomic_cmpxchg(v, c, new);
6961 if (likely(old == c))
6962 break;
6963 c = old;
6964 }
6965 - return c != (u);
6966 + return c != u;
6967 }
6968
6969 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
6970
6971 +/**
6972 + * atomic_inc_not_zero_hint - increment if not null
6973 + * @v: pointer of type atomic_t
6974 + * @hint: probable value of the atomic before the increment
6975 + *
6976 + * This version of atomic_inc_not_zero() gives a hint of probable
6977 + * value of the atomic. This helps processor to not read the memory
6978 + * before doing the atomic read/modify/write cycle, lowering
6979 + * number of bus transactions on some arches.
6980 + *
6981 + * Returns: 0 if increment was not done, 1 otherwise.
6982 + */
6983 +#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
6984 +static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
6985 +{
6986 + int val, c = hint, new;
6987 +
6988 + /* sanity test, should be removed by compiler if hint is a constant */
6989 + if (!hint)
6990 + return atomic_inc_not_zero(v);
6991 +
6992 + do {
6993 + asm volatile("incl %0\n"
6994 +
6995 +#ifdef CONFIG_PAX_REFCOUNT
6996 + "jno 0f\n"
6997 + "decl %0\n"
6998 + "int $4\n0:\n"
6999 + _ASM_EXTABLE(0b, 0b)
7000 +#endif
7001 +
7002 + : "=r" (new)
7003 + : "0" (c));
7004 +
7005 + val = atomic_cmpxchg(v, c, new);
7006 + if (val == c)
7007 + return 1;
7008 + c = val;
7009 + } while (c);
7010 +
7011 + return 0;
7012 +}
7013 +
7014 /*
7015 * atomic_dec_if_positive - decrement by 1 if old value positive
7016 * @v: pointer of type atomic_t
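
The reworked atomic_add_unless() (and the new atomic_inc_not_zero_hint()) are compare-and-swap loops: compute the candidate value, with the overflow check folded into the addition under CONFIG_PAX_REFCOUNT, then try to install it with cmpxchg and retry if another CPU raced in. A hedged user-space rendering of the same loop, using the GCC __atomic builtins rather than the kernel's cmpxchg() (function name is illustrative):

#include <stdbool.h>
#include <stdio.h>

/* Sketch of the atomic_add_unless() loop: add @a to *v unless *v == u,
 * returning true if the add was performed.  The kernel version additionally
 * traps on signed overflow when CONFIG_PAX_REFCOUNT is enabled. */
static bool add_unless(int *v, int a, int u)
{
	int c = __atomic_load_n(v, __ATOMIC_RELAXED);

	for (;;) {
		if (c == u)
			return false;
		/* on failure, c is updated with the current value and the
		 * loop retries, just like the cmpxchg() loop in the hunk */
		if (__atomic_compare_exchange_n(v, &c, c + a, false,
						__ATOMIC_SEQ_CST,
						__ATOMIC_RELAXED))
			return true;
	}
}

int main(void)
{
	int refs = 1;

	printf("%d\n", add_unless(&refs, 1, 0));	/* 1: reference taken */
	refs = 0;
	printf("%d\n", add_unless(&refs, 1, 0));	/* 0: already zero */
	return 0;
}
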
7017 diff -urNp linux-3.0.8/arch/x86/include/asm/bitops.h linux-3.0.8/arch/x86/include/asm/bitops.h
7018 --- linux-3.0.8/arch/x86/include/asm/bitops.h 2011-07-21 22:17:23.000000000 -0400
7019 +++ linux-3.0.8/arch/x86/include/asm/bitops.h 2011-08-23 21:47:55.000000000 -0400
7020 @@ -38,7 +38,7 @@
7021 * a mask operation on a byte.
7022 */
7023 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
7024 -#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
7025 +#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
7026 #define CONST_MASK(nr) (1 << ((nr) & 7))
7027
7028 /**
7029 diff -urNp linux-3.0.8/arch/x86/include/asm/boot.h linux-3.0.8/arch/x86/include/asm/boot.h
7030 --- linux-3.0.8/arch/x86/include/asm/boot.h 2011-07-21 22:17:23.000000000 -0400
7031 +++ linux-3.0.8/arch/x86/include/asm/boot.h 2011-08-23 21:47:55.000000000 -0400
7032 @@ -11,10 +11,15 @@
7033 #include <asm/pgtable_types.h>
7034
7035 /* Physical address where kernel should be loaded. */
7036 -#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
7037 +#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
7038 + (CONFIG_PHYSICAL_ALIGN - 1)) \
7039 & ~(CONFIG_PHYSICAL_ALIGN - 1))
7040
7041 +#ifndef __ASSEMBLY__
7042 +extern unsigned char __LOAD_PHYSICAL_ADDR[];
7043 +#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
7044 +#endif
7045 +
7046 /* Minimum kernel alignment, as a power of two */
7047 #ifdef CONFIG_X86_64
7048 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
7049 diff -urNp linux-3.0.8/arch/x86/include/asm/cacheflush.h linux-3.0.8/arch/x86/include/asm/cacheflush.h
7050 --- linux-3.0.8/arch/x86/include/asm/cacheflush.h 2011-07-21 22:17:23.000000000 -0400
7051 +++ linux-3.0.8/arch/x86/include/asm/cacheflush.h 2011-08-23 21:47:55.000000000 -0400
7052 @@ -26,7 +26,7 @@ static inline unsigned long get_page_mem
7053 unsigned long pg_flags = pg->flags & _PGMT_MASK;
7054
7055 if (pg_flags == _PGMT_DEFAULT)
7056 - return -1;
7057 + return ~0UL;
7058 else if (pg_flags == _PGMT_WC)
7059 return _PAGE_CACHE_WC;
7060 else if (pg_flags == _PGMT_UC_MINUS)
7061 diff -urNp linux-3.0.8/arch/x86/include/asm/cache.h linux-3.0.8/arch/x86/include/asm/cache.h
7062 --- linux-3.0.8/arch/x86/include/asm/cache.h 2011-07-21 22:17:23.000000000 -0400
7063 +++ linux-3.0.8/arch/x86/include/asm/cache.h 2011-08-23 21:47:55.000000000 -0400
7064 @@ -5,12 +5,13 @@
7065
7066 /* L1 cache line size */
7067 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
7068 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7069 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7070
7071 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
7072 +#define __read_only __attribute__((__section__(".data..read_only")))
7073
7074 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
7075 -#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
7076 +#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
7077
7078 #ifdef CONFIG_X86_VSMP
7079 #ifdef CONFIG_SMP
7080 diff -urNp linux-3.0.8/arch/x86/include/asm/checksum_32.h linux-3.0.8/arch/x86/include/asm/checksum_32.h
7081 --- linux-3.0.8/arch/x86/include/asm/checksum_32.h 2011-07-21 22:17:23.000000000 -0400
7082 +++ linux-3.0.8/arch/x86/include/asm/checksum_32.h 2011-08-23 21:47:55.000000000 -0400
7083 @@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_gene
7084 int len, __wsum sum,
7085 int *src_err_ptr, int *dst_err_ptr);
7086
7087 +asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
7088 + int len, __wsum sum,
7089 + int *src_err_ptr, int *dst_err_ptr);
7090 +
7091 +asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
7092 + int len, __wsum sum,
7093 + int *src_err_ptr, int *dst_err_ptr);
7094 +
7095 /*
7096 * Note: when you get a NULL pointer exception here this means someone
7097 * passed in an incorrect kernel address to one of these functions.
7098 @@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_f
7099 int *err_ptr)
7100 {
7101 might_sleep();
7102 - return csum_partial_copy_generic((__force void *)src, dst,
7103 + return csum_partial_copy_generic_from_user((__force void *)src, dst,
7104 len, sum, err_ptr, NULL);
7105 }
7106
7107 @@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_us
7108 {
7109 might_sleep();
7110 if (access_ok(VERIFY_WRITE, dst, len))
7111 - return csum_partial_copy_generic(src, (__force void *)dst,
7112 + return csum_partial_copy_generic_to_user(src, (__force void *)dst,
7113 len, sum, NULL, err_ptr);
7114
7115 if (len)
7116 diff -urNp linux-3.0.8/arch/x86/include/asm/cpufeature.h linux-3.0.8/arch/x86/include/asm/cpufeature.h
7117 --- linux-3.0.8/arch/x86/include/asm/cpufeature.h 2011-07-21 22:17:23.000000000 -0400
7118 +++ linux-3.0.8/arch/x86/include/asm/cpufeature.h 2011-08-23 21:47:55.000000000 -0400
7119 @@ -358,7 +358,7 @@ static __always_inline __pure bool __sta
7120 ".section .discard,\"aw\",@progbits\n"
7121 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
7122 ".previous\n"
7123 - ".section .altinstr_replacement,\"ax\"\n"
7124 + ".section .altinstr_replacement,\"a\"\n"
7125 "3: movb $1,%0\n"
7126 "4:\n"
7127 ".previous\n"
7128 diff -urNp linux-3.0.8/arch/x86/include/asm/desc_defs.h linux-3.0.8/arch/x86/include/asm/desc_defs.h
7129 --- linux-3.0.8/arch/x86/include/asm/desc_defs.h 2011-07-21 22:17:23.000000000 -0400
7130 +++ linux-3.0.8/arch/x86/include/asm/desc_defs.h 2011-08-23 21:47:55.000000000 -0400
7131 @@ -31,6 +31,12 @@ struct desc_struct {
7132 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
7133 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
7134 };
7135 + struct {
7136 + u16 offset_low;
7137 + u16 seg;
7138 + unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
7139 + unsigned offset_high: 16;
7140 + } gate;
7141 };
7142 } __attribute__((packed));
7143
7144 diff -urNp linux-3.0.8/arch/x86/include/asm/desc.h linux-3.0.8/arch/x86/include/asm/desc.h
7145 --- linux-3.0.8/arch/x86/include/asm/desc.h 2011-07-21 22:17:23.000000000 -0400
7146 +++ linux-3.0.8/arch/x86/include/asm/desc.h 2011-08-23 21:47:55.000000000 -0400
7147 @@ -4,6 +4,7 @@
7148 #include <asm/desc_defs.h>
7149 #include <asm/ldt.h>
7150 #include <asm/mmu.h>
7151 +#include <asm/pgtable.h>
7152
7153 #include <linux/smp.h>
7154
7155 @@ -16,6 +17,7 @@ static inline void fill_ldt(struct desc_
7156
7157 desc->type = (info->read_exec_only ^ 1) << 1;
7158 desc->type |= info->contents << 2;
7159 + desc->type |= info->seg_not_present ^ 1;
7160
7161 desc->s = 1;
7162 desc->dpl = 0x3;
7163 @@ -34,17 +36,12 @@ static inline void fill_ldt(struct desc_
7164 }
7165
7166 extern struct desc_ptr idt_descr;
7167 -extern gate_desc idt_table[];
7168 -
7169 -struct gdt_page {
7170 - struct desc_struct gdt[GDT_ENTRIES];
7171 -} __attribute__((aligned(PAGE_SIZE)));
7172 -
7173 -DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
7174 +extern gate_desc idt_table[256];
7175
7176 +extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
7177 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
7178 {
7179 - return per_cpu(gdt_page, cpu).gdt;
7180 + return cpu_gdt_table[cpu];
7181 }
7182
7183 #ifdef CONFIG_X86_64
7184 @@ -69,8 +66,14 @@ static inline void pack_gate(gate_desc *
7185 unsigned long base, unsigned dpl, unsigned flags,
7186 unsigned short seg)
7187 {
7188 - gate->a = (seg << 16) | (base & 0xffff);
7189 - gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
7190 + gate->gate.offset_low = base;
7191 + gate->gate.seg = seg;
7192 + gate->gate.reserved = 0;
7193 + gate->gate.type = type;
7194 + gate->gate.s = 0;
7195 + gate->gate.dpl = dpl;
7196 + gate->gate.p = 1;
7197 + gate->gate.offset_high = base >> 16;
7198 }
7199
7200 #endif
7201 @@ -115,12 +118,16 @@ static inline void paravirt_free_ldt(str
7202
7203 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
7204 {
7205 + pax_open_kernel();
7206 memcpy(&idt[entry], gate, sizeof(*gate));
7207 + pax_close_kernel();
7208 }
7209
7210 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
7211 {
7212 + pax_open_kernel();
7213 memcpy(&ldt[entry], desc, 8);
7214 + pax_close_kernel();
7215 }
7216
7217 static inline void
7218 @@ -134,7 +141,9 @@ native_write_gdt_entry(struct desc_struc
7219 default: size = sizeof(*gdt); break;
7220 }
7221
7222 + pax_open_kernel();
7223 memcpy(&gdt[entry], desc, size);
7224 + pax_close_kernel();
7225 }
7226
7227 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
7228 @@ -207,7 +216,9 @@ static inline void native_set_ldt(const
7229
7230 static inline void native_load_tr_desc(void)
7231 {
7232 + pax_open_kernel();
7233 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
7234 + pax_close_kernel();
7235 }
7236
7237 static inline void native_load_gdt(const struct desc_ptr *dtr)
7238 @@ -244,8 +255,10 @@ static inline void native_load_tls(struc
7239 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
7240 unsigned int i;
7241
7242 + pax_open_kernel();
7243 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
7244 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
7245 + pax_close_kernel();
7246 }
7247
7248 #define _LDT_empty(info) \
7249 @@ -307,7 +320,7 @@ static inline void set_desc_limit(struct
7250 desc->limit = (limit >> 16) & 0xf;
7251 }
7252
7253 -static inline void _set_gate(int gate, unsigned type, void *addr,
7254 +static inline void _set_gate(int gate, unsigned type, const void *addr,
7255 unsigned dpl, unsigned ist, unsigned seg)
7256 {
7257 gate_desc s;
7258 @@ -326,7 +339,7 @@ static inline void _set_gate(int gate, u
7259 * Pentium F0 0F bugfix can have resulted in the mapped
7260 * IDT being write-protected.
7261 */
7262 -static inline void set_intr_gate(unsigned int n, void *addr)
7263 +static inline void set_intr_gate(unsigned int n, const void *addr)
7264 {
7265 BUG_ON((unsigned)n > 0xFF);
7266 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
7267 @@ -356,19 +369,19 @@ static inline void alloc_intr_gate(unsig
7268 /*
7269 * This routine sets up an interrupt gate at directory privilege level 3.
7270 */
7271 -static inline void set_system_intr_gate(unsigned int n, void *addr)
7272 +static inline void set_system_intr_gate(unsigned int n, const void *addr)
7273 {
7274 BUG_ON((unsigned)n > 0xFF);
7275 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
7276 }
7277
7278 -static inline void set_system_trap_gate(unsigned int n, void *addr)
7279 +static inline void set_system_trap_gate(unsigned int n, const void *addr)
7280 {
7281 BUG_ON((unsigned)n > 0xFF);
7282 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
7283 }
7284
7285 -static inline void set_trap_gate(unsigned int n, void *addr)
7286 +static inline void set_trap_gate(unsigned int n, const void *addr)
7287 {
7288 BUG_ON((unsigned)n > 0xFF);
7289 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
7290 @@ -377,19 +390,31 @@ static inline void set_trap_gate(unsigne
7291 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
7292 {
7293 BUG_ON((unsigned)n > 0xFF);
7294 - _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
7295 + _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
7296 }
7297
7298 -static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
7299 +static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
7300 {
7301 BUG_ON((unsigned)n > 0xFF);
7302 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
7303 }
7304
7305 -static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
7306 +static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
7307 {
7308 BUG_ON((unsigned)n > 0xFF);
7309 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
7310 }
7311
7312 +#ifdef CONFIG_X86_32
7313 +static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
7314 +{
7315 + struct desc_struct d;
7316 +
7317 + if (likely(limit))
7318 + limit = (limit - 1UL) >> PAGE_SHIFT;
7319 + pack_descriptor(&d, base, limit, 0xFB, 0xC);
7320 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
7321 +}
7322 +#endif
7323 +
7324 #endif /* _ASM_X86_DESC_H */
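
The gate view added to struct desc_struct lets pack_gate() fill an IDT entry field by field instead of assembling the raw a/b words by hand. The stand-alone sketch below (layout copied from the desc_defs.h hunk; GCC's low-to-high bit-field allocation on little-endian x86 is assumed, and the union is local to the example) checks that the two encodings agree for a sample handler address:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Local copy of the two views of an IDT entry, per the hunk above. */
union gate_view {
	struct { uint32_t a, b; } words;
	struct {
		uint16_t offset_low;
		uint16_t seg;
		unsigned reserved : 8, type : 4, s : 1, dpl : 2, p : 1;
		unsigned offset_high : 16;
	} __attribute__((packed)) gate;
};

int main(void)
{
	uint32_t base = 0xc0123456, seg = 0x60, type = 0xe, dpl = 0;
	union gate_view oldg, newg;

	/* old-style packing, as in the lines removed from pack_gate() */
	oldg.words.a = (seg << 16) | (base & 0xffff);
	oldg.words.b = (base & 0xffff0000) |
		       (((0x80 | type | (dpl << 5)) & 0xff) << 8);

	/* new-style field-by-field packing from the desc.h hunk */
	newg.gate.offset_low = base;
	newg.gate.seg = seg;
	newg.gate.reserved = 0;
	newg.gate.type = type;
	newg.gate.s = 0;
	newg.gate.dpl = dpl;
	newg.gate.p = 1;
	newg.gate.offset_high = base >> 16;

	assert(oldg.words.a == newg.words.a && oldg.words.b == newg.words.b);
	printf("a=%08x b=%08x\n", newg.words.a, newg.words.b);
	return 0;
}
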
7325 diff -urNp linux-3.0.8/arch/x86/include/asm/e820.h linux-3.0.8/arch/x86/include/asm/e820.h
7326 --- linux-3.0.8/arch/x86/include/asm/e820.h 2011-07-21 22:17:23.000000000 -0400
7327 +++ linux-3.0.8/arch/x86/include/asm/e820.h 2011-08-23 21:47:55.000000000 -0400
7328 @@ -69,7 +69,7 @@ struct e820map {
7329 #define ISA_START_ADDRESS 0xa0000
7330 #define ISA_END_ADDRESS 0x100000
7331
7332 -#define BIOS_BEGIN 0x000a0000
7333 +#define BIOS_BEGIN 0x000c0000
7334 #define BIOS_END 0x00100000
7335
7336 #define BIOS_ROM_BASE 0xffe00000
7337 diff -urNp linux-3.0.8/arch/x86/include/asm/elf.h linux-3.0.8/arch/x86/include/asm/elf.h
7338 --- linux-3.0.8/arch/x86/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
7339 +++ linux-3.0.8/arch/x86/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
7340 @@ -237,7 +237,25 @@ extern int force_personality32;
7341 the loader. We need to make sure that it is out of the way of the program
7342 that it will "exec", and that there is sufficient room for the brk. */
7343
7344 +#ifdef CONFIG_PAX_SEGMEXEC
7345 +#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
7346 +#else
7347 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
7348 +#endif
7349 +
7350 +#ifdef CONFIG_PAX_ASLR
7351 +#ifdef CONFIG_X86_32
7352 +#define PAX_ELF_ET_DYN_BASE 0x10000000UL
7353 +
7354 +#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
7355 +#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
7356 +#else
7357 +#define PAX_ELF_ET_DYN_BASE 0x400000UL
7358 +
7359 +#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
7360 +#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
7361 +#endif
7362 +#endif
7363
7364 /* This yields a mask that user programs can use to figure out what
7365 instruction set this CPU supports. This could be done in user space,
7366 @@ -290,9 +308,7 @@ do { \
7367
7368 #define ARCH_DLINFO \
7369 do { \
7370 - if (vdso_enabled) \
7371 - NEW_AUX_ENT(AT_SYSINFO_EHDR, \
7372 - (unsigned long)current->mm->context.vdso); \
7373 + NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
7374 } while (0)
7375
7376 #define AT_SYSINFO 32
7377 @@ -303,7 +319,7 @@ do { \
7378
7379 #endif /* !CONFIG_X86_32 */
7380
7381 -#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
7382 +#define VDSO_CURRENT_BASE (current->mm->context.vdso)
7383
7384 #define VDSO_ENTRY \
7385 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
7386 @@ -317,7 +333,4 @@ extern int arch_setup_additional_pages(s
7387 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
7388 #define compat_arch_setup_additional_pages syscall32_setup_pages
7389
7390 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
7391 -#define arch_randomize_brk arch_randomize_brk
7392 -
7393 #endif /* _ASM_X86_ELF_H */
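
PAX_DELTA_MMAP_LEN and PAX_DELTA_STACK_LEN are counts of random bits applied at page granularity, so the randomisation span is 2^(bits + PAGE_SHIFT) bytes. A small worked example (the helper is illustrative only, assuming 4 KiB pages and the usual 47-bit x86-64 user address space for TASK_SIZE_MAX_SHIFT):

#include <stdio.h>

/* Illustrative helper only (not a kernel function): bytes covered by an
 * n-bit, page-granular randomisation delta. */
static unsigned long long span_bytes(unsigned bits, unsigned page_shift)
{
	return 1ULL << (bits + page_shift);
}

int main(void)
{
	/* i386, non-SEGMEXEC: PAX_DELTA_MMAP_LEN == 16, 4 KiB pages */
	printf("i386 mmap span:   %llu MiB\n", span_bytes(16, 12) >> 20);

	/* x86-64 native, assuming TASK_SIZE_MAX_SHIFT == 47:
	 * 47 - PAGE_SHIFT(12) - 3 == 32 bits of delta */
	printf("x86-64 mmap span: %llu TiB\n", span_bytes(32, 12) >> 40);
	return 0;
}
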
7394 diff -urNp linux-3.0.8/arch/x86/include/asm/emergency-restart.h linux-3.0.8/arch/x86/include/asm/emergency-restart.h
7395 --- linux-3.0.8/arch/x86/include/asm/emergency-restart.h 2011-07-21 22:17:23.000000000 -0400
7396 +++ linux-3.0.8/arch/x86/include/asm/emergency-restart.h 2011-08-23 21:47:55.000000000 -0400
7397 @@ -15,6 +15,6 @@ enum reboot_type {
7398
7399 extern enum reboot_type reboot_type;
7400
7401 -extern void machine_emergency_restart(void);
7402 +extern void machine_emergency_restart(void) __noreturn;
7403
7404 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
7405 diff -urNp linux-3.0.8/arch/x86/include/asm/futex.h linux-3.0.8/arch/x86/include/asm/futex.h
7406 --- linux-3.0.8/arch/x86/include/asm/futex.h 2011-07-21 22:17:23.000000000 -0400
7407 +++ linux-3.0.8/arch/x86/include/asm/futex.h 2011-10-06 04:17:55.000000000 -0400
7408 @@ -12,16 +12,18 @@
7409 #include <asm/system.h>
7410
7411 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
7412 + typecheck(u32 __user *, uaddr); \
7413 asm volatile("1:\t" insn "\n" \
7414 "2:\t.section .fixup,\"ax\"\n" \
7415 "3:\tmov\t%3, %1\n" \
7416 "\tjmp\t2b\n" \
7417 "\t.previous\n" \
7418 _ASM_EXTABLE(1b, 3b) \
7419 - : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
7420 + : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr))\
7421 : "i" (-EFAULT), "0" (oparg), "1" (0))
7422
7423 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
7424 + typecheck(u32 __user *, uaddr); \
7425 asm volatile("1:\tmovl %2, %0\n" \
7426 "\tmovl\t%0, %3\n" \
7427 "\t" insn "\n" \
7428 @@ -34,7 +36,7 @@
7429 _ASM_EXTABLE(1b, 4b) \
7430 _ASM_EXTABLE(2b, 4b) \
7431 : "=&a" (oldval), "=&r" (ret), \
7432 - "+m" (*uaddr), "=&r" (tem) \
7433 + "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
7434 : "r" (oparg), "i" (-EFAULT), "1" (0))
7435
7436 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
7437 @@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser
7438
7439 switch (op) {
7440 case FUTEX_OP_SET:
7441 - __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
7442 + __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
7443 break;
7444 case FUTEX_OP_ADD:
7445 - __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
7446 + __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
7447 uaddr, oparg);
7448 break;
7449 case FUTEX_OP_OR:
7450 @@ -123,13 +125,13 @@ static inline int futex_atomic_cmpxchg_i
7451 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
7452 return -EFAULT;
7453
7454 - asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
7455 + asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"
7456 "2:\t.section .fixup, \"ax\"\n"
7457 "3:\tmov %3, %0\n"
7458 "\tjmp 2b\n"
7459 "\t.previous\n"
7460 _ASM_EXTABLE(1b, 3b)
7461 - : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
7462 + : "+r" (ret), "=a" (oldval), "+m" (*(u32 __user *)____m(uaddr))
7463 : "i" (-EFAULT), "r" (newval), "1" (oldval)
7464 : "memory"
7465 );
7466 diff -urNp linux-3.0.8/arch/x86/include/asm/hw_irq.h linux-3.0.8/arch/x86/include/asm/hw_irq.h
7467 --- linux-3.0.8/arch/x86/include/asm/hw_irq.h 2011-07-21 22:17:23.000000000 -0400
7468 +++ linux-3.0.8/arch/x86/include/asm/hw_irq.h 2011-08-23 21:47:55.000000000 -0400
7469 @@ -137,8 +137,8 @@ extern void setup_ioapic_dest(void);
7470 extern void enable_IO_APIC(void);
7471
7472 /* Statistics */
7473 -extern atomic_t irq_err_count;
7474 -extern atomic_t irq_mis_count;
7475 +extern atomic_unchecked_t irq_err_count;
7476 +extern atomic_unchecked_t irq_mis_count;
7477
7478 /* EISA */
7479 extern void eisa_set_level_irq(unsigned int irq);
7480 diff -urNp linux-3.0.8/arch/x86/include/asm/i387.h linux-3.0.8/arch/x86/include/asm/i387.h
7481 --- linux-3.0.8/arch/x86/include/asm/i387.h 2011-07-21 22:17:23.000000000 -0400
7482 +++ linux-3.0.8/arch/x86/include/asm/i387.h 2011-10-06 04:17:55.000000000 -0400
7483 @@ -92,6 +92,11 @@ static inline int fxrstor_checking(struc
7484 {
7485 int err;
7486
7487 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
7488 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
7489 + fx = (struct i387_fxsave_struct __user *)((void *)fx + PAX_USER_SHADOW_BASE);
7490 +#endif
7491 +
7492 /* See comment in fxsave() below. */
7493 #ifdef CONFIG_AS_FXSAVEQ
7494 asm volatile("1: fxrstorq %[fx]\n\t"
7495 @@ -121,6 +126,11 @@ static inline int fxsave_user(struct i38
7496 {
7497 int err;
7498
7499 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
7500 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
7501 + fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
7502 +#endif
7503 +
7504 /*
7505 * Clear the bytes not touched by the fxsave and reserved
7506 * for the SW usage.
7507 @@ -213,13 +223,8 @@ static inline void fpu_fxsave(struct fpu
7508 #endif /* CONFIG_X86_64 */
7509
7510 /* We need a safe address that is cheap to find and that is already
7511 - in L1 during context switch. The best choices are unfortunately
7512 - different for UP and SMP */
7513 -#ifdef CONFIG_SMP
7514 -#define safe_address (__per_cpu_offset[0])
7515 -#else
7516 -#define safe_address (kstat_cpu(0).cpustat.user)
7517 -#endif
7518 + in L1 during context switch. */
7519 +#define safe_address (init_tss[smp_processor_id()].x86_tss.sp0)
7520
7521 /*
7522 * These must be called with preempt disabled
7523 @@ -312,7 +317,7 @@ static inline void kernel_fpu_begin(void
7524 struct thread_info *me = current_thread_info();
7525 preempt_disable();
7526 if (me->status & TS_USEDFPU)
7527 - __save_init_fpu(me->task);
7528 + __save_init_fpu(current);
7529 else
7530 clts();
7531 }
7532 diff -urNp linux-3.0.8/arch/x86/include/asm/io.h linux-3.0.8/arch/x86/include/asm/io.h
7533 --- linux-3.0.8/arch/x86/include/asm/io.h 2011-07-21 22:17:23.000000000 -0400
7534 +++ linux-3.0.8/arch/x86/include/asm/io.h 2011-08-23 21:47:55.000000000 -0400
7535 @@ -196,6 +196,17 @@ extern void set_iounmap_nonlazy(void);
7536
7537 #include <linux/vmalloc.h>
7538
7539 +#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
7540 +static inline int valid_phys_addr_range(unsigned long addr, size_t count)
7541 +{
7542 + return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
7543 +}
7544 +
7545 +static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
7546 +{
7547 + return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
7548 +}
7549 +
7550 /*
7551 * Convert a virtual cached pointer to an uncached pointer
7552 */
7553 diff -urNp linux-3.0.8/arch/x86/include/asm/irqflags.h linux-3.0.8/arch/x86/include/asm/irqflags.h
7554 --- linux-3.0.8/arch/x86/include/asm/irqflags.h 2011-07-21 22:17:23.000000000 -0400
7555 +++ linux-3.0.8/arch/x86/include/asm/irqflags.h 2011-08-23 21:47:55.000000000 -0400
7556 @@ -140,6 +140,11 @@ static inline unsigned long arch_local_i
7557 sti; \
7558 sysexit
7559
7560 +#define GET_CR0_INTO_RDI mov %cr0, %rdi
7561 +#define SET_RDI_INTO_CR0 mov %rdi, %cr0
7562 +#define GET_CR3_INTO_RDI mov %cr3, %rdi
7563 +#define SET_RDI_INTO_CR3 mov %rdi, %cr3
7564 +
7565 #else
7566 #define INTERRUPT_RETURN iret
7567 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
7568 diff -urNp linux-3.0.8/arch/x86/include/asm/kprobes.h linux-3.0.8/arch/x86/include/asm/kprobes.h
7569 --- linux-3.0.8/arch/x86/include/asm/kprobes.h 2011-07-21 22:17:23.000000000 -0400
7570 +++ linux-3.0.8/arch/x86/include/asm/kprobes.h 2011-08-23 21:47:55.000000000 -0400
7571 @@ -37,13 +37,8 @@ typedef u8 kprobe_opcode_t;
7572 #define RELATIVEJUMP_SIZE 5
7573 #define RELATIVECALL_OPCODE 0xe8
7574 #define RELATIVE_ADDR_SIZE 4
7575 -#define MAX_STACK_SIZE 64
7576 -#define MIN_STACK_SIZE(ADDR) \
7577 - (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
7578 - THREAD_SIZE - (unsigned long)(ADDR))) \
7579 - ? (MAX_STACK_SIZE) \
7580 - : (((unsigned long)current_thread_info()) + \
7581 - THREAD_SIZE - (unsigned long)(ADDR)))
7582 +#define MAX_STACK_SIZE 64UL
7583 +#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
7584
7585 #define flush_insn_slot(p) do { } while (0)
7586
7587 diff -urNp linux-3.0.8/arch/x86/include/asm/kvm_host.h linux-3.0.8/arch/x86/include/asm/kvm_host.h
7588 --- linux-3.0.8/arch/x86/include/asm/kvm_host.h 2011-07-21 22:17:23.000000000 -0400
7589 +++ linux-3.0.8/arch/x86/include/asm/kvm_host.h 2011-08-26 19:49:56.000000000 -0400
7590 @@ -441,7 +441,7 @@ struct kvm_arch {
7591 unsigned int n_used_mmu_pages;
7592 unsigned int n_requested_mmu_pages;
7593 unsigned int n_max_mmu_pages;
7594 - atomic_t invlpg_counter;
7595 + atomic_unchecked_t invlpg_counter;
7596 struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
7597 /*
7598 * Hash table of struct kvm_mmu_page.
7599 @@ -619,7 +619,7 @@ struct kvm_x86_ops {
7600 enum x86_intercept_stage stage);
7601
7602 const struct trace_print_flags *exit_reasons_str;
7603 -};
7604 +} __do_const;
7605
7606 struct kvm_arch_async_pf {
7607 u32 token;
7608 diff -urNp linux-3.0.8/arch/x86/include/asm/local.h linux-3.0.8/arch/x86/include/asm/local.h
7609 --- linux-3.0.8/arch/x86/include/asm/local.h 2011-07-21 22:17:23.000000000 -0400
7610 +++ linux-3.0.8/arch/x86/include/asm/local.h 2011-08-23 21:47:55.000000000 -0400
7611 @@ -18,26 +18,58 @@ typedef struct {
7612
7613 static inline void local_inc(local_t *l)
7614 {
7615 - asm volatile(_ASM_INC "%0"
7616 + asm volatile(_ASM_INC "%0\n"
7617 +
7618 +#ifdef CONFIG_PAX_REFCOUNT
7619 + "jno 0f\n"
7620 + _ASM_DEC "%0\n"
7621 + "int $4\n0:\n"
7622 + _ASM_EXTABLE(0b, 0b)
7623 +#endif
7624 +
7625 : "+m" (l->a.counter));
7626 }
7627
7628 static inline void local_dec(local_t *l)
7629 {
7630 - asm volatile(_ASM_DEC "%0"
7631 + asm volatile(_ASM_DEC "%0\n"
7632 +
7633 +#ifdef CONFIG_PAX_REFCOUNT
7634 + "jno 0f\n"
7635 + _ASM_INC "%0\n"
7636 + "int $4\n0:\n"
7637 + _ASM_EXTABLE(0b, 0b)
7638 +#endif
7639 +
7640 : "+m" (l->a.counter));
7641 }
7642
7643 static inline void local_add(long i, local_t *l)
7644 {
7645 - asm volatile(_ASM_ADD "%1,%0"
7646 + asm volatile(_ASM_ADD "%1,%0\n"
7647 +
7648 +#ifdef CONFIG_PAX_REFCOUNT
7649 + "jno 0f\n"
7650 + _ASM_SUB "%1,%0\n"
7651 + "int $4\n0:\n"
7652 + _ASM_EXTABLE(0b, 0b)
7653 +#endif
7654 +
7655 : "+m" (l->a.counter)
7656 : "ir" (i));
7657 }
7658
7659 static inline void local_sub(long i, local_t *l)
7660 {
7661 - asm volatile(_ASM_SUB "%1,%0"
7662 + asm volatile(_ASM_SUB "%1,%0\n"
7663 +
7664 +#ifdef CONFIG_PAX_REFCOUNT
7665 + "jno 0f\n"
7666 + _ASM_ADD "%1,%0\n"
7667 + "int $4\n0:\n"
7668 + _ASM_EXTABLE(0b, 0b)
7669 +#endif
7670 +
7671 : "+m" (l->a.counter)
7672 : "ir" (i));
7673 }
7674 @@ -55,7 +87,16 @@ static inline int local_sub_and_test(lon
7675 {
7676 unsigned char c;
7677
7678 - asm volatile(_ASM_SUB "%2,%0; sete %1"
7679 + asm volatile(_ASM_SUB "%2,%0\n"
7680 +
7681 +#ifdef CONFIG_PAX_REFCOUNT
7682 + "jno 0f\n"
7683 + _ASM_ADD "%2,%0\n"
7684 + "int $4\n0:\n"
7685 + _ASM_EXTABLE(0b, 0b)
7686 +#endif
7687 +
7688 + "sete %1\n"
7689 : "+m" (l->a.counter), "=qm" (c)
7690 : "ir" (i) : "memory");
7691 return c;
7692 @@ -73,7 +114,16 @@ static inline int local_dec_and_test(loc
7693 {
7694 unsigned char c;
7695
7696 - asm volatile(_ASM_DEC "%0; sete %1"
7697 + asm volatile(_ASM_DEC "%0\n"
7698 +
7699 +#ifdef CONFIG_PAX_REFCOUNT
7700 + "jno 0f\n"
7701 + _ASM_INC "%0\n"
7702 + "int $4\n0:\n"
7703 + _ASM_EXTABLE(0b, 0b)
7704 +#endif
7705 +
7706 + "sete %1\n"
7707 : "+m" (l->a.counter), "=qm" (c)
7708 : : "memory");
7709 return c != 0;
7710 @@ -91,7 +141,16 @@ static inline int local_inc_and_test(loc
7711 {
7712 unsigned char c;
7713
7714 - asm volatile(_ASM_INC "%0; sete %1"
7715 + asm volatile(_ASM_INC "%0\n"
7716 +
7717 +#ifdef CONFIG_PAX_REFCOUNT
7718 + "jno 0f\n"
7719 + _ASM_DEC "%0\n"
7720 + "int $4\n0:\n"
7721 + _ASM_EXTABLE(0b, 0b)
7722 +#endif
7723 +
7724 + "sete %1\n"
7725 : "+m" (l->a.counter), "=qm" (c)
7726 : : "memory");
7727 return c != 0;
7728 @@ -110,7 +169,16 @@ static inline int local_add_negative(lon
7729 {
7730 unsigned char c;
7731
7732 - asm volatile(_ASM_ADD "%2,%0; sets %1"
7733 + asm volatile(_ASM_ADD "%2,%0\n"
7734 +
7735 +#ifdef CONFIG_PAX_REFCOUNT
7736 + "jno 0f\n"
7737 + _ASM_SUB "%2,%0\n"
7738 + "int $4\n0:\n"
7739 + _ASM_EXTABLE(0b, 0b)
7740 +#endif
7741 +
7742 + "sets %1\n"
7743 : "+m" (l->a.counter), "=qm" (c)
7744 : "ir" (i) : "memory");
7745 return c;
7746 @@ -133,7 +201,15 @@ static inline long local_add_return(long
7747 #endif
7748 /* Modern 486+ processor */
7749 __i = i;
7750 - asm volatile(_ASM_XADD "%0, %1;"
7751 + asm volatile(_ASM_XADD "%0, %1\n"
7752 +
7753 +#ifdef CONFIG_PAX_REFCOUNT
7754 + "jno 0f\n"
7755 + _ASM_MOV "%0,%1\n"
7756 + "int $4\n0:\n"
7757 + _ASM_EXTABLE(0b, 0b)
7758 +#endif
7759 +
7760 : "+r" (i), "+m" (l->a.counter)
7761 : : "memory");
7762 return i + __i;
7763 diff -urNp linux-3.0.8/arch/x86/include/asm/mman.h linux-3.0.8/arch/x86/include/asm/mman.h
7764 --- linux-3.0.8/arch/x86/include/asm/mman.h 2011-07-21 22:17:23.000000000 -0400
7765 +++ linux-3.0.8/arch/x86/include/asm/mman.h 2011-08-23 21:47:55.000000000 -0400
7766 @@ -5,4 +5,14 @@
7767
7768 #include <asm-generic/mman.h>
7769
7770 +#ifdef __KERNEL__
7771 +#ifndef __ASSEMBLY__
7772 +#ifdef CONFIG_X86_32
7773 +#define arch_mmap_check i386_mmap_check
7774 +int i386_mmap_check(unsigned long addr, unsigned long len,
7775 + unsigned long flags);
7776 +#endif
7777 +#endif
7778 +#endif
7779 +
7780 #endif /* _ASM_X86_MMAN_H */
7781 diff -urNp linux-3.0.8/arch/x86/include/asm/mmu_context.h linux-3.0.8/arch/x86/include/asm/mmu_context.h
7782 --- linux-3.0.8/arch/x86/include/asm/mmu_context.h 2011-07-21 22:17:23.000000000 -0400
7783 +++ linux-3.0.8/arch/x86/include/asm/mmu_context.h 2011-08-23 21:48:14.000000000 -0400
7784 @@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *m
7785
7786 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
7787 {
7788 +
7789 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
7790 + unsigned int i;
7791 + pgd_t *pgd;
7792 +
7793 + pax_open_kernel();
7794 + pgd = get_cpu_pgd(smp_processor_id());
7795 + for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
7796 + set_pgd_batched(pgd+i, native_make_pgd(0));
7797 + pax_close_kernel();
7798 +#endif
7799 +
7800 #ifdef CONFIG_SMP
7801 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
7802 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
7803 @@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_s
7804 struct task_struct *tsk)
7805 {
7806 unsigned cpu = smp_processor_id();
7807 +#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
7808 + int tlbstate = TLBSTATE_OK;
7809 +#endif
7810
7811 if (likely(prev != next)) {
7812 #ifdef CONFIG_SMP
7813 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
7814 + tlbstate = percpu_read(cpu_tlbstate.state);
7815 +#endif
7816 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
7817 percpu_write(cpu_tlbstate.active_mm, next);
7818 #endif
7819 cpumask_set_cpu(cpu, mm_cpumask(next));
7820
7821 /* Re-load page tables */
7822 +#ifdef CONFIG_PAX_PER_CPU_PGD
7823 + pax_open_kernel();
7824 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
7825 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
7826 + pax_close_kernel();
7827 + load_cr3(get_cpu_pgd(cpu));
7828 +#else
7829 load_cr3(next->pgd);
7830 +#endif
7831
7832 /* stop flush ipis for the previous mm */
7833 cpumask_clear_cpu(cpu, mm_cpumask(prev));
7834 @@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_s
7835 */
7836 if (unlikely(prev->context.ldt != next->context.ldt))
7837 load_LDT_nolock(&next->context);
7838 - }
7839 +
7840 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
7841 + if (!(__supported_pte_mask & _PAGE_NX)) {
7842 + smp_mb__before_clear_bit();
7843 + cpu_clear(cpu, prev->context.cpu_user_cs_mask);
7844 + smp_mb__after_clear_bit();
7845 + cpu_set(cpu, next->context.cpu_user_cs_mask);
7846 + }
7847 +#endif
7848 +
7849 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
7850 + if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
7851 + prev->context.user_cs_limit != next->context.user_cs_limit))
7852 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
7853 #ifdef CONFIG_SMP
7854 + else if (unlikely(tlbstate != TLBSTATE_OK))
7855 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
7856 +#endif
7857 +#endif
7858 +
7859 + }
7860 else {
7861 +
7862 +#ifdef CONFIG_PAX_PER_CPU_PGD
7863 + pax_open_kernel();
7864 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
7865 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
7866 + pax_close_kernel();
7867 + load_cr3(get_cpu_pgd(cpu));
7868 +#endif
7869 +
7870 +#ifdef CONFIG_SMP
7871 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
7872 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
7873
7874 @@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_s
7875 * tlb flush IPI delivery. We must reload CR3
7876 * to make sure to use no freed page tables.
7877 */
7878 +
7879 +#ifndef CONFIG_PAX_PER_CPU_PGD
7880 load_cr3(next->pgd);
7881 +#endif
7882 +
7883 load_LDT_nolock(&next->context);
7884 +
7885 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
7886 + if (!(__supported_pte_mask & _PAGE_NX))
7887 + cpu_set(cpu, next->context.cpu_user_cs_mask);
7888 +#endif
7889 +
7890 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
7891 +#ifdef CONFIG_PAX_PAGEEXEC
7892 + if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
7893 +#endif
7894 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
7895 +#endif
7896 +
7897 }
7898 - }
7899 #endif
7900 + }
7901 }
7902
7903 #define activate_mm(prev, next) \
7904 diff -urNp linux-3.0.8/arch/x86/include/asm/mmu.h linux-3.0.8/arch/x86/include/asm/mmu.h
7905 --- linux-3.0.8/arch/x86/include/asm/mmu.h 2011-07-21 22:17:23.000000000 -0400
7906 +++ linux-3.0.8/arch/x86/include/asm/mmu.h 2011-08-23 21:47:55.000000000 -0400
7907 @@ -9,7 +9,7 @@
7908 * we put the segment information here.
7909 */
7910 typedef struct {
7911 - void *ldt;
7912 + struct desc_struct *ldt;
7913 int size;
7914
7915 #ifdef CONFIG_X86_64
7916 @@ -18,7 +18,19 @@ typedef struct {
7917 #endif
7918
7919 struct mutex lock;
7920 - void *vdso;
7921 + unsigned long vdso;
7922 +
7923 +#ifdef CONFIG_X86_32
7924 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
7925 + unsigned long user_cs_base;
7926 + unsigned long user_cs_limit;
7927 +
7928 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
7929 + cpumask_t cpu_user_cs_mask;
7930 +#endif
7931 +
7932 +#endif
7933 +#endif
7934 } mm_context_t;
7935
7936 #ifdef CONFIG_SMP
7937 diff -urNp linux-3.0.8/arch/x86/include/asm/module.h linux-3.0.8/arch/x86/include/asm/module.h
7938 --- linux-3.0.8/arch/x86/include/asm/module.h 2011-07-21 22:17:23.000000000 -0400
7939 +++ linux-3.0.8/arch/x86/include/asm/module.h 2011-10-07 19:24:31.000000000 -0400
7940 @@ -5,6 +5,7 @@
7941
7942 #ifdef CONFIG_X86_64
7943 /* X86_64 does not define MODULE_PROC_FAMILY */
7944 +#define MODULE_PROC_FAMILY ""
7945 #elif defined CONFIG_M386
7946 #define MODULE_PROC_FAMILY "386 "
7947 #elif defined CONFIG_M486
7948 @@ -59,8 +60,18 @@
7949 #error unknown processor family
7950 #endif
7951
7952 -#ifdef CONFIG_X86_32
7953 -# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
7954 +#ifdef CONFIG_PAX_KERNEXEC
7955 +#define MODULE_PAX_KERNEXEC "KERNEXEC "
7956 +#else
7957 +#define MODULE_PAX_KERNEXEC ""
7958 #endif
7959
7960 +#ifdef CONFIG_PAX_MEMORY_UDEREF
7961 +#define MODULE_PAX_UDEREF "UDEREF "
7962 +#else
7963 +#define MODULE_PAX_UDEREF ""
7964 +#endif
7965 +
7966 +#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
7967 +
7968 #endif /* _ASM_X86_MODULE_H */
7969 diff -urNp linux-3.0.8/arch/x86/include/asm/page_64_types.h linux-3.0.8/arch/x86/include/asm/page_64_types.h
7970 --- linux-3.0.8/arch/x86/include/asm/page_64_types.h 2011-07-21 22:17:23.000000000 -0400
7971 +++ linux-3.0.8/arch/x86/include/asm/page_64_types.h 2011-08-23 21:47:55.000000000 -0400
7972 @@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
7973
7974 /* duplicated to the one in bootmem.h */
7975 extern unsigned long max_pfn;
7976 -extern unsigned long phys_base;
7977 +extern const unsigned long phys_base;
7978
7979 extern unsigned long __phys_addr(unsigned long);
7980 #define __phys_reloc_hide(x) (x)
7981 diff -urNp linux-3.0.8/arch/x86/include/asm/paravirt.h linux-3.0.8/arch/x86/include/asm/paravirt.h
7982 --- linux-3.0.8/arch/x86/include/asm/paravirt.h 2011-07-21 22:17:23.000000000 -0400
7983 +++ linux-3.0.8/arch/x86/include/asm/paravirt.h 2011-08-23 21:47:55.000000000 -0400
7984 @@ -658,6 +658,18 @@ static inline void set_pgd(pgd_t *pgdp,
7985 val);
7986 }
7987
7988 +static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
7989 +{
7990 + pgdval_t val = native_pgd_val(pgd);
7991 +
7992 + if (sizeof(pgdval_t) > sizeof(long))
7993 + PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
7994 + val, (u64)val >> 32);
7995 + else
7996 + PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
7997 + val);
7998 +}
7999 +
8000 static inline void pgd_clear(pgd_t *pgdp)
8001 {
8002 set_pgd(pgdp, __pgd(0));
8003 @@ -739,6 +751,21 @@ static inline void __set_fixmap(unsigned
8004 pv_mmu_ops.set_fixmap(idx, phys, flags);
8005 }
8006
8007 +#ifdef CONFIG_PAX_KERNEXEC
8008 +static inline unsigned long pax_open_kernel(void)
8009 +{
8010 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
8011 +}
8012 +
8013 +static inline unsigned long pax_close_kernel(void)
8014 +{
8015 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
8016 +}
8017 +#else
8018 +static inline unsigned long pax_open_kernel(void) { return 0; }
8019 +static inline unsigned long pax_close_kernel(void) { return 0; }
8020 +#endif
8021 +
8022 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
8023
8024 static inline int arch_spin_is_locked(struct arch_spinlock *lock)
8025 @@ -955,7 +982,7 @@ extern void default_banner(void);
8026
8027 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
8028 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
8029 -#define PARA_INDIRECT(addr) *%cs:addr
8030 +#define PARA_INDIRECT(addr) *%ss:addr
8031 #endif
8032
8033 #define INTERRUPT_RETURN \
8034 @@ -1032,6 +1059,21 @@ extern void default_banner(void);
8035 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
8036 CLBR_NONE, \
8037 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
8038 +
8039 +#define GET_CR0_INTO_RDI \
8040 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
8041 + mov %rax,%rdi
8042 +
8043 +#define SET_RDI_INTO_CR0 \
8044 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
8045 +
8046 +#define GET_CR3_INTO_RDI \
8047 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
8048 + mov %rax,%rdi
8049 +
8050 +#define SET_RDI_INTO_CR3 \
8051 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
8052 +
8053 #endif /* CONFIG_X86_32 */
8054
8055 #endif /* __ASSEMBLY__ */
8056 diff -urNp linux-3.0.8/arch/x86/include/asm/paravirt_types.h linux-3.0.8/arch/x86/include/asm/paravirt_types.h
8057 --- linux-3.0.8/arch/x86/include/asm/paravirt_types.h 2011-07-21 22:17:23.000000000 -0400
8058 +++ linux-3.0.8/arch/x86/include/asm/paravirt_types.h 2011-08-23 21:47:55.000000000 -0400
8059 @@ -78,19 +78,19 @@ struct pv_init_ops {
8060 */
8061 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
8062 unsigned long addr, unsigned len);
8063 -};
8064 +} __no_const;
8065
8066
8067 struct pv_lazy_ops {
8068 /* Set deferred update mode, used for batching operations. */
8069 void (*enter)(void);
8070 void (*leave)(void);
8071 -};
8072 +} __no_const;
8073
8074 struct pv_time_ops {
8075 unsigned long long (*sched_clock)(void);
8076 unsigned long (*get_tsc_khz)(void);
8077 -};
8078 +} __no_const;
8079
8080 struct pv_cpu_ops {
8081 /* hooks for various privileged instructions */
8082 @@ -186,7 +186,7 @@ struct pv_cpu_ops {
8083
8084 void (*start_context_switch)(struct task_struct *prev);
8085 void (*end_context_switch)(struct task_struct *next);
8086 -};
8087 +} __no_const;
8088
8089 struct pv_irq_ops {
8090 /*
8091 @@ -217,7 +217,7 @@ struct pv_apic_ops {
8092 unsigned long start_eip,
8093 unsigned long start_esp);
8094 #endif
8095 -};
8096 +} __no_const;
8097
8098 struct pv_mmu_ops {
8099 unsigned long (*read_cr2)(void);
8100 @@ -306,6 +306,7 @@ struct pv_mmu_ops {
8101 struct paravirt_callee_save make_pud;
8102
8103 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
8104 + void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
8105 #endif /* PAGETABLE_LEVELS == 4 */
8106 #endif /* PAGETABLE_LEVELS >= 3 */
8107
8108 @@ -317,6 +318,12 @@ struct pv_mmu_ops {
8109 an mfn. We can tell which is which from the index. */
8110 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
8111 phys_addr_t phys, pgprot_t flags);
8112 +
8113 +#ifdef CONFIG_PAX_KERNEXEC
8114 + unsigned long (*pax_open_kernel)(void);
8115 + unsigned long (*pax_close_kernel)(void);
8116 +#endif
8117 +
8118 };
8119
8120 struct arch_spinlock;
8121 @@ -327,7 +334,7 @@ struct pv_lock_ops {
8122 void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
8123 int (*spin_trylock)(struct arch_spinlock *lock);
8124 void (*spin_unlock)(struct arch_spinlock *lock);
8125 -};
8126 +} __no_const;
8127
8128 /* This contains all the paravirt structures: we get a convenient
8129 * number for each function using the offset which we use to indicate
8130 diff -urNp linux-3.0.8/arch/x86/include/asm/pgalloc.h linux-3.0.8/arch/x86/include/asm/pgalloc.h
8131 --- linux-3.0.8/arch/x86/include/asm/pgalloc.h 2011-07-21 22:17:23.000000000 -0400
8132 +++ linux-3.0.8/arch/x86/include/asm/pgalloc.h 2011-08-23 21:47:55.000000000 -0400
8133 @@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(s
8134 pmd_t *pmd, pte_t *pte)
8135 {
8136 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
8137 + set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
8138 +}
8139 +
8140 +static inline void pmd_populate_user(struct mm_struct *mm,
8141 + pmd_t *pmd, pte_t *pte)
8142 +{
8143 + paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
8144 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
8145 }
8146
8147 diff -urNp linux-3.0.8/arch/x86/include/asm/pgtable-2level.h linux-3.0.8/arch/x86/include/asm/pgtable-2level.h
8148 --- linux-3.0.8/arch/x86/include/asm/pgtable-2level.h 2011-07-21 22:17:23.000000000 -0400
8149 +++ linux-3.0.8/arch/x86/include/asm/pgtable-2level.h 2011-08-23 21:47:55.000000000 -0400
8150 @@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t
8151
8152 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
8153 {
8154 + pax_open_kernel();
8155 *pmdp = pmd;
8156 + pax_close_kernel();
8157 }
8158
8159 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
8160 diff -urNp linux-3.0.8/arch/x86/include/asm/pgtable_32.h linux-3.0.8/arch/x86/include/asm/pgtable_32.h
8161 --- linux-3.0.8/arch/x86/include/asm/pgtable_32.h 2011-07-21 22:17:23.000000000 -0400
8162 +++ linux-3.0.8/arch/x86/include/asm/pgtable_32.h 2011-08-23 21:47:55.000000000 -0400
8163 @@ -25,9 +25,6 @@
8164 struct mm_struct;
8165 struct vm_area_struct;
8166
8167 -extern pgd_t swapper_pg_dir[1024];
8168 -extern pgd_t initial_page_table[1024];
8169 -
8170 static inline void pgtable_cache_init(void) { }
8171 static inline void check_pgt_cache(void) { }
8172 void paging_init(void);
8173 @@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, u
8174 # include <asm/pgtable-2level.h>
8175 #endif
8176
8177 +extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
8178 +extern pgd_t initial_page_table[PTRS_PER_PGD];
8179 +#ifdef CONFIG_X86_PAE
8180 +extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
8181 +#endif
8182 +
8183 #if defined(CONFIG_HIGHPTE)
8184 #define pte_offset_map(dir, address) \
8185 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
8186 @@ -62,7 +65,9 @@ extern void set_pmd_pfn(unsigned long, u
8187 /* Clear a kernel PTE and flush it from the TLB */
8188 #define kpte_clear_flush(ptep, vaddr) \
8189 do { \
8190 + pax_open_kernel(); \
8191 pte_clear(&init_mm, (vaddr), (ptep)); \
8192 + pax_close_kernel(); \
8193 __flush_tlb_one((vaddr)); \
8194 } while (0)
8195
8196 @@ -74,6 +79,9 @@ do { \
8197
8198 #endif /* !__ASSEMBLY__ */
8199
8200 +#define HAVE_ARCH_UNMAPPED_AREA
8201 +#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
8202 +
8203 /*
8204 * kern_addr_valid() is (1) for FLATMEM and (0) for
8205 * SPARSEMEM and DISCONTIGMEM
8206 diff -urNp linux-3.0.8/arch/x86/include/asm/pgtable_32_types.h linux-3.0.8/arch/x86/include/asm/pgtable_32_types.h
8207 --- linux-3.0.8/arch/x86/include/asm/pgtable_32_types.h 2011-07-21 22:17:23.000000000 -0400
8208 +++ linux-3.0.8/arch/x86/include/asm/pgtable_32_types.h 2011-08-23 21:47:55.000000000 -0400
8209 @@ -8,7 +8,7 @@
8210 */
8211 #ifdef CONFIG_X86_PAE
8212 # include <asm/pgtable-3level_types.h>
8213 -# define PMD_SIZE (1UL << PMD_SHIFT)
8214 +# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
8215 # define PMD_MASK (~(PMD_SIZE - 1))
8216 #else
8217 # include <asm/pgtable-2level_types.h>
8218 @@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set
8219 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
8220 #endif
8221
8222 +#ifdef CONFIG_PAX_KERNEXEC
8223 +#ifndef __ASSEMBLY__
8224 +extern unsigned char MODULES_EXEC_VADDR[];
8225 +extern unsigned char MODULES_EXEC_END[];
8226 +#endif
8227 +#include <asm/boot.h>
8228 +#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
8229 +#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
8230 +#else
8231 +#define ktla_ktva(addr) (addr)
8232 +#define ktva_ktla(addr) (addr)
8233 +#endif
8234 +
8235 #define MODULES_VADDR VMALLOC_START
8236 #define MODULES_END VMALLOC_END
8237 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
8238 diff -urNp linux-3.0.8/arch/x86/include/asm/pgtable-3level.h linux-3.0.8/arch/x86/include/asm/pgtable-3level.h
8239 --- linux-3.0.8/arch/x86/include/asm/pgtable-3level.h 2011-07-21 22:17:23.000000000 -0400
8240 +++ linux-3.0.8/arch/x86/include/asm/pgtable-3level.h 2011-08-23 21:47:55.000000000 -0400
8241 @@ -38,12 +38,16 @@ static inline void native_set_pte_atomic
8242
8243 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
8244 {
8245 + pax_open_kernel();
8246 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
8247 + pax_close_kernel();
8248 }
8249
8250 static inline void native_set_pud(pud_t *pudp, pud_t pud)
8251 {
8252 + pax_open_kernel();
8253 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
8254 + pax_close_kernel();
8255 }
8256
8257 /*
8258 diff -urNp linux-3.0.8/arch/x86/include/asm/pgtable_64.h linux-3.0.8/arch/x86/include/asm/pgtable_64.h
8259 --- linux-3.0.8/arch/x86/include/asm/pgtable_64.h 2011-07-21 22:17:23.000000000 -0400
8260 +++ linux-3.0.8/arch/x86/include/asm/pgtable_64.h 2011-08-23 21:47:55.000000000 -0400
8261 @@ -16,10 +16,13 @@
8262
8263 extern pud_t level3_kernel_pgt[512];
8264 extern pud_t level3_ident_pgt[512];
8265 +extern pud_t level3_vmalloc_pgt[512];
8266 +extern pud_t level3_vmemmap_pgt[512];
8267 +extern pud_t level2_vmemmap_pgt[512];
8268 extern pmd_t level2_kernel_pgt[512];
8269 extern pmd_t level2_fixmap_pgt[512];
8270 -extern pmd_t level2_ident_pgt[512];
8271 -extern pgd_t init_level4_pgt[];
8272 +extern pmd_t level2_ident_pgt[512*2];
8273 +extern pgd_t init_level4_pgt[512];
8274
8275 #define swapper_pg_dir init_level4_pgt
8276
8277 @@ -61,7 +64,9 @@ static inline void native_set_pte_atomic
8278
8279 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
8280 {
8281 + pax_open_kernel();
8282 *pmdp = pmd;
8283 + pax_close_kernel();
8284 }
8285
8286 static inline void native_pmd_clear(pmd_t *pmd)
8287 @@ -107,6 +112,13 @@ static inline void native_pud_clear(pud_
8288
8289 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
8290 {
8291 + pax_open_kernel();
8292 + *pgdp = pgd;
8293 + pax_close_kernel();
8294 +}
8295 +
8296 +static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
8297 +{
8298 *pgdp = pgd;
8299 }
8300
8301 diff -urNp linux-3.0.8/arch/x86/include/asm/pgtable_64_types.h linux-3.0.8/arch/x86/include/asm/pgtable_64_types.h
8302 --- linux-3.0.8/arch/x86/include/asm/pgtable_64_types.h 2011-07-21 22:17:23.000000000 -0400
8303 +++ linux-3.0.8/arch/x86/include/asm/pgtable_64_types.h 2011-08-23 21:47:55.000000000 -0400
8304 @@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
8305 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
8306 #define MODULES_END _AC(0xffffffffff000000, UL)
8307 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
8308 +#define MODULES_EXEC_VADDR MODULES_VADDR
8309 +#define MODULES_EXEC_END MODULES_END
8310 +
8311 +#define ktla_ktva(addr) (addr)
8312 +#define ktva_ktla(addr) (addr)
8313
8314 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
8315 diff -urNp linux-3.0.8/arch/x86/include/asm/pgtable.h linux-3.0.8/arch/x86/include/asm/pgtable.h
8316 --- linux-3.0.8/arch/x86/include/asm/pgtable.h 2011-07-21 22:17:23.000000000 -0400
8317 +++ linux-3.0.8/arch/x86/include/asm/pgtable.h 2011-08-23 21:47:55.000000000 -0400
8318 @@ -44,6 +44,7 @@ extern struct mm_struct *pgd_page_get_mm
8319
8320 #ifndef __PAGETABLE_PUD_FOLDED
8321 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
8322 +#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
8323 #define pgd_clear(pgd) native_pgd_clear(pgd)
8324 #endif
8325
8326 @@ -81,12 +82,51 @@ extern struct mm_struct *pgd_page_get_mm
8327
8328 #define arch_end_context_switch(prev) do {} while(0)
8329
8330 +#define pax_open_kernel() native_pax_open_kernel()
8331 +#define pax_close_kernel() native_pax_close_kernel()
8332 #endif /* CONFIG_PARAVIRT */
8333
8334 +#define __HAVE_ARCH_PAX_OPEN_KERNEL
8335 +#define __HAVE_ARCH_PAX_CLOSE_KERNEL
8336 +
8337 +#ifdef CONFIG_PAX_KERNEXEC
8338 +static inline unsigned long native_pax_open_kernel(void)
8339 +{
8340 + unsigned long cr0;
8341 +
8342 + preempt_disable();
8343 + barrier();
8344 + cr0 = read_cr0() ^ X86_CR0_WP;
8345 + BUG_ON(unlikely(cr0 & X86_CR0_WP));
8346 + write_cr0(cr0);
8347 + return cr0 ^ X86_CR0_WP;
8348 +}
8349 +
8350 +static inline unsigned long native_pax_close_kernel(void)
8351 +{
8352 + unsigned long cr0;
8353 +
8354 + cr0 = read_cr0() ^ X86_CR0_WP;
8355 + BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
8356 + write_cr0(cr0);
8357 + barrier();
8358 + preempt_enable_no_resched();
8359 + return cr0 ^ X86_CR0_WP;
8360 +}
8361 +#else
8362 +static inline unsigned long native_pax_open_kernel(void) { return 0; }
8363 +static inline unsigned long native_pax_close_kernel(void) { return 0; }
8364 +#endif
8365 +
8366 /*
8367 * The following only work if pte_present() is true.
8368 * Undefined behaviour if not..
8369 */
8370 +static inline int pte_user(pte_t pte)
8371 +{
8372 + return pte_val(pte) & _PAGE_USER;
8373 +}
8374 +
8375 static inline int pte_dirty(pte_t pte)
8376 {
8377 return pte_flags(pte) & _PAGE_DIRTY;
8378 @@ -196,9 +236,29 @@ static inline pte_t pte_wrprotect(pte_t
8379 return pte_clear_flags(pte, _PAGE_RW);
8380 }
8381
8382 +static inline pte_t pte_mkread(pte_t pte)
8383 +{
8384 + return __pte(pte_val(pte) | _PAGE_USER);
8385 +}
8386 +
8387 static inline pte_t pte_mkexec(pte_t pte)
8388 {
8389 - return pte_clear_flags(pte, _PAGE_NX);
8390 +#ifdef CONFIG_X86_PAE
8391 + if (__supported_pte_mask & _PAGE_NX)
8392 + return pte_clear_flags(pte, _PAGE_NX);
8393 + else
8394 +#endif
8395 + return pte_set_flags(pte, _PAGE_USER);
8396 +}
8397 +
8398 +static inline pte_t pte_exprotect(pte_t pte)
8399 +{
8400 +#ifdef CONFIG_X86_PAE
8401 + if (__supported_pte_mask & _PAGE_NX)
8402 + return pte_set_flags(pte, _PAGE_NX);
8403 + else
8404 +#endif
8405 + return pte_clear_flags(pte, _PAGE_USER);
8406 }
8407
8408 static inline pte_t pte_mkdirty(pte_t pte)
8409 @@ -390,6 +450,15 @@ pte_t *populate_extra_pte(unsigned long
8410 #endif
8411
8412 #ifndef __ASSEMBLY__
8413 +
8414 +#ifdef CONFIG_PAX_PER_CPU_PGD
8415 +extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
8416 +static inline pgd_t *get_cpu_pgd(unsigned int cpu)
8417 +{
8418 + return cpu_pgd[cpu];
8419 +}
8420 +#endif
8421 +
8422 #include <linux/mm_types.h>
8423
8424 static inline int pte_none(pte_t pte)
8425 @@ -560,7 +629,7 @@ static inline pud_t *pud_offset(pgd_t *p
8426
8427 static inline int pgd_bad(pgd_t pgd)
8428 {
8429 - return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
8430 + return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
8431 }
8432
8433 static inline int pgd_none(pgd_t pgd)
8434 @@ -583,7 +652,12 @@ static inline int pgd_none(pgd_t pgd)
8435 * pgd_offset() returns a (pgd_t *)
8436 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
8437 */
8438 -#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
8439 +#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
8440 +
8441 +#ifdef CONFIG_PAX_PER_CPU_PGD
8442 +#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
8443 +#endif
8444 +
8445 /*
8446 * a shortcut which implies the use of the kernel's pgd, instead
8447 * of a process's
8448 @@ -594,6 +668,20 @@ static inline int pgd_none(pgd_t pgd)
8449 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
8450 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
8451
8452 +#ifdef CONFIG_X86_32
8453 +#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
8454 +#else
8455 +#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
8456 +#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
8457 +
8458 +#ifdef CONFIG_PAX_MEMORY_UDEREF
8459 +#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
8460 +#else
8461 +#define PAX_USER_SHADOW_BASE (_AC(0,UL))
8462 +#endif
8463 +
8464 +#endif
8465 +
8466 #ifndef __ASSEMBLY__
8467
8468 extern int direct_gbpages;
8469 @@ -758,11 +846,23 @@ static inline void pmdp_set_wrprotect(st
8470 * dst and src can be on the same page, but the range must not overlap,
8471 * and must not cross a page boundary.
8472 */
8473 -static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
8474 +static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
8475 {
8476 - memcpy(dst, src, count * sizeof(pgd_t));
8477 + pax_open_kernel();
8478 + while (count--)
8479 + *dst++ = *src++;
8480 + pax_close_kernel();
8481 }
8482
8483 +#ifdef CONFIG_PAX_PER_CPU_PGD
8484 +extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count);
8485 +#endif
8486 +
8487 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8488 +extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count);
8489 +#else
8490 +static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {}
8491 +#endif
8492
8493 #include <asm-generic/pgtable.h>
8494 #endif /* __ASSEMBLY__ */
8495 diff -urNp linux-3.0.8/arch/x86/include/asm/pgtable_types.h linux-3.0.8/arch/x86/include/asm/pgtable_types.h
8496 --- linux-3.0.8/arch/x86/include/asm/pgtable_types.h 2011-07-21 22:17:23.000000000 -0400
8497 +++ linux-3.0.8/arch/x86/include/asm/pgtable_types.h 2011-08-23 21:47:55.000000000 -0400
8498 @@ -16,13 +16,12 @@
8499 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
8500 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
8501 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
8502 -#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
8503 +#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
8504 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
8505 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
8506 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
8507 -#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
8508 -#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
8509 -#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
8510 +#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
8511 +#define _PAGE_BIT_SPLITTING _PAGE_BIT_SPECIAL /* only valid on a PSE pmd */
8512 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
8513
8514 /* If _PAGE_BIT_PRESENT is clear, we use these: */
8515 @@ -40,7 +39,6 @@
8516 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
8517 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
8518 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
8519 -#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
8520 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
8521 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
8522 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
8523 @@ -57,8 +55,10 @@
8524
8525 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
8526 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
8527 -#else
8528 +#elif defined(CONFIG_KMEMCHECK)
8529 #define _PAGE_NX (_AT(pteval_t, 0))
8530 +#else
8531 +#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
8532 #endif
8533
8534 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
8535 @@ -96,6 +96,9 @@
8536 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
8537 _PAGE_ACCESSED)
8538
8539 +#define PAGE_READONLY_NOEXEC PAGE_READONLY
8540 +#define PAGE_SHARED_NOEXEC PAGE_SHARED
8541 +
8542 #define __PAGE_KERNEL_EXEC \
8543 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
8544 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
8545 @@ -106,8 +109,8 @@
8546 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
8547 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
8548 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
8549 -#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
8550 -#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
8551 +#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
8552 +#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_RO | _PAGE_PCD | _PAGE_PWT | _PAGE_USER)
8553 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
8554 #define __PAGE_KERNEL_LARGE_NOCACHE (__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
8555 #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
8556 @@ -166,8 +169,8 @@
8557 * bits are combined, this will alow user to access the high address mapped
8558 * VDSO in the presence of CONFIG_COMPAT_VDSO
8559 */
8560 -#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
8561 -#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
8562 +#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
8563 +#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
8564 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
8565 #endif
8566
8567 @@ -205,7 +208,17 @@ static inline pgdval_t pgd_flags(pgd_t p
8568 {
8569 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
8570 }
8571 +#endif
8572
8573 +#if PAGETABLE_LEVELS == 3
8574 +#include <asm-generic/pgtable-nopud.h>
8575 +#endif
8576 +
8577 +#if PAGETABLE_LEVELS == 2
8578 +#include <asm-generic/pgtable-nopmd.h>
8579 +#endif
8580 +
8581 +#ifndef __ASSEMBLY__
8582 #if PAGETABLE_LEVELS > 3
8583 typedef struct { pudval_t pud; } pud_t;
8584
8585 @@ -219,8 +232,6 @@ static inline pudval_t native_pud_val(pu
8586 return pud.pud;
8587 }
8588 #else
8589 -#include <asm-generic/pgtable-nopud.h>
8590 -
8591 static inline pudval_t native_pud_val(pud_t pud)
8592 {
8593 return native_pgd_val(pud.pgd);
8594 @@ -240,8 +251,6 @@ static inline pmdval_t native_pmd_val(pm
8595 return pmd.pmd;
8596 }
8597 #else
8598 -#include <asm-generic/pgtable-nopmd.h>
8599 -
8600 static inline pmdval_t native_pmd_val(pmd_t pmd)
8601 {
8602 return native_pgd_val(pmd.pud.pgd);
8603 @@ -281,7 +290,6 @@ typedef struct page *pgtable_t;
8604
8605 extern pteval_t __supported_pte_mask;
8606 extern void set_nx(void);
8607 -extern int nx_enabled;
8608
8609 #define pgprot_writecombine pgprot_writecombine
8610 extern pgprot_t pgprot_writecombine(pgprot_t prot);
8611 diff -urNp linux-3.0.8/arch/x86/include/asm/processor.h linux-3.0.8/arch/x86/include/asm/processor.h
8612 --- linux-3.0.8/arch/x86/include/asm/processor.h 2011-07-21 22:17:23.000000000 -0400
8613 +++ linux-3.0.8/arch/x86/include/asm/processor.h 2011-08-23 21:47:55.000000000 -0400
8614 @@ -266,7 +266,7 @@ struct tss_struct {
8615
8616 } ____cacheline_aligned;
8617
8618 -DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
8619 +extern struct tss_struct init_tss[NR_CPUS];
8620
8621 /*
8622 * Save the original ist values for checking stack pointers during debugging
8623 @@ -860,11 +860,18 @@ static inline void spin_lock_prefetch(co
8624 */
8625 #define TASK_SIZE PAGE_OFFSET
8626 #define TASK_SIZE_MAX TASK_SIZE
8627 +
8628 +#ifdef CONFIG_PAX_SEGMEXEC
8629 +#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
8630 +#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
8631 +#else
8632 #define STACK_TOP TASK_SIZE
8633 -#define STACK_TOP_MAX STACK_TOP
8634 +#endif
8635 +
8636 +#define STACK_TOP_MAX TASK_SIZE
8637
8638 #define INIT_THREAD { \
8639 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
8640 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
8641 .vm86_info = NULL, \
8642 .sysenter_cs = __KERNEL_CS, \
8643 .io_bitmap_ptr = NULL, \
8644 @@ -878,7 +885,7 @@ static inline void spin_lock_prefetch(co
8645 */
8646 #define INIT_TSS { \
8647 .x86_tss = { \
8648 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
8649 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
8650 .ss0 = __KERNEL_DS, \
8651 .ss1 = __KERNEL_CS, \
8652 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
8653 @@ -889,11 +896,7 @@ static inline void spin_lock_prefetch(co
8654 extern unsigned long thread_saved_pc(struct task_struct *tsk);
8655
8656 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
8657 -#define KSTK_TOP(info) \
8658 -({ \
8659 - unsigned long *__ptr = (unsigned long *)(info); \
8660 - (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
8661 -})
8662 +#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
8663
8664 /*
8665 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
8666 @@ -908,7 +911,7 @@ extern unsigned long thread_saved_pc(str
8667 #define task_pt_regs(task) \
8668 ({ \
8669 struct pt_regs *__regs__; \
8670 - __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
8671 + __regs__ = (struct pt_regs *)((task)->thread.sp0); \
8672 __regs__ - 1; \
8673 })
8674
8675 @@ -918,13 +921,13 @@ extern unsigned long thread_saved_pc(str
8676 /*
8677 * User space process size. 47bits minus one guard page.
8678 */
8679 -#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
8680 +#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
8681
8682 /* This decides where the kernel will search for a free chunk of vm
8683 * space during mmap's.
8684 */
8685 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
8686 - 0xc0000000 : 0xFFFFe000)
8687 + 0xc0000000 : 0xFFFFf000)
8688
8689 #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
8690 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
8691 @@ -935,11 +938,11 @@ extern unsigned long thread_saved_pc(str
8692 #define STACK_TOP_MAX TASK_SIZE_MAX
8693
8694 #define INIT_THREAD { \
8695 - .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
8696 + .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
8697 }
8698
8699 #define INIT_TSS { \
8700 - .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
8701 + .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
8702 }
8703
8704 /*
8705 @@ -961,6 +964,10 @@ extern void start_thread(struct pt_regs
8706 */
8707 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
8708
8709 +#ifdef CONFIG_PAX_SEGMEXEC
8710 +#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
8711 +#endif
8712 +
8713 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
8714
8715 /* Get/set a process' ability to use the timestamp counter instruction */
8716 diff -urNp linux-3.0.8/arch/x86/include/asm/ptrace.h linux-3.0.8/arch/x86/include/asm/ptrace.h
8717 --- linux-3.0.8/arch/x86/include/asm/ptrace.h 2011-07-21 22:17:23.000000000 -0400
8718 +++ linux-3.0.8/arch/x86/include/asm/ptrace.h 2011-08-23 21:47:55.000000000 -0400
8719 @@ -153,28 +153,29 @@ static inline unsigned long regs_return_
8720 }
8721
8722 /*
8723 - * user_mode_vm(regs) determines whether a register set came from user mode.
8724 + * user_mode(regs) determines whether a register set came from user mode.
8725 * This is true if V8086 mode was enabled OR if the register set was from
8726 * protected mode with RPL-3 CS value. This tricky test checks that with
8727 * one comparison. Many places in the kernel can bypass this full check
8728 - * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
8729 + * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
8730 + * be used.
8731 */
8732 -static inline int user_mode(struct pt_regs *regs)
8733 +static inline int user_mode_novm(struct pt_regs *regs)
8734 {
8735 #ifdef CONFIG_X86_32
8736 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
8737 #else
8738 - return !!(regs->cs & 3);
8739 + return !!(regs->cs & SEGMENT_RPL_MASK);
8740 #endif
8741 }
8742
8743 -static inline int user_mode_vm(struct pt_regs *regs)
8744 +static inline int user_mode(struct pt_regs *regs)
8745 {
8746 #ifdef CONFIG_X86_32
8747 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
8748 USER_RPL;
8749 #else
8750 - return user_mode(regs);
8751 + return user_mode_novm(regs);
8752 #endif
8753 }
8754
8755 diff -urNp linux-3.0.8/arch/x86/include/asm/reboot.h linux-3.0.8/arch/x86/include/asm/reboot.h
8756 --- linux-3.0.8/arch/x86/include/asm/reboot.h 2011-07-21 22:17:23.000000000 -0400
8757 +++ linux-3.0.8/arch/x86/include/asm/reboot.h 2011-08-23 21:47:55.000000000 -0400
8758 @@ -6,19 +6,19 @@
8759 struct pt_regs;
8760
8761 struct machine_ops {
8762 - void (*restart)(char *cmd);
8763 - void (*halt)(void);
8764 - void (*power_off)(void);
8765 + void (* __noreturn restart)(char *cmd);
8766 + void (* __noreturn halt)(void);
8767 + void (* __noreturn power_off)(void);
8768 void (*shutdown)(void);
8769 void (*crash_shutdown)(struct pt_regs *);
8770 - void (*emergency_restart)(void);
8771 -};
8772 + void (* __noreturn emergency_restart)(void);
8773 +} __no_const;
8774
8775 extern struct machine_ops machine_ops;
8776
8777 void native_machine_crash_shutdown(struct pt_regs *regs);
8778 void native_machine_shutdown(void);
8779 -void machine_real_restart(unsigned int type);
8780 +void machine_real_restart(unsigned int type) __noreturn;
8781 /* These must match dispatch_table in reboot_32.S */
8782 #define MRR_BIOS 0
8783 #define MRR_APM 1
8784 diff -urNp linux-3.0.8/arch/x86/include/asm/rwsem.h linux-3.0.8/arch/x86/include/asm/rwsem.h
8785 --- linux-3.0.8/arch/x86/include/asm/rwsem.h 2011-07-21 22:17:23.000000000 -0400
8786 +++ linux-3.0.8/arch/x86/include/asm/rwsem.h 2011-08-23 21:47:55.000000000 -0400
8787 @@ -64,6 +64,14 @@ static inline void __down_read(struct rw
8788 {
8789 asm volatile("# beginning down_read\n\t"
8790 LOCK_PREFIX _ASM_INC "(%1)\n\t"
8791 +
8792 +#ifdef CONFIG_PAX_REFCOUNT
8793 + "jno 0f\n"
8794 + LOCK_PREFIX _ASM_DEC "(%1)\n"
8795 + "int $4\n0:\n"
8796 + _ASM_EXTABLE(0b, 0b)
8797 +#endif
8798 +
8799 /* adds 0x00000001 */
8800 " jns 1f\n"
8801 " call call_rwsem_down_read_failed\n"
8802 @@ -85,6 +93,14 @@ static inline int __down_read_trylock(st
8803 "1:\n\t"
8804 " mov %1,%2\n\t"
8805 " add %3,%2\n\t"
8806 +
8807 +#ifdef CONFIG_PAX_REFCOUNT
8808 + "jno 0f\n"
8809 + "sub %3,%2\n"
8810 + "int $4\n0:\n"
8811 + _ASM_EXTABLE(0b, 0b)
8812 +#endif
8813 +
8814 " jle 2f\n\t"
8815 LOCK_PREFIX " cmpxchg %2,%0\n\t"
8816 " jnz 1b\n\t"
8817 @@ -104,6 +120,14 @@ static inline void __down_write_nested(s
8818 long tmp;
8819 asm volatile("# beginning down_write\n\t"
8820 LOCK_PREFIX " xadd %1,(%2)\n\t"
8821 +
8822 +#ifdef CONFIG_PAX_REFCOUNT
8823 + "jno 0f\n"
8824 + "mov %1,(%2)\n"
8825 + "int $4\n0:\n"
8826 + _ASM_EXTABLE(0b, 0b)
8827 +#endif
8828 +
8829 /* adds 0xffff0001, returns the old value */
8830 " test %1,%1\n\t"
8831 /* was the count 0 before? */
8832 @@ -141,6 +165,14 @@ static inline void __up_read(struct rw_s
8833 long tmp;
8834 asm volatile("# beginning __up_read\n\t"
8835 LOCK_PREFIX " xadd %1,(%2)\n\t"
8836 +
8837 +#ifdef CONFIG_PAX_REFCOUNT
8838 + "jno 0f\n"
8839 + "mov %1,(%2)\n"
8840 + "int $4\n0:\n"
8841 + _ASM_EXTABLE(0b, 0b)
8842 +#endif
8843 +
8844 /* subtracts 1, returns the old value */
8845 " jns 1f\n\t"
8846 " call call_rwsem_wake\n" /* expects old value in %edx */
8847 @@ -159,6 +191,14 @@ static inline void __up_write(struct rw_
8848 long tmp;
8849 asm volatile("# beginning __up_write\n\t"
8850 LOCK_PREFIX " xadd %1,(%2)\n\t"
8851 +
8852 +#ifdef CONFIG_PAX_REFCOUNT
8853 + "jno 0f\n"
8854 + "mov %1,(%2)\n"
8855 + "int $4\n0:\n"
8856 + _ASM_EXTABLE(0b, 0b)
8857 +#endif
8858 +
8859 /* subtracts 0xffff0001, returns the old value */
8860 " jns 1f\n\t"
8861 " call call_rwsem_wake\n" /* expects old value in %edx */
8862 @@ -176,6 +216,14 @@ static inline void __downgrade_write(str
8863 {
8864 asm volatile("# beginning __downgrade_write\n\t"
8865 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
8866 +
8867 +#ifdef CONFIG_PAX_REFCOUNT
8868 + "jno 0f\n"
8869 + LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
8870 + "int $4\n0:\n"
8871 + _ASM_EXTABLE(0b, 0b)
8872 +#endif
8873 +
8874 /*
8875 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
8876 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
8877 @@ -194,7 +242,15 @@ static inline void __downgrade_write(str
8878 */
8879 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
8880 {
8881 - asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
8882 + asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
8883 +
8884 +#ifdef CONFIG_PAX_REFCOUNT
8885 + "jno 0f\n"
8886 + LOCK_PREFIX _ASM_SUB "%1,%0\n"
8887 + "int $4\n0:\n"
8888 + _ASM_EXTABLE(0b, 0b)
8889 +#endif
8890 +
8891 : "+m" (sem->count)
8892 : "er" (delta));
8893 }
8894 @@ -206,7 +262,15 @@ static inline long rwsem_atomic_update(l
8895 {
8896 long tmp = delta;
8897
8898 - asm volatile(LOCK_PREFIX "xadd %0,%1"
8899 + asm volatile(LOCK_PREFIX "xadd %0,%1\n"
8900 +
8901 +#ifdef CONFIG_PAX_REFCOUNT
8902 + "jno 0f\n"
8903 + "mov %0,%1\n"
8904 + "int $4\n0:\n"
8905 + _ASM_EXTABLE(0b, 0b)
8906 +#endif
8907 +
8908 : "+r" (tmp), "+m" (sem->count)
8909 : : "memory");
8910
8911 diff -urNp linux-3.0.8/arch/x86/include/asm/segment.h linux-3.0.8/arch/x86/include/asm/segment.h
8912 --- linux-3.0.8/arch/x86/include/asm/segment.h 2011-07-21 22:17:23.000000000 -0400
8913 +++ linux-3.0.8/arch/x86/include/asm/segment.h 2011-09-17 00:53:42.000000000 -0400
8914 @@ -64,10 +64,15 @@
8915 * 26 - ESPFIX small SS
8916 * 27 - per-cpu [ offset to per-cpu data area ]
8917 * 28 - stack_canary-20 [ for stack protector ]
8918 - * 29 - unused
8919 - * 30 - unused
8920 + * 29 - PCI BIOS CS
8921 + * 30 - PCI BIOS DS
8922 * 31 - TSS for double fault handler
8923 */
8924 +#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
8925 +#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
8926 +#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
8927 +#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
8928 +
8929 #define GDT_ENTRY_TLS_MIN 6
8930 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
8931
8932 @@ -79,6 +84,8 @@
8933
8934 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
8935
8936 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
8937 +
8938 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
8939
8940 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
8941 @@ -104,6 +111,12 @@
8942 #define __KERNEL_STACK_CANARY 0
8943 #endif
8944
8945 +#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
8946 +#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
8947 +
8948 +#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
8949 +#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
8950 +
8951 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
8952
8953 /*
8954 @@ -141,7 +154,7 @@
8955 */
8956
8957 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
8958 -#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
8959 +#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
8960
8961
8962 #else
8963 @@ -165,6 +178,8 @@
8964 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS * 8 + 3)
8965 #define __USER32_DS __USER_DS
8966
8967 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
8968 +
8969 #define GDT_ENTRY_TSS 8 /* needs two entries */
8970 #define GDT_ENTRY_LDT 10 /* needs two entries */
8971 #define GDT_ENTRY_TLS_MIN 12
8972 @@ -185,6 +200,7 @@
8973 #endif
8974
8975 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
8976 +#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
8977 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
8978 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
8979 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
8980 diff -urNp linux-3.0.8/arch/x86/include/asm/smp.h linux-3.0.8/arch/x86/include/asm/smp.h
8981 --- linux-3.0.8/arch/x86/include/asm/smp.h 2011-07-21 22:17:23.000000000 -0400
8982 +++ linux-3.0.8/arch/x86/include/asm/smp.h 2011-08-23 21:47:55.000000000 -0400
8983 @@ -36,7 +36,7 @@ DECLARE_PER_CPU(cpumask_var_t, cpu_core_
8984 /* cpus sharing the last level cache: */
8985 DECLARE_PER_CPU(cpumask_var_t, cpu_llc_shared_map);
8986 DECLARE_PER_CPU(u16, cpu_llc_id);
8987 -DECLARE_PER_CPU(int, cpu_number);
8988 +DECLARE_PER_CPU(unsigned int, cpu_number);
8989
8990 static inline struct cpumask *cpu_sibling_mask(int cpu)
8991 {
8992 @@ -77,7 +77,7 @@ struct smp_ops {
8993
8994 void (*send_call_func_ipi)(const struct cpumask *mask);
8995 void (*send_call_func_single_ipi)(int cpu);
8996 -};
8997 +} __no_const;
8998
8999 /* Globals due to paravirt */
9000 extern void set_cpu_sibling_map(int cpu);
9001 @@ -192,14 +192,8 @@ extern unsigned disabled_cpus __cpuinitd
9002 extern int safe_smp_processor_id(void);
9003
9004 #elif defined(CONFIG_X86_64_SMP)
9005 -#define raw_smp_processor_id() (percpu_read(cpu_number))
9006 -
9007 -#define stack_smp_processor_id() \
9008 -({ \
9009 - struct thread_info *ti; \
9010 - __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
9011 - ti->cpu; \
9012 -})
9013 +#define raw_smp_processor_id() (percpu_read(cpu_number))
9014 +#define stack_smp_processor_id() raw_smp_processor_id()
9015 #define safe_smp_processor_id() smp_processor_id()
9016
9017 #endif
9018 diff -urNp linux-3.0.8/arch/x86/include/asm/spinlock.h linux-3.0.8/arch/x86/include/asm/spinlock.h
9019 --- linux-3.0.8/arch/x86/include/asm/spinlock.h 2011-07-21 22:17:23.000000000 -0400
9020 +++ linux-3.0.8/arch/x86/include/asm/spinlock.h 2011-08-23 21:47:55.000000000 -0400
9021 @@ -249,6 +249,14 @@ static inline int arch_write_can_lock(ar
9022 static inline void arch_read_lock(arch_rwlock_t *rw)
9023 {
9024 asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
9025 +
9026 +#ifdef CONFIG_PAX_REFCOUNT
9027 + "jno 0f\n"
9028 + LOCK_PREFIX " addl $1,(%0)\n"
9029 + "int $4\n0:\n"
9030 + _ASM_EXTABLE(0b, 0b)
9031 +#endif
9032 +
9033 "jns 1f\n"
9034 "call __read_lock_failed\n\t"
9035 "1:\n"
9036 @@ -258,6 +266,14 @@ static inline void arch_read_lock(arch_r
9037 static inline void arch_write_lock(arch_rwlock_t *rw)
9038 {
9039 asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
9040 +
9041 +#ifdef CONFIG_PAX_REFCOUNT
9042 + "jno 0f\n"
9043 + LOCK_PREFIX " addl %1,(%0)\n"
9044 + "int $4\n0:\n"
9045 + _ASM_EXTABLE(0b, 0b)
9046 +#endif
9047 +
9048 "jz 1f\n"
9049 "call __write_lock_failed\n\t"
9050 "1:\n"
9051 @@ -286,12 +302,29 @@ static inline int arch_write_trylock(arc
9052
9053 static inline void arch_read_unlock(arch_rwlock_t *rw)
9054 {
9055 - asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
9056 + asm volatile(LOCK_PREFIX "incl %0\n"
9057 +
9058 +#ifdef CONFIG_PAX_REFCOUNT
9059 + "jno 0f\n"
9060 + LOCK_PREFIX "decl %0\n"
9061 + "int $4\n0:\n"
9062 + _ASM_EXTABLE(0b, 0b)
9063 +#endif
9064 +
9065 + :"+m" (rw->lock) : : "memory");
9066 }
9067
9068 static inline void arch_write_unlock(arch_rwlock_t *rw)
9069 {
9070 - asm volatile(LOCK_PREFIX "addl %1, %0"
9071 + asm volatile(LOCK_PREFIX "addl %1, %0\n"
9072 +
9073 +#ifdef CONFIG_PAX_REFCOUNT
9074 + "jno 0f\n"
9075 + LOCK_PREFIX "subl %1, %0\n"
9076 + "int $4\n0:\n"
9077 + _ASM_EXTABLE(0b, 0b)
9078 +#endif
9079 +
9080 : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
9081 }
9082
9083 diff -urNp linux-3.0.8/arch/x86/include/asm/stackprotector.h linux-3.0.8/arch/x86/include/asm/stackprotector.h
9084 --- linux-3.0.8/arch/x86/include/asm/stackprotector.h 2011-07-21 22:17:23.000000000 -0400
9085 +++ linux-3.0.8/arch/x86/include/asm/stackprotector.h 2011-08-23 21:47:55.000000000 -0400
9086 @@ -48,7 +48,7 @@
9087 * head_32 for boot CPU and setup_per_cpu_areas() for others.
9088 */
9089 #define GDT_STACK_CANARY_INIT \
9090 - [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
9091 + [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
9092
9093 /*
9094 * Initialize the stackprotector canary value.
9095 @@ -113,7 +113,7 @@ static inline void setup_stack_canary_se
9096
9097 static inline void load_stack_canary_segment(void)
9098 {
9099 -#ifdef CONFIG_X86_32
9100 +#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
9101 asm volatile ("mov %0, %%gs" : : "r" (0));
9102 #endif
9103 }
9104 diff -urNp linux-3.0.8/arch/x86/include/asm/stacktrace.h linux-3.0.8/arch/x86/include/asm/stacktrace.h
9105 --- linux-3.0.8/arch/x86/include/asm/stacktrace.h 2011-07-21 22:17:23.000000000 -0400
9106 +++ linux-3.0.8/arch/x86/include/asm/stacktrace.h 2011-08-23 21:47:55.000000000 -0400
9107 @@ -11,28 +11,20 @@
9108
9109 extern int kstack_depth_to_print;
9110
9111 -struct thread_info;
9112 +struct task_struct;
9113 struct stacktrace_ops;
9114
9115 -typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
9116 - unsigned long *stack,
9117 - unsigned long bp,
9118 - const struct stacktrace_ops *ops,
9119 - void *data,
9120 - unsigned long *end,
9121 - int *graph);
9122 -
9123 -extern unsigned long
9124 -print_context_stack(struct thread_info *tinfo,
9125 - unsigned long *stack, unsigned long bp,
9126 - const struct stacktrace_ops *ops, void *data,
9127 - unsigned long *end, int *graph);
9128 -
9129 -extern unsigned long
9130 -print_context_stack_bp(struct thread_info *tinfo,
9131 - unsigned long *stack, unsigned long bp,
9132 - const struct stacktrace_ops *ops, void *data,
9133 - unsigned long *end, int *graph);
9134 +typedef unsigned long walk_stack_t(struct task_struct *task,
9135 + void *stack_start,
9136 + unsigned long *stack,
9137 + unsigned long bp,
9138 + const struct stacktrace_ops *ops,
9139 + void *data,
9140 + unsigned long *end,
9141 + int *graph);
9142 +
9143 +extern walk_stack_t print_context_stack;
9144 +extern walk_stack_t print_context_stack_bp;
9145
9146 /* Generic stack tracer with callbacks */
9147
9148 @@ -40,7 +32,7 @@ struct stacktrace_ops {
9149 void (*address)(void *data, unsigned long address, int reliable);
9150 /* On negative return stop dumping */
9151 int (*stack)(void *data, char *name);
9152 - walk_stack_t walk_stack;
9153 + walk_stack_t *walk_stack;
9154 };
9155
9156 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
9157 diff -urNp linux-3.0.8/arch/x86/include/asm/sys_ia32.h linux-3.0.8/arch/x86/include/asm/sys_ia32.h
9158 --- linux-3.0.8/arch/x86/include/asm/sys_ia32.h 2011-07-21 22:17:23.000000000 -0400
9159 +++ linux-3.0.8/arch/x86/include/asm/sys_ia32.h 2011-10-06 04:17:55.000000000 -0400
9160 @@ -40,7 +40,7 @@ asmlinkage long sys32_rt_sigprocmask(int
9161 compat_sigset_t __user *, unsigned int);
9162 asmlinkage long sys32_alarm(unsigned int);
9163
9164 -asmlinkage long sys32_waitpid(compat_pid_t, unsigned int *, int);
9165 +asmlinkage long sys32_waitpid(compat_pid_t, unsigned int __user *, int);
9166 asmlinkage long sys32_sysfs(int, u32, u32);
9167
9168 asmlinkage long sys32_sched_rr_get_interval(compat_pid_t,
9169 diff -urNp linux-3.0.8/arch/x86/include/asm/system.h linux-3.0.8/arch/x86/include/asm/system.h
9170 --- linux-3.0.8/arch/x86/include/asm/system.h 2011-07-21 22:17:23.000000000 -0400
9171 +++ linux-3.0.8/arch/x86/include/asm/system.h 2011-08-23 21:47:55.000000000 -0400
9172 @@ -129,7 +129,7 @@ do { \
9173 "call __switch_to\n\t" \
9174 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
9175 __switch_canary \
9176 - "movq %P[thread_info](%%rsi),%%r8\n\t" \
9177 + "movq "__percpu_arg([thread_info])",%%r8\n\t" \
9178 "movq %%rax,%%rdi\n\t" \
9179 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
9180 "jnz ret_from_fork\n\t" \
9181 @@ -140,7 +140,7 @@ do { \
9182 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
9183 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
9184 [_tif_fork] "i" (_TIF_FORK), \
9185 - [thread_info] "i" (offsetof(struct task_struct, stack)), \
9186 + [thread_info] "m" (current_tinfo), \
9187 [current_task] "m" (current_task) \
9188 __switch_canary_iparam \
9189 : "memory", "cc" __EXTRA_CLOBBER)
9190 @@ -200,7 +200,7 @@ static inline unsigned long get_limit(un
9191 {
9192 unsigned long __limit;
9193 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
9194 - return __limit + 1;
9195 + return __limit;
9196 }
9197
9198 static inline void native_clts(void)
9199 @@ -397,12 +397,12 @@ void enable_hlt(void);
9200
9201 void cpu_idle_wait(void);
9202
9203 -extern unsigned long arch_align_stack(unsigned long sp);
9204 +#define arch_align_stack(x) ((x) & ~0xfUL)
9205 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
9206
9207 void default_idle(void);
9208
9209 -void stop_this_cpu(void *dummy);
9210 +void stop_this_cpu(void *dummy) __noreturn;
9211
9212 /*
9213 * Force strict CPU ordering.
9214 diff -urNp linux-3.0.8/arch/x86/include/asm/thread_info.h linux-3.0.8/arch/x86/include/asm/thread_info.h
9215 --- linux-3.0.8/arch/x86/include/asm/thread_info.h 2011-07-21 22:17:23.000000000 -0400
9216 +++ linux-3.0.8/arch/x86/include/asm/thread_info.h 2011-08-23 21:47:55.000000000 -0400
9217 @@ -10,6 +10,7 @@
9218 #include <linux/compiler.h>
9219 #include <asm/page.h>
9220 #include <asm/types.h>
9221 +#include <asm/percpu.h>
9222
9223 /*
9224 * low level task data that entry.S needs immediate access to
9225 @@ -24,7 +25,6 @@ struct exec_domain;
9226 #include <asm/atomic.h>
9227
9228 struct thread_info {
9229 - struct task_struct *task; /* main task structure */
9230 struct exec_domain *exec_domain; /* execution domain */
9231 __u32 flags; /* low level flags */
9232 __u32 status; /* thread synchronous flags */
9233 @@ -34,18 +34,12 @@ struct thread_info {
9234 mm_segment_t addr_limit;
9235 struct restart_block restart_block;
9236 void __user *sysenter_return;
9237 -#ifdef CONFIG_X86_32
9238 - unsigned long previous_esp; /* ESP of the previous stack in
9239 - case of nested (IRQ) stacks
9240 - */
9241 - __u8 supervisor_stack[0];
9242 -#endif
9243 + unsigned long lowest_stack;
9244 int uaccess_err;
9245 };
9246
9247 -#define INIT_THREAD_INFO(tsk) \
9248 +#define INIT_THREAD_INFO \
9249 { \
9250 - .task = &tsk, \
9251 .exec_domain = &default_exec_domain, \
9252 .flags = 0, \
9253 .cpu = 0, \
9254 @@ -56,7 +50,7 @@ struct thread_info {
9255 }, \
9256 }
9257
9258 -#define init_thread_info (init_thread_union.thread_info)
9259 +#define init_thread_info (init_thread_union.stack)
9260 #define init_stack (init_thread_union.stack)
9261
9262 #else /* !__ASSEMBLY__ */
9263 @@ -170,6 +164,23 @@ struct thread_info {
9264 ret; \
9265 })
9266
9267 +#ifdef __ASSEMBLY__
9268 +/* how to get the thread information struct from ASM */
9269 +#define GET_THREAD_INFO(reg) \
9270 + mov PER_CPU_VAR(current_tinfo), reg
9271 +
9272 +/* use this one if reg already contains %esp */
9273 +#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
9274 +#else
9275 +/* how to get the thread information struct from C */
9276 +DECLARE_PER_CPU(struct thread_info *, current_tinfo);
9277 +
9278 +static __always_inline struct thread_info *current_thread_info(void)
9279 +{
9280 + return percpu_read_stable(current_tinfo);
9281 +}
9282 +#endif
9283 +
9284 #ifdef CONFIG_X86_32
9285
9286 #define STACK_WARN (THREAD_SIZE/8)
9287 @@ -180,35 +191,13 @@ struct thread_info {
9288 */
9289 #ifndef __ASSEMBLY__
9290
9291 -
9292 /* how to get the current stack pointer from C */
9293 register unsigned long current_stack_pointer asm("esp") __used;
9294
9295 -/* how to get the thread information struct from C */
9296 -static inline struct thread_info *current_thread_info(void)
9297 -{
9298 - return (struct thread_info *)
9299 - (current_stack_pointer & ~(THREAD_SIZE - 1));
9300 -}
9301 -
9302 -#else /* !__ASSEMBLY__ */
9303 -
9304 -/* how to get the thread information struct from ASM */
9305 -#define GET_THREAD_INFO(reg) \
9306 - movl $-THREAD_SIZE, reg; \
9307 - andl %esp, reg
9308 -
9309 -/* use this one if reg already contains %esp */
9310 -#define GET_THREAD_INFO_WITH_ESP(reg) \
9311 - andl $-THREAD_SIZE, reg
9312 -
9313 #endif
9314
9315 #else /* X86_32 */
9316
9317 -#include <asm/percpu.h>
9318 -#define KERNEL_STACK_OFFSET (5*8)
9319 -
9320 /*
9321 * macros/functions for gaining access to the thread information structure
9322 * preempt_count needs to be 1 initially, until the scheduler is functional.
9323 @@ -216,21 +205,8 @@ static inline struct thread_info *curren
9324 #ifndef __ASSEMBLY__
9325 DECLARE_PER_CPU(unsigned long, kernel_stack);
9326
9327 -static inline struct thread_info *current_thread_info(void)
9328 -{
9329 - struct thread_info *ti;
9330 - ti = (void *)(percpu_read_stable(kernel_stack) +
9331 - KERNEL_STACK_OFFSET - THREAD_SIZE);
9332 - return ti;
9333 -}
9334 -
9335 -#else /* !__ASSEMBLY__ */
9336 -
9337 -/* how to get the thread information struct from ASM */
9338 -#define GET_THREAD_INFO(reg) \
9339 - movq PER_CPU_VAR(kernel_stack),reg ; \
9340 - subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
9341 -
9342 +/* how to get the current stack pointer from C */
9343 +register unsigned long current_stack_pointer asm("rsp") __used;
9344 #endif
9345
9346 #endif /* !X86_32 */
9347 @@ -266,5 +242,16 @@ extern void arch_task_cache_init(void);
9348 extern void free_thread_info(struct thread_info *ti);
9349 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
9350 #define arch_task_cache_init arch_task_cache_init
9351 +
9352 +#define __HAVE_THREAD_FUNCTIONS
9353 +#define task_thread_info(task) (&(task)->tinfo)
9354 +#define task_stack_page(task) ((task)->stack)
9355 +#define setup_thread_stack(p, org) do {} while (0)
9356 +#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
9357 +
9358 +#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
9359 +extern struct task_struct *alloc_task_struct_node(int node);
9360 +extern void free_task_struct(struct task_struct *);
9361 +
9362 #endif
9363 #endif /* _ASM_X86_THREAD_INFO_H */
9364 diff -urNp linux-3.0.8/arch/x86/include/asm/uaccess_32.h linux-3.0.8/arch/x86/include/asm/uaccess_32.h
9365 --- linux-3.0.8/arch/x86/include/asm/uaccess_32.h 2011-07-21 22:17:23.000000000 -0400
9366 +++ linux-3.0.8/arch/x86/include/asm/uaccess_32.h 2011-08-23 21:48:14.000000000 -0400
9367 @@ -43,6 +43,11 @@ unsigned long __must_check __copy_from_u
9368 static __always_inline unsigned long __must_check
9369 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
9370 {
9371 + pax_track_stack();
9372 +
9373 + if ((long)n < 0)
9374 + return n;
9375 +
9376 if (__builtin_constant_p(n)) {
9377 unsigned long ret;
9378
9379 @@ -61,6 +66,8 @@ __copy_to_user_inatomic(void __user *to,
9380 return ret;
9381 }
9382 }
9383 + if (!__builtin_constant_p(n))
9384 + check_object_size(from, n, true);
9385 return __copy_to_user_ll(to, from, n);
9386 }
9387
9388 @@ -82,12 +89,16 @@ static __always_inline unsigned long __m
9389 __copy_to_user(void __user *to, const void *from, unsigned long n)
9390 {
9391 might_fault();
9392 +
9393 return __copy_to_user_inatomic(to, from, n);
9394 }
9395
9396 static __always_inline unsigned long
9397 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
9398 {
9399 + if ((long)n < 0)
9400 + return n;
9401 +
9402 /* Avoid zeroing the tail if the copy fails..
9403 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
9404 * but as the zeroing behaviour is only significant when n is not
9405 @@ -137,6 +148,12 @@ static __always_inline unsigned long
9406 __copy_from_user(void *to, const void __user *from, unsigned long n)
9407 {
9408 might_fault();
9409 +
9410 + pax_track_stack();
9411 +
9412 + if ((long)n < 0)
9413 + return n;
9414 +
9415 if (__builtin_constant_p(n)) {
9416 unsigned long ret;
9417
9418 @@ -152,6 +169,8 @@ __copy_from_user(void *to, const void __
9419 return ret;
9420 }
9421 }
9422 + if (!__builtin_constant_p(n))
9423 + check_object_size(to, n, false);
9424 return __copy_from_user_ll(to, from, n);
9425 }
9426
9427 @@ -159,6 +178,10 @@ static __always_inline unsigned long __c
9428 const void __user *from, unsigned long n)
9429 {
9430 might_fault();
9431 +
9432 + if ((long)n < 0)
9433 + return n;
9434 +
9435 if (__builtin_constant_p(n)) {
9436 unsigned long ret;
9437
9438 @@ -181,15 +204,19 @@ static __always_inline unsigned long
9439 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
9440 unsigned long n)
9441 {
9442 - return __copy_from_user_ll_nocache_nozero(to, from, n);
9443 -}
9444 + if ((long)n < 0)
9445 + return n;
9446
9447 -unsigned long __must_check copy_to_user(void __user *to,
9448 - const void *from, unsigned long n);
9449 -unsigned long __must_check _copy_from_user(void *to,
9450 - const void __user *from,
9451 - unsigned long n);
9452 + return __copy_from_user_ll_nocache_nozero(to, from, n);
9453 +}
9454
9455 +extern void copy_to_user_overflow(void)
9456 +#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
9457 + __compiletime_error("copy_to_user() buffer size is not provably correct")
9458 +#else
9459 + __compiletime_warning("copy_to_user() buffer size is not provably correct")
9460 +#endif
9461 +;
9462
9463 extern void copy_from_user_overflow(void)
9464 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
9465 @@ -199,17 +226,61 @@ extern void copy_from_user_overflow(void
9466 #endif
9467 ;
9468
9469 -static inline unsigned long __must_check copy_from_user(void *to,
9470 - const void __user *from,
9471 - unsigned long n)
9472 +/**
9473 + * copy_to_user: - Copy a block of data into user space.
9474 + * @to: Destination address, in user space.
9475 + * @from: Source address, in kernel space.
9476 + * @n: Number of bytes to copy.
9477 + *
9478 + * Context: User context only. This function may sleep.
9479 + *
9480 + * Copy data from kernel space to user space.
9481 + *
9482 + * Returns number of bytes that could not be copied.
9483 + * On success, this will be zero.
9484 + */
9485 +static inline unsigned long __must_check
9486 +copy_to_user(void __user *to, const void *from, unsigned long n)
9487 +{
9488 + int sz = __compiletime_object_size(from);
9489 +
9490 + if (unlikely(sz != -1 && sz < n))
9491 + copy_to_user_overflow();
9492 + else if (access_ok(VERIFY_WRITE, to, n))
9493 + n = __copy_to_user(to, from, n);
9494 + return n;
9495 +}
9496 +
9497 +/**
9498 + * copy_from_user: - Copy a block of data from user space.
9499 + * @to: Destination address, in kernel space.
9500 + * @from: Source address, in user space.
9501 + * @n: Number of bytes to copy.
9502 + *
9503 + * Context: User context only. This function may sleep.
9504 + *
9505 + * Copy data from user space to kernel space.
9506 + *
9507 + * Returns number of bytes that could not be copied.
9508 + * On success, this will be zero.
9509 + *
9510 + * If some data could not be copied, this function will pad the copied
9511 + * data to the requested size using zero bytes.
9512 + */
9513 +static inline unsigned long __must_check
9514 +copy_from_user(void *to, const void __user *from, unsigned long n)
9515 {
9516 int sz = __compiletime_object_size(to);
9517
9518 - if (likely(sz == -1 || sz >= n))
9519 - n = _copy_from_user(to, from, n);
9520 - else
9521 + if (unlikely(sz != -1 && sz < n))
9522 copy_from_user_overflow();
9523 -
9524 + else if (access_ok(VERIFY_READ, from, n))
9525 + n = __copy_from_user(to, from, n);
9526 + else if ((long)n > 0) {
9527 + if (!__builtin_constant_p(n))
9528 + check_object_size(to, n, false);
9529 + memset(to, 0, n);
9530 + }
9531 return n;
9532 }
9533
9534 diff -urNp linux-3.0.8/arch/x86/include/asm/uaccess_64.h linux-3.0.8/arch/x86/include/asm/uaccess_64.h
9535 --- linux-3.0.8/arch/x86/include/asm/uaccess_64.h 2011-07-21 22:17:23.000000000 -0400
9536 +++ linux-3.0.8/arch/x86/include/asm/uaccess_64.h 2011-10-06 04:17:55.000000000 -0400
9537 @@ -10,6 +10,9 @@
9538 #include <asm/alternative.h>
9539 #include <asm/cpufeature.h>
9540 #include <asm/page.h>
9541 +#include <asm/pgtable.h>
9542 +
9543 +#define set_fs(x) (current_thread_info()->addr_limit = (x))
9544
9545 /*
9546 * Copy To/From Userspace
9547 @@ -36,26 +39,26 @@ copy_user_generic(void *to, const void *
9548 return ret;
9549 }
9550
9551 -__must_check unsigned long
9552 -_copy_to_user(void __user *to, const void *from, unsigned len);
9553 -__must_check unsigned long
9554 -_copy_from_user(void *to, const void __user *from, unsigned len);
9555 +static __always_inline __must_check unsigned long
9556 +__copy_to_user(void __user *to, const void *from, unsigned len);
9557 +static __always_inline __must_check unsigned long
9558 +__copy_from_user(void *to, const void __user *from, unsigned len);
9559 __must_check unsigned long
9560 copy_in_user(void __user *to, const void __user *from, unsigned len);
9561
9562 static inline unsigned long __must_check copy_from_user(void *to,
9563 const void __user *from,
9564 - unsigned long n)
9565 + unsigned n)
9566 {
9567 - int sz = __compiletime_object_size(to);
9568 -
9569 might_fault();
9570 - if (likely(sz == -1 || sz >= n))
9571 - n = _copy_from_user(to, from, n);
9572 -#ifdef CONFIG_DEBUG_VM
9573 - else
9574 - WARN(1, "Buffer overflow detected!\n");
9575 -#endif
9576 +
9577 + if (access_ok(VERIFY_READ, from, n))
9578 + n = __copy_from_user(to, from, n);
9579 + else if ((int)n > 0) {
9580 + if (!__builtin_constant_p(n))
9581 + check_object_size(to, n, false);
9582 + memset(to, 0, n);
9583 + }
9584 return n;
9585 }
9586
9587 @@ -64,110 +67,198 @@ int copy_to_user(void __user *dst, const
9588 {
9589 might_fault();
9590
9591 - return _copy_to_user(dst, src, size);
9592 + if (access_ok(VERIFY_WRITE, dst, size))
9593 + size = __copy_to_user(dst, src, size);
9594 + return size;
9595 }
9596
9597 static __always_inline __must_check
9598 -int __copy_from_user(void *dst, const void __user *src, unsigned size)
9599 +unsigned long __copy_from_user(void *dst, const void __user *src, unsigned size)
9600 {
9601 - int ret = 0;
9602 + int sz = __compiletime_object_size(dst);
9603 + unsigned ret = 0;
9604
9605 might_fault();
9606 - if (!__builtin_constant_p(size))
9607 - return copy_user_generic(dst, (__force void *)src, size);
9608 +
9609 + pax_track_stack();
9610 +
9611 + if ((int)size < 0)
9612 + return size;
9613 +
9614 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9615 + if (!__access_ok(VERIFY_READ, src, size))
9616 + return size;
9617 +#endif
9618 +
9619 + if (unlikely(sz != -1 && sz < size)) {
9620 +#ifdef CONFIG_DEBUG_VM
9621 + WARN(1, "Buffer overflow detected!\n");
9622 +#endif
9623 + return size;
9624 + }
9625 +
9626 + if (!__builtin_constant_p(size)) {
9627 + check_object_size(dst, size, false);
9628 +
9629 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9630 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9631 + src += PAX_USER_SHADOW_BASE;
9632 +#endif
9633 +
9634 + return copy_user_generic(dst, (__force_kernel const void *)src, size);
9635 + }
9636 switch (size) {
9637 - case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
9638 + case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
9639 ret, "b", "b", "=q", 1);
9640 return ret;
9641 - case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
9642 + case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
9643 ret, "w", "w", "=r", 2);
9644 return ret;
9645 - case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
9646 + case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
9647 ret, "l", "k", "=r", 4);
9648 return ret;
9649 - case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
9650 + case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
9651 ret, "q", "", "=r", 8);
9652 return ret;
9653 case 10:
9654 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
9655 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
9656 ret, "q", "", "=r", 10);
9657 if (unlikely(ret))
9658 return ret;
9659 __get_user_asm(*(u16 *)(8 + (char *)dst),
9660 - (u16 __user *)(8 + (char __user *)src),
9661 + (const u16 __user *)(8 + (const char __user *)src),
9662 ret, "w", "w", "=r", 2);
9663 return ret;
9664 case 16:
9665 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
9666 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
9667 ret, "q", "", "=r", 16);
9668 if (unlikely(ret))
9669 return ret;
9670 __get_user_asm(*(u64 *)(8 + (char *)dst),
9671 - (u64 __user *)(8 + (char __user *)src),
9672 + (const u64 __user *)(8 + (const char __user *)src),
9673 ret, "q", "", "=r", 8);
9674 return ret;
9675 default:
9676 - return copy_user_generic(dst, (__force void *)src, size);
9677 +
9678 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9679 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9680 + src += PAX_USER_SHADOW_BASE;
9681 +#endif
9682 +
9683 + return copy_user_generic(dst, (__force_kernel const void *)src, size);
9684 }
9685 }
9686
9687 static __always_inline __must_check
9688 -int __copy_to_user(void __user *dst, const void *src, unsigned size)
9689 +unsigned long __copy_to_user(void __user *dst, const void *src, unsigned size)
9690 {
9691 - int ret = 0;
9692 + int sz = __compiletime_object_size(src);
9693 + unsigned ret = 0;
9694
9695 might_fault();
9696 - if (!__builtin_constant_p(size))
9697 - return copy_user_generic((__force void *)dst, src, size);
9698 +
9699 + pax_track_stack();
9700 +
9701 + if ((int)size < 0)
9702 + return size;
9703 +
9704 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9705 + if (!__access_ok(VERIFY_WRITE, dst, size))
9706 + return size;
9707 +#endif
9708 +
9709 + if (unlikely(sz != -1 && sz < size)) {
9710 +#ifdef CONFIG_DEBUG_VM
9711 + WARN(1, "Buffer overflow detected!\n");
9712 +#endif
9713 + return size;
9714 + }
9715 +
9716 + if (!__builtin_constant_p(size)) {
9717 + check_object_size(src, size, true);
9718 +
9719 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9720 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9721 + dst += PAX_USER_SHADOW_BASE;
9722 +#endif
9723 +
9724 + return copy_user_generic((__force_kernel void *)dst, src, size);
9725 + }
9726 switch (size) {
9727 - case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
9728 + case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
9729 ret, "b", "b", "iq", 1);
9730 return ret;
9731 - case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
9732 + case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
9733 ret, "w", "w", "ir", 2);
9734 return ret;
9735 - case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
9736 + case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
9737 ret, "l", "k", "ir", 4);
9738 return ret;
9739 - case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
9740 + case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
9741 ret, "q", "", "er", 8);
9742 return ret;
9743 case 10:
9744 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
9745 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
9746 ret, "q", "", "er", 10);
9747 if (unlikely(ret))
9748 return ret;
9749 asm("":::"memory");
9750 - __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
9751 + __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
9752 ret, "w", "w", "ir", 2);
9753 return ret;
9754 case 16:
9755 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
9756 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
9757 ret, "q", "", "er", 16);
9758 if (unlikely(ret))
9759 return ret;
9760 asm("":::"memory");
9761 - __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
9762 + __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
9763 ret, "q", "", "er", 8);
9764 return ret;
9765 default:
9766 - return copy_user_generic((__force void *)dst, src, size);
9767 +
9768 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9769 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9770 + dst += PAX_USER_SHADOW_BASE;
9771 +#endif
9772 +
9773 + return copy_user_generic((__force_kernel void *)dst, src, size);
9774 }
9775 }
9776
9777 static __always_inline __must_check
9778 -int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
9779 +unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned size)
9780 {
9781 - int ret = 0;
9782 + unsigned ret = 0;
9783
9784 might_fault();
9785 - if (!__builtin_constant_p(size))
9786 - return copy_user_generic((__force void *)dst,
9787 - (__force void *)src, size);
9788 +
9789 + if ((int)size < 0)
9790 + return size;
9791 +
9792 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9793 + if (!__access_ok(VERIFY_READ, src, size))
9794 + return size;
9795 + if (!__access_ok(VERIFY_WRITE, dst, size))
9796 + return size;
9797 +#endif
9798 +
9799 + if (!__builtin_constant_p(size)) {
9800 +
9801 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9802 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9803 + src += PAX_USER_SHADOW_BASE;
9804 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9805 + dst += PAX_USER_SHADOW_BASE;
9806 +#endif
9807 +
9808 + return copy_user_generic((__force_kernel void *)dst,
9809 + (__force_kernel const void *)src, size);
9810 + }
9811 switch (size) {
9812 case 1: {
9813 u8 tmp;
9814 - __get_user_asm(tmp, (u8 __user *)src,
9815 + __get_user_asm(tmp, (const u8 __user *)src,
9816 ret, "b", "b", "=q", 1);
9817 if (likely(!ret))
9818 __put_user_asm(tmp, (u8 __user *)dst,
9819 @@ -176,7 +267,7 @@ int __copy_in_user(void __user *dst, con
9820 }
9821 case 2: {
9822 u16 tmp;
9823 - __get_user_asm(tmp, (u16 __user *)src,
9824 + __get_user_asm(tmp, (const u16 __user *)src,
9825 ret, "w", "w", "=r", 2);
9826 if (likely(!ret))
9827 __put_user_asm(tmp, (u16 __user *)dst,
9828 @@ -186,7 +277,7 @@ int __copy_in_user(void __user *dst, con
9829
9830 case 4: {
9831 u32 tmp;
9832 - __get_user_asm(tmp, (u32 __user *)src,
9833 + __get_user_asm(tmp, (const u32 __user *)src,
9834 ret, "l", "k", "=r", 4);
9835 if (likely(!ret))
9836 __put_user_asm(tmp, (u32 __user *)dst,
9837 @@ -195,7 +286,7 @@ int __copy_in_user(void __user *dst, con
9838 }
9839 case 8: {
9840 u64 tmp;
9841 - __get_user_asm(tmp, (u64 __user *)src,
9842 + __get_user_asm(tmp, (const u64 __user *)src,
9843 ret, "q", "", "=r", 8);
9844 if (likely(!ret))
9845 __put_user_asm(tmp, (u64 __user *)dst,
9846 @@ -203,8 +294,16 @@ int __copy_in_user(void __user *dst, con
9847 return ret;
9848 }
9849 default:
9850 - return copy_user_generic((__force void *)dst,
9851 - (__force void *)src, size);
9852 +
9853 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9854 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9855 + src += PAX_USER_SHADOW_BASE;
9856 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9857 + dst += PAX_USER_SHADOW_BASE;
9858 +#endif
9859 +
9860 + return copy_user_generic((__force_kernel void *)dst,
9861 + (__force_kernel const void *)src, size);
9862 }
9863 }
9864
9865 @@ -221,33 +320,72 @@ __must_check unsigned long __clear_user(
9866 static __must_check __always_inline int
9867 __copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
9868 {
9869 - return copy_user_generic(dst, (__force const void *)src, size);
9870 + pax_track_stack();
9871 +
9872 + if ((int)size < 0)
9873 + return size;
9874 +
9875 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9876 + if (!__access_ok(VERIFY_READ, src, size))
9877 + return size;
9878 +
9879 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9880 + src += PAX_USER_SHADOW_BASE;
9881 +#endif
9882 +
9883 + return copy_user_generic(dst, (__force_kernel const void *)src, size);
9884 }
9885
9886 -static __must_check __always_inline int
9887 +static __must_check __always_inline unsigned long
9888 __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
9889 {
9890 - return copy_user_generic((__force void *)dst, src, size);
9891 + if ((int)size < 0)
9892 + return size;
9893 +
9894 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9895 + if (!__access_ok(VERIFY_WRITE, dst, size))
9896 + return size;
9897 +
9898 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9899 + dst += PAX_USER_SHADOW_BASE;
9900 +#endif
9901 +
9902 + return copy_user_generic((__force_kernel void *)dst, src, size);
9903 }
9904
9905 -extern long __copy_user_nocache(void *dst, const void __user *src,
9906 +extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
9907 unsigned size, int zerorest);
9908
9909 -static inline int
9910 -__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
9911 +static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
9912 {
9913 might_sleep();
9914 +
9915 + if ((int)size < 0)
9916 + return size;
9917 +
9918 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9919 + if (!__access_ok(VERIFY_READ, src, size))
9920 + return size;
9921 +#endif
9922 +
9923 return __copy_user_nocache(dst, src, size, 1);
9924 }
9925
9926 -static inline int
9927 -__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
9928 +static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
9929 unsigned size)
9930 {
9931 + if ((int)size < 0)
9932 + return size;
9933 +
9934 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9935 + if (!__access_ok(VERIFY_READ, src, size))
9936 + return size;
9937 +#endif
9938 +
9939 return __copy_user_nocache(dst, src, size, 0);
9940 }
9941
9942 -unsigned long
9943 -copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
9944 +extern unsigned long
9945 +copy_user_handle_tail(char __user *to, char __user *from, unsigned len, unsigned zerorest);
9946
9947 #endif /* _ASM_X86_UACCESS_64_H */
9948 diff -urNp linux-3.0.8/arch/x86/include/asm/uaccess.h linux-3.0.8/arch/x86/include/asm/uaccess.h
9949 --- linux-3.0.8/arch/x86/include/asm/uaccess.h 2011-07-21 22:17:23.000000000 -0400
9950 +++ linux-3.0.8/arch/x86/include/asm/uaccess.h 2011-10-06 04:17:55.000000000 -0400
9951 @@ -7,12 +7,15 @@
9952 #include <linux/compiler.h>
9953 #include <linux/thread_info.h>
9954 #include <linux/string.h>
9955 +#include <linux/sched.h>
9956 #include <asm/asm.h>
9957 #include <asm/page.h>
9958
9959 #define VERIFY_READ 0
9960 #define VERIFY_WRITE 1
9961
9962 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
9963 +
9964 /*
9965 * The fs value determines whether argument validity checking should be
9966 * performed or not. If get_fs() == USER_DS, checking is performed, with
9967 @@ -28,7 +31,12 @@
9968
9969 #define get_ds() (KERNEL_DS)
9970 #define get_fs() (current_thread_info()->addr_limit)
9971 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
9972 +void __set_fs(mm_segment_t x);
9973 +void set_fs(mm_segment_t x);
9974 +#else
9975 #define set_fs(x) (current_thread_info()->addr_limit = (x))
9976 +#endif
9977
9978 #define segment_eq(a, b) ((a).seg == (b).seg)
9979
9980 @@ -76,7 +84,33 @@
9981 * checks that the pointer is in the user space range - after calling
9982 * this function, memory access functions may still return -EFAULT.
9983 */
9984 -#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
9985 +#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
9986 +#define access_ok(type, addr, size) \
9987 +({ \
9988 + long __size = size; \
9989 + unsigned long __addr = (unsigned long)addr; \
9990 + unsigned long __addr_ao = __addr & PAGE_MASK; \
9991 + unsigned long __end_ao = __addr + __size - 1; \
9992 + bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
9993 + if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
9994 + while(__addr_ao <= __end_ao) { \
9995 + char __c_ao; \
9996 + __addr_ao += PAGE_SIZE; \
9997 + if (__size > PAGE_SIZE) \
9998 + cond_resched(); \
9999 + if (__get_user(__c_ao, (char __user *)__addr)) \
10000 + break; \
10001 + if (type != VERIFY_WRITE) { \
10002 + __addr = __addr_ao; \
10003 + continue; \
10004 + } \
10005 + if (__put_user(__c_ao, (char __user *)__addr)) \
10006 + break; \
10007 + __addr = __addr_ao; \
10008 + } \
10009 + } \
10010 + __ret_ao; \
10011 +})
10012
10013 /*
10014 * The exception table consists of pairs of addresses: the first is the
10015 @@ -182,12 +216,20 @@ extern int __get_user_bad(void);
10016 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
10017 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
10018
10019 -
10020 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
10021 +#define __copyuser_seg "gs;"
10022 +#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
10023 +#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
10024 +#else
10025 +#define __copyuser_seg
10026 +#define __COPYUSER_SET_ES
10027 +#define __COPYUSER_RESTORE_ES
10028 +#endif
10029
10030 #ifdef CONFIG_X86_32
10031 #define __put_user_asm_u64(x, addr, err, errret) \
10032 - asm volatile("1: movl %%eax,0(%2)\n" \
10033 - "2: movl %%edx,4(%2)\n" \
10034 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
10035 + "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
10036 "3:\n" \
10037 ".section .fixup,\"ax\"\n" \
10038 "4: movl %3,%0\n" \
10039 @@ -199,8 +241,8 @@ extern int __get_user_bad(void);
10040 : "A" (x), "r" (addr), "i" (errret), "0" (err))
10041
10042 #define __put_user_asm_ex_u64(x, addr) \
10043 - asm volatile("1: movl %%eax,0(%1)\n" \
10044 - "2: movl %%edx,4(%1)\n" \
10045 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
10046 + "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
10047 "3:\n" \
10048 _ASM_EXTABLE(1b, 2b - 1b) \
10049 _ASM_EXTABLE(2b, 3b - 2b) \
10050 @@ -252,7 +294,7 @@ extern void __put_user_8(void);
10051 __typeof__(*(ptr)) __pu_val; \
10052 __chk_user_ptr(ptr); \
10053 might_fault(); \
10054 - __pu_val = x; \
10055 + __pu_val = (x); \
10056 switch (sizeof(*(ptr))) { \
10057 case 1: \
10058 __put_user_x(1, __pu_val, ptr, __ret_pu); \
10059 @@ -373,7 +415,7 @@ do { \
10060 } while (0)
10061
10062 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
10063 - asm volatile("1: mov"itype" %2,%"rtype"1\n" \
10064 + asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
10065 "2:\n" \
10066 ".section .fixup,\"ax\"\n" \
10067 "3: mov %3,%0\n" \
10068 @@ -381,7 +423,7 @@ do { \
10069 " jmp 2b\n" \
10070 ".previous\n" \
10071 _ASM_EXTABLE(1b, 3b) \
10072 - : "=r" (err), ltype(x) \
10073 + : "=r" (err), ltype (x) \
10074 : "m" (__m(addr)), "i" (errret), "0" (err))
10075
10076 #define __get_user_size_ex(x, ptr, size) \
10077 @@ -406,7 +448,7 @@ do { \
10078 } while (0)
10079
10080 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
10081 - asm volatile("1: mov"itype" %1,%"rtype"0\n" \
10082 + asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
10083 "2:\n" \
10084 _ASM_EXTABLE(1b, 2b - 1b) \
10085 : ltype(x) : "m" (__m(addr)))
10086 @@ -423,13 +465,24 @@ do { \
10087 int __gu_err; \
10088 unsigned long __gu_val; \
10089 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
10090 - (x) = (__force __typeof__(*(ptr)))__gu_val; \
10091 + (x) = (__typeof__(*(ptr)))__gu_val; \
10092 __gu_err; \
10093 })
10094
10095 /* FIXME: this hack is definitely wrong -AK */
10096 struct __large_struct { unsigned long buf[100]; };
10097 -#define __m(x) (*(struct __large_struct __user *)(x))
10098 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10099 +#define ____m(x) \
10100 +({ \
10101 + unsigned long ____x = (unsigned long)(x); \
10102 + if (____x < PAX_USER_SHADOW_BASE) \
10103 + ____x += PAX_USER_SHADOW_BASE; \
10104 + (void __user *)____x; \
10105 +})
10106 +#else
10107 +#define ____m(x) (x)
10108 +#endif
10109 +#define __m(x) (*(struct __large_struct __user *)____m(x))
10110
10111 /*
10112 * Tell gcc we read from memory instead of writing: this is because
10113 @@ -437,7 +490,7 @@ struct __large_struct { unsigned long bu
10114 * aliasing issues.
10115 */
10116 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
10117 - asm volatile("1: mov"itype" %"rtype"1,%2\n" \
10118 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
10119 "2:\n" \
10120 ".section .fixup,\"ax\"\n" \
10121 "3: mov %3,%0\n" \
10122 @@ -445,10 +498,10 @@ struct __large_struct { unsigned long bu
10123 ".previous\n" \
10124 _ASM_EXTABLE(1b, 3b) \
10125 : "=r"(err) \
10126 - : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
10127 + : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
10128
10129 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
10130 - asm volatile("1: mov"itype" %"rtype"0,%1\n" \
10131 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
10132 "2:\n" \
10133 _ASM_EXTABLE(1b, 2b - 1b) \
10134 : : ltype(x), "m" (__m(addr)))
10135 @@ -487,8 +540,12 @@ struct __large_struct { unsigned long bu
10136 * On error, the variable @x is set to zero.
10137 */
10138
10139 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10140 +#define __get_user(x, ptr) get_user((x), (ptr))
10141 +#else
10142 #define __get_user(x, ptr) \
10143 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
10144 +#endif
10145
10146 /**
10147 * __put_user: - Write a simple value into user space, with less checking.
10148 @@ -510,8 +567,12 @@ struct __large_struct { unsigned long bu
10149 * Returns zero on success, or -EFAULT on error.
10150 */
10151
10152 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10153 +#define __put_user(x, ptr) put_user((x), (ptr))
10154 +#else
10155 #define __put_user(x, ptr) \
10156 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
10157 +#endif
10158
10159 #define __get_user_unaligned __get_user
10160 #define __put_user_unaligned __put_user
10161 @@ -529,7 +590,7 @@ struct __large_struct { unsigned long bu
10162 #define get_user_ex(x, ptr) do { \
10163 unsigned long __gue_val; \
10164 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
10165 - (x) = (__force __typeof__(*(ptr)))__gue_val; \
10166 + (x) = (__typeof__(*(ptr)))__gue_val; \
10167 } while (0)
10168
10169 #ifdef CONFIG_X86_WP_WORKS_OK
10170 diff -urNp linux-3.0.8/arch/x86/include/asm/vdso.h linux-3.0.8/arch/x86/include/asm/vdso.h
10171 --- linux-3.0.8/arch/x86/include/asm/vdso.h 2011-07-21 22:17:23.000000000 -0400
10172 +++ linux-3.0.8/arch/x86/include/asm/vdso.h 2011-10-06 04:17:55.000000000 -0400
10173 @@ -11,7 +11,7 @@ extern const char VDSO32_PRELINK[];
10174 #define VDSO32_SYMBOL(base, name) \
10175 ({ \
10176 extern const char VDSO32_##name[]; \
10177 - (void *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
10178 + (void __user *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
10179 })
10180 #endif
10181
10182 diff -urNp linux-3.0.8/arch/x86/include/asm/x86_init.h linux-3.0.8/arch/x86/include/asm/x86_init.h
10183 --- linux-3.0.8/arch/x86/include/asm/x86_init.h 2011-07-21 22:17:23.000000000 -0400
10184 +++ linux-3.0.8/arch/x86/include/asm/x86_init.h 2011-08-23 21:47:55.000000000 -0400
10185 @@ -28,7 +28,7 @@ struct x86_init_mpparse {
10186 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
10187 void (*find_smp_config)(void);
10188 void (*get_smp_config)(unsigned int early);
10189 -};
10190 +} __no_const;
10191
10192 /**
10193 * struct x86_init_resources - platform specific resource related ops
10194 @@ -42,7 +42,7 @@ struct x86_init_resources {
10195 void (*probe_roms)(void);
10196 void (*reserve_resources)(void);
10197 char *(*memory_setup)(void);
10198 -};
10199 +} __no_const;
10200
10201 /**
10202 * struct x86_init_irqs - platform specific interrupt setup
10203 @@ -55,7 +55,7 @@ struct x86_init_irqs {
10204 void (*pre_vector_init)(void);
10205 void (*intr_init)(void);
10206 void (*trap_init)(void);
10207 -};
10208 +} __no_const;
10209
10210 /**
10211 * struct x86_init_oem - oem platform specific customizing functions
10212 @@ -65,7 +65,7 @@ struct x86_init_irqs {
10213 struct x86_init_oem {
10214 void (*arch_setup)(void);
10215 void (*banner)(void);
10216 -};
10217 +} __no_const;
10218
10219 /**
10220 * struct x86_init_mapping - platform specific initial kernel pagetable setup
10221 @@ -76,7 +76,7 @@ struct x86_init_oem {
10222 */
10223 struct x86_init_mapping {
10224 void (*pagetable_reserve)(u64 start, u64 end);
10225 -};
10226 +} __no_const;
10227
10228 /**
10229 * struct x86_init_paging - platform specific paging functions
10230 @@ -86,7 +86,7 @@ struct x86_init_mapping {
10231 struct x86_init_paging {
10232 void (*pagetable_setup_start)(pgd_t *base);
10233 void (*pagetable_setup_done)(pgd_t *base);
10234 -};
10235 +} __no_const;
10236
10237 /**
10238 * struct x86_init_timers - platform specific timer setup
10239 @@ -101,7 +101,7 @@ struct x86_init_timers {
10240 void (*tsc_pre_init)(void);
10241 void (*timer_init)(void);
10242 void (*wallclock_init)(void);
10243 -};
10244 +} __no_const;
10245
10246 /**
10247 * struct x86_init_iommu - platform specific iommu setup
10248 @@ -109,7 +109,7 @@ struct x86_init_timers {
10249 */
10250 struct x86_init_iommu {
10251 int (*iommu_init)(void);
10252 -};
10253 +} __no_const;
10254
10255 /**
10256 * struct x86_init_pci - platform specific pci init functions
10257 @@ -123,7 +123,7 @@ struct x86_init_pci {
10258 int (*init)(void);
10259 void (*init_irq)(void);
10260 void (*fixup_irqs)(void);
10261 -};
10262 +} __no_const;
10263
10264 /**
10265 * struct x86_init_ops - functions for platform specific setup
10266 @@ -139,7 +139,7 @@ struct x86_init_ops {
10267 struct x86_init_timers timers;
10268 struct x86_init_iommu iommu;
10269 struct x86_init_pci pci;
10270 -};
10271 +} __no_const;
10272
10273 /**
10274 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
10275 @@ -147,7 +147,7 @@ struct x86_init_ops {
10276 */
10277 struct x86_cpuinit_ops {
10278 void (*setup_percpu_clockev)(void);
10279 -};
10280 +} __no_const;
10281
10282 /**
10283 * struct x86_platform_ops - platform specific runtime functions
10284 @@ -166,7 +166,7 @@ struct x86_platform_ops {
10285 bool (*is_untracked_pat_range)(u64 start, u64 end);
10286 void (*nmi_init)(void);
10287 int (*i8042_detect)(void);
10288 -};
10289 +} __no_const;
10290
10291 struct pci_dev;
10292
10293 @@ -174,7 +174,7 @@ struct x86_msi_ops {
10294 int (*setup_msi_irqs)(struct pci_dev *dev, int nvec, int type);
10295 void (*teardown_msi_irq)(unsigned int irq);
10296 void (*teardown_msi_irqs)(struct pci_dev *dev);
10297 -};
10298 +} __no_const;
10299
10300 extern struct x86_init_ops x86_init;
10301 extern struct x86_cpuinit_ops x86_cpuinit;
10302 diff -urNp linux-3.0.8/arch/x86/include/asm/xsave.h linux-3.0.8/arch/x86/include/asm/xsave.h
10303 --- linux-3.0.8/arch/x86/include/asm/xsave.h 2011-07-21 22:17:23.000000000 -0400
10304 +++ linux-3.0.8/arch/x86/include/asm/xsave.h 2011-10-06 04:17:55.000000000 -0400
10305 @@ -65,6 +65,11 @@ static inline int xsave_user(struct xsav
10306 {
10307 int err;
10308
10309 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10310 + if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
10311 + buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
10312 +#endif
10313 +
10314 /*
10315 * Clear the xsave header first, so that reserved fields are
10316 * initialized to zero.
10317 @@ -96,10 +101,15 @@ static inline int xsave_user(struct xsav
10318 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
10319 {
10320 int err;
10321 - struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
10322 + struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
10323 u32 lmask = mask;
10324 u32 hmask = mask >> 32;
10325
10326 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10327 + if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
10328 + xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
10329 +#endif
10330 +
10331 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
10332 "2:\n"
10333 ".section .fixup,\"ax\"\n"
10334 diff -urNp linux-3.0.8/arch/x86/Kconfig linux-3.0.8/arch/x86/Kconfig
10335 --- linux-3.0.8/arch/x86/Kconfig 2011-07-21 22:17:23.000000000 -0400
10336 +++ linux-3.0.8/arch/x86/Kconfig 2011-09-17 00:58:36.000000000 -0400
10337 @@ -229,7 +229,7 @@ config X86_HT
10338
10339 config X86_32_LAZY_GS
10340 def_bool y
10341 - depends on X86_32 && !CC_STACKPROTECTOR
10342 + depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
10343
10344 config ARCH_HWEIGHT_CFLAGS
10345 string
10346 @@ -1018,7 +1018,7 @@ choice
10347
10348 config NOHIGHMEM
10349 bool "off"
10350 - depends on !X86_NUMAQ
10351 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
10352 ---help---
10353 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
10354 However, the address space of 32-bit x86 processors is only 4
10355 @@ -1055,7 +1055,7 @@ config NOHIGHMEM
10356
10357 config HIGHMEM4G
10358 bool "4GB"
10359 - depends on !X86_NUMAQ
10360 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
10361 ---help---
10362 Select this if you have a 32-bit processor and between 1 and 4
10363 gigabytes of physical RAM.
10364 @@ -1109,7 +1109,7 @@ config PAGE_OFFSET
10365 hex
10366 default 0xB0000000 if VMSPLIT_3G_OPT
10367 default 0x80000000 if VMSPLIT_2G
10368 - default 0x78000000 if VMSPLIT_2G_OPT
10369 + default 0x70000000 if VMSPLIT_2G_OPT
10370 default 0x40000000 if VMSPLIT_1G
10371 default 0xC0000000
10372 depends on X86_32
10373 @@ -1483,6 +1483,7 @@ config SECCOMP
10374
10375 config CC_STACKPROTECTOR
10376 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
10377 + depends on X86_64 || !PAX_MEMORY_UDEREF
10378 ---help---
10379 This option turns on the -fstack-protector GCC feature. This
10380 feature puts, at the beginning of functions, a canary value on
10381 @@ -1540,6 +1541,7 @@ config KEXEC_JUMP
10382 config PHYSICAL_START
10383 hex "Physical address where the kernel is loaded" if (EXPERT || CRASH_DUMP)
10384 default "0x1000000"
10385 + range 0x400000 0x40000000
10386 ---help---
10387 This gives the physical address where the kernel is loaded.
10388
10389 @@ -1603,6 +1605,7 @@ config X86_NEED_RELOCS
10390 config PHYSICAL_ALIGN
10391 hex "Alignment value to which kernel should be aligned" if X86_32
10392 default "0x1000000"
10393 + range 0x400000 0x1000000 if PAX_KERNEXEC
10394 range 0x2000 0x1000000
10395 ---help---
10396 This value puts the alignment restrictions on physical address
10397 @@ -1634,9 +1637,10 @@ config HOTPLUG_CPU
10398 Say N if you want to disable CPU hotplug.
10399
10400 config COMPAT_VDSO
10401 - def_bool y
10402 + def_bool n
10403 prompt "Compat VDSO support"
10404 depends on X86_32 || IA32_EMULATION
10405 + depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
10406 ---help---
10407 Map the 32-bit VDSO to the predictable old-style address too.
10408
10409 diff -urNp linux-3.0.8/arch/x86/Kconfig.cpu linux-3.0.8/arch/x86/Kconfig.cpu
10410 --- linux-3.0.8/arch/x86/Kconfig.cpu 2011-07-21 22:17:23.000000000 -0400
10411 +++ linux-3.0.8/arch/x86/Kconfig.cpu 2011-08-23 21:47:55.000000000 -0400
10412 @@ -338,7 +338,7 @@ config X86_PPRO_FENCE
10413
10414 config X86_F00F_BUG
10415 def_bool y
10416 - depends on M586MMX || M586TSC || M586 || M486 || M386
10417 + depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
10418
10419 config X86_INVD_BUG
10420 def_bool y
10421 @@ -362,7 +362,7 @@ config X86_POPAD_OK
10422
10423 config X86_ALIGNMENT_16
10424 def_bool y
10425 - depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
10426 + depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
10427
10428 config X86_INTEL_USERCOPY
10429 def_bool y
10430 @@ -408,7 +408,7 @@ config X86_CMPXCHG64
10431 # generates cmov.
10432 config X86_CMOV
10433 def_bool y
10434 - depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
10435 + depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
10436
10437 config X86_MINIMUM_CPU_FAMILY
10438 int
10439 diff -urNp linux-3.0.8/arch/x86/Kconfig.debug linux-3.0.8/arch/x86/Kconfig.debug
10440 --- linux-3.0.8/arch/x86/Kconfig.debug 2011-07-21 22:17:23.000000000 -0400
10441 +++ linux-3.0.8/arch/x86/Kconfig.debug 2011-08-23 21:47:55.000000000 -0400
10442 @@ -81,7 +81,7 @@ config X86_PTDUMP
10443 config DEBUG_RODATA
10444 bool "Write protect kernel read-only data structures"
10445 default y
10446 - depends on DEBUG_KERNEL
10447 + depends on DEBUG_KERNEL && BROKEN
10448 ---help---
10449 Mark the kernel read-only data as write-protected in the pagetables,
10450 in order to catch accidental (and incorrect) writes to such const
10451 @@ -99,7 +99,7 @@ config DEBUG_RODATA_TEST
10452
10453 config DEBUG_SET_MODULE_RONX
10454 bool "Set loadable kernel module data as NX and text as RO"
10455 - depends on MODULES
10456 + depends on MODULES && BROKEN
10457 ---help---
10458 This option helps catch unintended modifications to loadable
10459 kernel module's text and read-only data. It also prevents execution
10460 diff -urNp linux-3.0.8/arch/x86/kernel/acpi/realmode/Makefile linux-3.0.8/arch/x86/kernel/acpi/realmode/Makefile
10461 --- linux-3.0.8/arch/x86/kernel/acpi/realmode/Makefile 2011-07-21 22:17:23.000000000 -0400
10462 +++ linux-3.0.8/arch/x86/kernel/acpi/realmode/Makefile 2011-08-23 21:47:55.000000000 -0400
10463 @@ -41,6 +41,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os
10464 $(call cc-option, -fno-stack-protector) \
10465 $(call cc-option, -mpreferred-stack-boundary=2)
10466 KBUILD_CFLAGS += $(call cc-option, -m32)
10467 +ifdef CONSTIFY_PLUGIN
10468 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
10469 +endif
10470 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
10471 GCOV_PROFILE := n
10472
10473 diff -urNp linux-3.0.8/arch/x86/kernel/acpi/realmode/wakeup.S linux-3.0.8/arch/x86/kernel/acpi/realmode/wakeup.S
10474 --- linux-3.0.8/arch/x86/kernel/acpi/realmode/wakeup.S 2011-07-21 22:17:23.000000000 -0400
10475 +++ linux-3.0.8/arch/x86/kernel/acpi/realmode/wakeup.S 2011-08-23 21:48:14.000000000 -0400
10476 @@ -108,6 +108,9 @@ wakeup_code:
10477 /* Do any other stuff... */
10478
10479 #ifndef CONFIG_64BIT
10480 + /* Recheck NX bit overrides (64bit path does this in trampoline */
10481 + call verify_cpu
10482 +
10483 /* This could also be done in C code... */
10484 movl pmode_cr3, %eax
10485 movl %eax, %cr3
10486 @@ -131,6 +134,7 @@ wakeup_code:
10487 movl pmode_cr0, %eax
10488 movl %eax, %cr0
10489 jmp pmode_return
10490 +# include "../../verify_cpu.S"
10491 #else
10492 pushw $0
10493 pushw trampoline_segment
10494 diff -urNp linux-3.0.8/arch/x86/kernel/acpi/sleep.c linux-3.0.8/arch/x86/kernel/acpi/sleep.c
10495 --- linux-3.0.8/arch/x86/kernel/acpi/sleep.c 2011-07-21 22:17:23.000000000 -0400
10496 +++ linux-3.0.8/arch/x86/kernel/acpi/sleep.c 2011-08-23 21:47:55.000000000 -0400
10497 @@ -94,8 +94,12 @@ int acpi_suspend_lowlevel(void)
10498 header->trampoline_segment = trampoline_address() >> 4;
10499 #ifdef CONFIG_SMP
10500 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
10501 +
10502 + pax_open_kernel();
10503 early_gdt_descr.address =
10504 (unsigned long)get_cpu_gdt_table(smp_processor_id());
10505 + pax_close_kernel();
10506 +
10507 initial_gs = per_cpu_offset(smp_processor_id());
10508 #endif
10509 initial_code = (unsigned long)wakeup_long64;
10510 diff -urNp linux-3.0.8/arch/x86/kernel/acpi/wakeup_32.S linux-3.0.8/arch/x86/kernel/acpi/wakeup_32.S
10511 --- linux-3.0.8/arch/x86/kernel/acpi/wakeup_32.S 2011-07-21 22:17:23.000000000 -0400
10512 +++ linux-3.0.8/arch/x86/kernel/acpi/wakeup_32.S 2011-08-23 21:47:55.000000000 -0400
10513 @@ -30,13 +30,11 @@ wakeup_pmode_return:
10514 # and restore the stack ... but you need gdt for this to work
10515 movl saved_context_esp, %esp
10516
10517 - movl %cs:saved_magic, %eax
10518 - cmpl $0x12345678, %eax
10519 + cmpl $0x12345678, saved_magic
10520 jne bogus_magic
10521
10522 # jump to place where we left off
10523 - movl saved_eip, %eax
10524 - jmp *%eax
10525 + jmp *(saved_eip)
10526
10527 bogus_magic:
10528 jmp bogus_magic
10529 diff -urNp linux-3.0.8/arch/x86/kernel/alternative.c linux-3.0.8/arch/x86/kernel/alternative.c
10530 --- linux-3.0.8/arch/x86/kernel/alternative.c 2011-07-21 22:17:23.000000000 -0400
10531 +++ linux-3.0.8/arch/x86/kernel/alternative.c 2011-08-23 21:47:55.000000000 -0400
10532 @@ -313,7 +313,7 @@ static void alternatives_smp_lock(const
10533 if (!*poff || ptr < text || ptr >= text_end)
10534 continue;
10535 /* turn DS segment override prefix into lock prefix */
10536 - if (*ptr == 0x3e)
10537 + if (*ktla_ktva(ptr) == 0x3e)
10538 text_poke(ptr, ((unsigned char []){0xf0}), 1);
10539 };
10540 mutex_unlock(&text_mutex);
10541 @@ -334,7 +334,7 @@ static void alternatives_smp_unlock(cons
10542 if (!*poff || ptr < text || ptr >= text_end)
10543 continue;
10544 /* turn lock prefix into DS segment override prefix */
10545 - if (*ptr == 0xf0)
10546 + if (*ktla_ktva(ptr) == 0xf0)
10547 text_poke(ptr, ((unsigned char []){0x3E}), 1);
10548 };
10549 mutex_unlock(&text_mutex);
10550 @@ -503,7 +503,7 @@ void __init_or_module apply_paravirt(str
10551
10552 BUG_ON(p->len > MAX_PATCH_LEN);
10553 /* prep the buffer with the original instructions */
10554 - memcpy(insnbuf, p->instr, p->len);
10555 + memcpy(insnbuf, ktla_ktva(p->instr), p->len);
10556 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
10557 (unsigned long)p->instr, p->len);
10558
10559 @@ -571,7 +571,7 @@ void __init alternative_instructions(voi
10560 if (smp_alt_once)
10561 free_init_pages("SMP alternatives",
10562 (unsigned long)__smp_locks,
10563 - (unsigned long)__smp_locks_end);
10564 + PAGE_ALIGN((unsigned long)__smp_locks_end));
10565
10566 restart_nmi();
10567 }
10568 @@ -588,13 +588,17 @@ void __init alternative_instructions(voi
9569  * instructions. And on the local CPU you need to be protected against NMI or MCE
10570 * handlers seeing an inconsistent instruction while you patch.
10571 */
10572 -void *__init_or_module text_poke_early(void *addr, const void *opcode,
10573 +void *__kprobes text_poke_early(void *addr, const void *opcode,
10574 size_t len)
10575 {
10576 unsigned long flags;
10577 local_irq_save(flags);
10578 - memcpy(addr, opcode, len);
10579 +
10580 + pax_open_kernel();
10581 + memcpy(ktla_ktva(addr), opcode, len);
10582 sync_core();
10583 + pax_close_kernel();
10584 +
10585 local_irq_restore(flags);
10586 /* Could also do a CLFLUSH here to speed up CPU recovery; but
10587 that causes hangs on some VIA CPUs. */
10588 @@ -616,36 +620,22 @@ void *__init_or_module text_poke_early(v
10589 */
10590 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
10591 {
10592 - unsigned long flags;
10593 - char *vaddr;
10594 + unsigned char *vaddr = ktla_ktva(addr);
10595 struct page *pages[2];
10596 - int i;
10597 + size_t i;
10598
10599 if (!core_kernel_text((unsigned long)addr)) {
10600 - pages[0] = vmalloc_to_page(addr);
10601 - pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
10602 + pages[0] = vmalloc_to_page(vaddr);
10603 + pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
10604 } else {
10605 - pages[0] = virt_to_page(addr);
10606 + pages[0] = virt_to_page(vaddr);
10607 WARN_ON(!PageReserved(pages[0]));
10608 - pages[1] = virt_to_page(addr + PAGE_SIZE);
10609 + pages[1] = virt_to_page(vaddr + PAGE_SIZE);
10610 }
10611 BUG_ON(!pages[0]);
10612 - local_irq_save(flags);
10613 - set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
10614 - if (pages[1])
10615 - set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
10616 - vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
10617 - memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
10618 - clear_fixmap(FIX_TEXT_POKE0);
10619 - if (pages[1])
10620 - clear_fixmap(FIX_TEXT_POKE1);
10621 - local_flush_tlb();
10622 - sync_core();
10623 - /* Could also do a CLFLUSH here to speed up CPU recovery; but
10624 - that causes hangs on some VIA CPUs. */
10625 + text_poke_early(addr, opcode, len);
10626 for (i = 0; i < len; i++)
10627 - BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
10628 - local_irq_restore(flags);
10629 + BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
10630 return addr;
10631 }
10632
10633 diff -urNp linux-3.0.8/arch/x86/kernel/apic/apic.c linux-3.0.8/arch/x86/kernel/apic/apic.c
10634 --- linux-3.0.8/arch/x86/kernel/apic/apic.c 2011-07-21 22:17:23.000000000 -0400
10635 +++ linux-3.0.8/arch/x86/kernel/apic/apic.c 2011-08-23 21:48:14.000000000 -0400
10636 @@ -173,7 +173,7 @@ int first_system_vector = 0xfe;
10637 /*
10638 * Debug level, exported for io_apic.c
10639 */
10640 -unsigned int apic_verbosity;
10641 +int apic_verbosity;
10642
10643 int pic_mode;
10644
10645 @@ -1834,7 +1834,7 @@ void smp_error_interrupt(struct pt_regs
10646 apic_write(APIC_ESR, 0);
10647 v1 = apic_read(APIC_ESR);
10648 ack_APIC_irq();
10649 - atomic_inc(&irq_err_count);
10650 + atomic_inc_unchecked(&irq_err_count);
10651
10652 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x(%02x)",
10653 smp_processor_id(), v0 , v1);
10654 @@ -2190,6 +2190,8 @@ static int __cpuinit apic_cluster_num(vo
10655 u16 *bios_cpu_apicid;
10656 DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);
10657
10658 + pax_track_stack();
10659 +
10660 bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
10661 bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
10662
10663 diff -urNp linux-3.0.8/arch/x86/kernel/apic/io_apic.c linux-3.0.8/arch/x86/kernel/apic/io_apic.c
10664 --- linux-3.0.8/arch/x86/kernel/apic/io_apic.c 2011-07-21 22:17:23.000000000 -0400
10665 +++ linux-3.0.8/arch/x86/kernel/apic/io_apic.c 2011-08-23 21:47:55.000000000 -0400
10666 @@ -1028,7 +1028,7 @@ int IO_APIC_get_PCI_irq_vector(int bus,
10667 }
10668 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
10669
10670 -void lock_vector_lock(void)
10671 +void lock_vector_lock(void) __acquires(vector_lock)
10672 {
9673  	/* Used to ensure the online set of cpus does not change
10674 * during assign_irq_vector.
10675 @@ -1036,7 +1036,7 @@ void lock_vector_lock(void)
10676 raw_spin_lock(&vector_lock);
10677 }
10678
10679 -void unlock_vector_lock(void)
10680 +void unlock_vector_lock(void) __releases(vector_lock)
10681 {
10682 raw_spin_unlock(&vector_lock);
10683 }
10684 @@ -2364,7 +2364,7 @@ static void ack_apic_edge(struct irq_dat
10685 ack_APIC_irq();
10686 }
10687
10688 -atomic_t irq_mis_count;
10689 +atomic_unchecked_t irq_mis_count;
10690
10691 /*
10692 * IO-APIC versions below 0x20 don't support EOI register.
10693 @@ -2472,7 +2472,7 @@ static void ack_apic_level(struct irq_da
10694 * at the cpu.
10695 */
10696 if (!(v & (1 << (i & 0x1f)))) {
10697 - atomic_inc(&irq_mis_count);
10698 + atomic_inc_unchecked(&irq_mis_count);
10699
10700 eoi_ioapic_irq(irq, cfg);
10701 }
10702 diff -urNp linux-3.0.8/arch/x86/kernel/apm_32.c linux-3.0.8/arch/x86/kernel/apm_32.c
10703 --- linux-3.0.8/arch/x86/kernel/apm_32.c 2011-07-21 22:17:23.000000000 -0400
10704 +++ linux-3.0.8/arch/x86/kernel/apm_32.c 2011-08-23 21:47:55.000000000 -0400
10705 @@ -413,7 +413,7 @@ static DEFINE_MUTEX(apm_mutex);
10706 * This is for buggy BIOS's that refer to (real mode) segment 0x40
10707 * even though they are called in protected mode.
10708 */
10709 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
10710 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
10711 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
10712
10713 static const char driver_version[] = "1.16ac"; /* no spaces */
10714 @@ -591,7 +591,10 @@ static long __apm_bios_call(void *_call)
10715 BUG_ON(cpu != 0);
10716 gdt = get_cpu_gdt_table(cpu);
10717 save_desc_40 = gdt[0x40 / 8];
10718 +
10719 + pax_open_kernel();
10720 gdt[0x40 / 8] = bad_bios_desc;
10721 + pax_close_kernel();
10722
10723 apm_irq_save(flags);
10724 APM_DO_SAVE_SEGS;
10725 @@ -600,7 +603,11 @@ static long __apm_bios_call(void *_call)
10726 &call->esi);
10727 APM_DO_RESTORE_SEGS;
10728 apm_irq_restore(flags);
10729 +
10730 + pax_open_kernel();
10731 gdt[0x40 / 8] = save_desc_40;
10732 + pax_close_kernel();
10733 +
10734 put_cpu();
10735
10736 return call->eax & 0xff;
10737 @@ -667,7 +674,10 @@ static long __apm_bios_call_simple(void
10738 BUG_ON(cpu != 0);
10739 gdt = get_cpu_gdt_table(cpu);
10740 save_desc_40 = gdt[0x40 / 8];
10741 +
10742 + pax_open_kernel();
10743 gdt[0x40 / 8] = bad_bios_desc;
10744 + pax_close_kernel();
10745
10746 apm_irq_save(flags);
10747 APM_DO_SAVE_SEGS;
10748 @@ -675,7 +685,11 @@ static long __apm_bios_call_simple(void
10749 &call->eax);
10750 APM_DO_RESTORE_SEGS;
10751 apm_irq_restore(flags);
10752 +
10753 + pax_open_kernel();
10754 gdt[0x40 / 8] = save_desc_40;
10755 + pax_close_kernel();
10756 +
10757 put_cpu();
10758 return error;
10759 }
10760 @@ -2349,12 +2363,15 @@ static int __init apm_init(void)
10761 * code to that CPU.
10762 */
10763 gdt = get_cpu_gdt_table(0);
10764 +
10765 + pax_open_kernel();
10766 set_desc_base(&gdt[APM_CS >> 3],
10767 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
10768 set_desc_base(&gdt[APM_CS_16 >> 3],
10769 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
10770 set_desc_base(&gdt[APM_DS >> 3],
10771 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
10772 + pax_close_kernel();
10773
10774 proc_create("apm", 0, NULL, &apm_file_ops);
10775
10776 diff -urNp linux-3.0.8/arch/x86/kernel/asm-offsets_64.c linux-3.0.8/arch/x86/kernel/asm-offsets_64.c
10777 --- linux-3.0.8/arch/x86/kernel/asm-offsets_64.c 2011-07-21 22:17:23.000000000 -0400
10778 +++ linux-3.0.8/arch/x86/kernel/asm-offsets_64.c 2011-08-23 21:47:55.000000000 -0400
10779 @@ -69,6 +69,7 @@ int main(void)
10780 BLANK();
10781 #undef ENTRY
10782
10783 + DEFINE(TSS_size, sizeof(struct tss_struct));
10784 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
10785 BLANK();
10786
10787 diff -urNp linux-3.0.8/arch/x86/kernel/asm-offsets.c linux-3.0.8/arch/x86/kernel/asm-offsets.c
10788 --- linux-3.0.8/arch/x86/kernel/asm-offsets.c 2011-07-21 22:17:23.000000000 -0400
10789 +++ linux-3.0.8/arch/x86/kernel/asm-offsets.c 2011-08-23 21:47:55.000000000 -0400
10790 @@ -33,6 +33,8 @@ void common(void) {
10791 OFFSET(TI_status, thread_info, status);
10792 OFFSET(TI_addr_limit, thread_info, addr_limit);
10793 OFFSET(TI_preempt_count, thread_info, preempt_count);
10794 + OFFSET(TI_lowest_stack, thread_info, lowest_stack);
10795 + DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
10796
10797 BLANK();
10798 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
10799 @@ -53,8 +55,26 @@ void common(void) {
10800 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
10801 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
10802 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
10803 +
10804 +#ifdef CONFIG_PAX_KERNEXEC
10805 + OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
10806 +#endif
10807 +
10808 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10809 + OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
10810 + OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
10811 +#ifdef CONFIG_X86_64
10812 + OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
10813 +#endif
10814 #endif
10815
10816 +#endif
10817 +
10818 + BLANK();
10819 + DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
10820 + DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
10821 + DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
10822 +
10823 #ifdef CONFIG_XEN
10824 BLANK();
10825 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
10826 diff -urNp linux-3.0.8/arch/x86/kernel/cpu/amd.c linux-3.0.8/arch/x86/kernel/cpu/amd.c
10827 --- linux-3.0.8/arch/x86/kernel/cpu/amd.c 2011-07-21 22:17:23.000000000 -0400
10828 +++ linux-3.0.8/arch/x86/kernel/cpu/amd.c 2011-08-23 21:47:55.000000000 -0400
10829 @@ -647,7 +647,7 @@ static unsigned int __cpuinit amd_size_c
10830 unsigned int size)
10831 {
10832 /* AMD errata T13 (order #21922) */
10833 - if ((c->x86 == 6)) {
10834 + if (c->x86 == 6) {
10835 /* Duron Rev A0 */
10836 if (c->x86_model == 3 && c->x86_mask == 0)
10837 size = 64;
10838 diff -urNp linux-3.0.8/arch/x86/kernel/cpu/common.c linux-3.0.8/arch/x86/kernel/cpu/common.c
10839 --- linux-3.0.8/arch/x86/kernel/cpu/common.c 2011-07-21 22:17:23.000000000 -0400
10840 +++ linux-3.0.8/arch/x86/kernel/cpu/common.c 2011-08-23 21:47:55.000000000 -0400
10841 @@ -83,60 +83,6 @@ static const struct cpu_dev __cpuinitcon
10842
10843 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
10844
10845 -DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
10846 -#ifdef CONFIG_X86_64
10847 - /*
10848 - * We need valid kernel segments for data and code in long mode too
10849 - * IRET will check the segment types kkeil 2000/10/28
10850 - * Also sysret mandates a special GDT layout
10851 - *
10852 - * TLS descriptors are currently at a different place compared to i386.
10853 - * Hopefully nobody expects them at a fixed place (Wine?)
10854 - */
10855 - [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
10856 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
10857 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
10858 - [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
10859 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
10860 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
10861 -#else
10862 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
10863 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
10864 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
10865 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
10866 - /*
10867 - * Segments used for calling PnP BIOS have byte granularity.
10868 - * They code segments and data segments have fixed 64k limits,
10869 - * the transfer segment sizes are set at run time.
10870 - */
10871 - /* 32-bit code */
10872 - [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
10873 - /* 16-bit code */
10874 - [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
10875 - /* 16-bit data */
10876 - [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
10877 - /* 16-bit data */
10878 - [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
10879 - /* 16-bit data */
10880 - [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
10881 - /*
10882 - * The APM segments have byte granularity and their bases
10883 - * are set at run time. All have 64k limits.
10884 - */
10885 - /* 32-bit code */
10886 - [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
10887 - /* 16-bit code */
10888 - [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
10889 - /* data */
10890 - [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
10891 -
10892 - [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
10893 - [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
10894 - GDT_STACK_CANARY_INIT
10895 -#endif
10896 -} };
10897 -EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
10898 -
10899 static int __init x86_xsave_setup(char *s)
10900 {
10901 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
10902 @@ -371,7 +317,7 @@ void switch_to_new_gdt(int cpu)
10903 {
10904 struct desc_ptr gdt_descr;
10905
10906 - gdt_descr.address = (long)get_cpu_gdt_table(cpu);
10907 + gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
10908 gdt_descr.size = GDT_SIZE - 1;
10909 load_gdt(&gdt_descr);
10910 /* Reload the per-cpu base */
10911 @@ -840,6 +786,10 @@ static void __cpuinit identify_cpu(struc
10912 /* Filter out anything that depends on CPUID levels we don't have */
10913 filter_cpuid_features(c, true);
10914
10915 +#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || (defined(CONFIG_PAX_MEMORY_UDEREF) && defined(CONFIG_X86_32))
10916 + setup_clear_cpu_cap(X86_FEATURE_SEP);
10917 +#endif
10918 +
10919 /* If the model name is still unset, do table lookup. */
10920 if (!c->x86_model_id[0]) {
10921 const char *p;
10922 @@ -1019,6 +969,9 @@ static __init int setup_disablecpuid(cha
10923 }
10924 __setup("clearcpuid=", setup_disablecpuid);
10925
10926 +DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
10927 +EXPORT_PER_CPU_SYMBOL(current_tinfo);
10928 +
10929 #ifdef CONFIG_X86_64
10930 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
10931
10932 @@ -1034,7 +987,7 @@ DEFINE_PER_CPU(struct task_struct *, cur
10933 EXPORT_PER_CPU_SYMBOL(current_task);
10934
10935 DEFINE_PER_CPU(unsigned long, kernel_stack) =
10936 - (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
10937 + (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
10938 EXPORT_PER_CPU_SYMBOL(kernel_stack);
10939
10940 DEFINE_PER_CPU(char *, irq_stack_ptr) =
10941 @@ -1099,7 +1052,7 @@ struct pt_regs * __cpuinit idle_regs(str
10942 {
10943 memset(regs, 0, sizeof(struct pt_regs));
10944 regs->fs = __KERNEL_PERCPU;
10945 - regs->gs = __KERNEL_STACK_CANARY;
10946 + savesegment(gs, regs->gs);
10947
10948 return regs;
10949 }
10950 @@ -1154,7 +1107,7 @@ void __cpuinit cpu_init(void)
10951 int i;
10952
10953 cpu = stack_smp_processor_id();
10954 - t = &per_cpu(init_tss, cpu);
10955 + t = init_tss + cpu;
10956 oist = &per_cpu(orig_ist, cpu);
10957
10958 #ifdef CONFIG_NUMA
10959 @@ -1180,7 +1133,7 @@ void __cpuinit cpu_init(void)
10960 switch_to_new_gdt(cpu);
10961 loadsegment(fs, 0);
10962
10963 - load_idt((const struct desc_ptr *)&idt_descr);
10964 + load_idt(&idt_descr);
10965
10966 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
10967 syscall_init();
10968 @@ -1189,7 +1142,6 @@ void __cpuinit cpu_init(void)
10969 wrmsrl(MSR_KERNEL_GS_BASE, 0);
10970 barrier();
10971
10972 - x86_configure_nx();
10973 if (cpu != 0)
10974 enable_x2apic();
10975
10976 @@ -1243,7 +1195,7 @@ void __cpuinit cpu_init(void)
10977 {
10978 int cpu = smp_processor_id();
10979 struct task_struct *curr = current;
10980 - struct tss_struct *t = &per_cpu(init_tss, cpu);
10981 + struct tss_struct *t = init_tss + cpu;
10982 struct thread_struct *thread = &curr->thread;
10983
10984 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
10985 diff -urNp linux-3.0.8/arch/x86/kernel/cpu/intel.c linux-3.0.8/arch/x86/kernel/cpu/intel.c
10986 --- linux-3.0.8/arch/x86/kernel/cpu/intel.c 2011-10-24 08:05:23.000000000 -0400
10987 +++ linux-3.0.8/arch/x86/kernel/cpu/intel.c 2011-08-29 23:30:14.000000000 -0400
10988 @@ -172,7 +172,7 @@ static void __cpuinit trap_init_f00f_bug
10989 * Update the IDT descriptor and reload the IDT so that
10990 * it uses the read-only mapped virtual address.
10991 */
10992 - idt_descr.address = fix_to_virt(FIX_F00F_IDT);
10993 + idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
10994 load_idt(&idt_descr);
10995 }
10996 #endif
10997 diff -urNp linux-3.0.8/arch/x86/kernel/cpu/Makefile linux-3.0.8/arch/x86/kernel/cpu/Makefile
10998 --- linux-3.0.8/arch/x86/kernel/cpu/Makefile 2011-07-21 22:17:23.000000000 -0400
10999 +++ linux-3.0.8/arch/x86/kernel/cpu/Makefile 2011-08-23 21:47:55.000000000 -0400
11000 @@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
11001 CFLAGS_REMOVE_perf_event.o = -pg
11002 endif
11003
11004 -# Make sure load_percpu_segment has no stackprotector
11005 -nostackp := $(call cc-option, -fno-stack-protector)
11006 -CFLAGS_common.o := $(nostackp)
11007 -
11008 obj-y := intel_cacheinfo.o scattered.o topology.o
11009 obj-y += proc.o capflags.o powerflags.o common.o
11010 obj-y += vmware.o hypervisor.o sched.o mshyperv.o
11011 diff -urNp linux-3.0.8/arch/x86/kernel/cpu/mcheck/mce.c linux-3.0.8/arch/x86/kernel/cpu/mcheck/mce.c
11012 --- linux-3.0.8/arch/x86/kernel/cpu/mcheck/mce.c 2011-07-21 22:17:23.000000000 -0400
11013 +++ linux-3.0.8/arch/x86/kernel/cpu/mcheck/mce.c 2011-08-23 21:47:55.000000000 -0400
11014 @@ -46,6 +46,7 @@
11015 #include <asm/ipi.h>
11016 #include <asm/mce.h>
11017 #include <asm/msr.h>
11018 +#include <asm/local.h>
11019
11020 #include "mce-internal.h"
11021
11022 @@ -208,7 +209,7 @@ static void print_mce(struct mce *m)
11023 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
11024 m->cs, m->ip);
11025
11026 - if (m->cs == __KERNEL_CS)
11027 + if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
11028 print_symbol("{%s}", m->ip);
11029 pr_cont("\n");
11030 }
11031 @@ -236,10 +237,10 @@ static void print_mce(struct mce *m)
11032
11033 #define PANIC_TIMEOUT 5 /* 5 seconds */
11034
11035 -static atomic_t mce_paniced;
11036 +static atomic_unchecked_t mce_paniced;
11037
11038 static int fake_panic;
11039 -static atomic_t mce_fake_paniced;
11040 +static atomic_unchecked_t mce_fake_paniced;
11041
11042 /* Panic in progress. Enable interrupts and wait for final IPI */
11043 static void wait_for_panic(void)
11044 @@ -263,7 +264,7 @@ static void mce_panic(char *msg, struct
11045 /*
11046 * Make sure only one CPU runs in machine check panic
11047 */
11048 - if (atomic_inc_return(&mce_paniced) > 1)
11049 + if (atomic_inc_return_unchecked(&mce_paniced) > 1)
11050 wait_for_panic();
11051 barrier();
11052
11053 @@ -271,7 +272,7 @@ static void mce_panic(char *msg, struct
11054 console_verbose();
11055 } else {
11056 /* Don't log too much for fake panic */
11057 - if (atomic_inc_return(&mce_fake_paniced) > 1)
11058 + if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
11059 return;
11060 }
11061 /* First print corrected ones that are still unlogged */
11062 @@ -638,7 +639,7 @@ static int mce_timed_out(u64 *t)
11063 * might have been modified by someone else.
11064 */
11065 rmb();
11066 - if (atomic_read(&mce_paniced))
11067 + if (atomic_read_unchecked(&mce_paniced))
11068 wait_for_panic();
11069 if (!monarch_timeout)
11070 goto out;
11071 @@ -1452,14 +1453,14 @@ void __cpuinit mcheck_cpu_init(struct cp
11072 */
11073
11074 static DEFINE_SPINLOCK(mce_state_lock);
11075 -static int open_count; /* #times opened */
11076 +static local_t open_count; /* #times opened */
11077 static int open_exclu; /* already open exclusive? */
11078
11079 static int mce_open(struct inode *inode, struct file *file)
11080 {
11081 spin_lock(&mce_state_lock);
11082
11083 - if (open_exclu || (open_count && (file->f_flags & O_EXCL))) {
11084 + if (open_exclu || (local_read(&open_count) && (file->f_flags & O_EXCL))) {
11085 spin_unlock(&mce_state_lock);
11086
11087 return -EBUSY;
11088 @@ -1467,7 +1468,7 @@ static int mce_open(struct inode *inode,
11089
11090 if (file->f_flags & O_EXCL)
11091 open_exclu = 1;
11092 - open_count++;
11093 + local_inc(&open_count);
11094
11095 spin_unlock(&mce_state_lock);
11096
11097 @@ -1478,7 +1479,7 @@ static int mce_release(struct inode *ino
11098 {
11099 spin_lock(&mce_state_lock);
11100
11101 - open_count--;
11102 + local_dec(&open_count);
11103 open_exclu = 0;
11104
11105 spin_unlock(&mce_state_lock);
11106 @@ -2163,7 +2164,7 @@ struct dentry *mce_get_debugfs_dir(void)
11107 static void mce_reset(void)
11108 {
11109 cpu_missing = 0;
11110 - atomic_set(&mce_fake_paniced, 0);
11111 + atomic_set_unchecked(&mce_fake_paniced, 0);
11112 atomic_set(&mce_executing, 0);
11113 atomic_set(&mce_callin, 0);
11114 atomic_set(&global_nwo, 0);
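The mce.c hunks above apply two related counter conversions: the panic bookkeeping (mce_paniced, mce_fake_paniced) moves from atomic_t to atomic_unchecked_t so that PaX's reference-count overflow detection does not trap on counters whose wraparound is harmless, and the character-device open_count becomes a local_t manipulated with local_inc()/local_dec() under the existing spinlock. A minimal userspace sketch of the unchecked-counter idea, using C11 atomics as a stand-in for the kernel types (the names mirror the patch, the implementation here does not):

/* Userspace stand-in for the atomic_unchecked_t pattern above: under
 * CONFIG_PAX_REFCOUNT the ordinary atomic_inc() would trap on overflow,
 * while the _unchecked variant keeps plain wrapping behaviour for
 * counters where that is harmless.  This is a sketch, not kernel code. */
#include <stdatomic.h>
#include <stdio.h>

typedef struct { atomic_int counter; } atomic_unchecked_t;

static int atomic_inc_return_unchecked(atomic_unchecked_t *v)
{
	return atomic_fetch_add(&v->counter, 1) + 1;	/* no overflow trap */
}

static atomic_unchecked_t mce_paniced_demo;		/* mirrors mce_paniced */

int main(void)
{
	if (atomic_inc_return_unchecked(&mce_paniced_demo) > 1)
		puts("panic already in progress, would wait_for_panic()");
	else
		puts("first CPU into mce_panic()");
	return 0;
}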
11115 diff -urNp linux-3.0.8/arch/x86/kernel/cpu/mcheck/mce-inject.c linux-3.0.8/arch/x86/kernel/cpu/mcheck/mce-inject.c
11116 --- linux-3.0.8/arch/x86/kernel/cpu/mcheck/mce-inject.c 2011-07-21 22:17:23.000000000 -0400
11117 +++ linux-3.0.8/arch/x86/kernel/cpu/mcheck/mce-inject.c 2011-08-23 21:47:55.000000000 -0400
11118 @@ -215,7 +215,9 @@ static int inject_init(void)
11119 if (!alloc_cpumask_var(&mce_inject_cpumask, GFP_KERNEL))
11120 return -ENOMEM;
11121 printk(KERN_INFO "Machine check injector initialized\n");
11122 - mce_chrdev_ops.write = mce_write;
11123 + pax_open_kernel();
11124 + *(void **)&mce_chrdev_ops.write = mce_write;
11125 + pax_close_kernel();
11126 register_die_notifier(&mce_raise_nb);
11127 return 0;
11128 }
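The mce-inject.c change is the standard KERNEXEC pattern for late initialisation of an otherwise read-only ops structure: the single assignment to mce_chrdev_ops.write is bracketed by pax_open_kernel()/pax_close_kernel(), which temporarily lift the write protection on kernel data that has been made read-only. A rough userspace analogue using mprotect() (the helper names below are invented for the sketch, not kernel API):

/* Keep an "ops" table in a read-only page and lift the protection only
 * around the one assignment that has to patch it at init time.
 * set_prot() plays the role of pax_open_kernel()/pax_close_kernel(). */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

struct demo_ops {
	ssize_t (*write)(const char *buf, size_t len);
};

static ssize_t late_write(const char *buf, size_t len)
{
	(void)buf;
	return (ssize_t)len;
}

static void set_prot(void *p, int prot)
{
	long pagesz = sysconf(_SC_PAGESIZE);

	mprotect((void *)((unsigned long)p & ~((unsigned long)pagesz - 1)),
		 pagesz, prot);
}

int main(void)
{
	long pagesz = sysconf(_SC_PAGESIZE);
	struct demo_ops *ops = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
				    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	memset(ops, 0, sizeof(*ops));
	mprotect(ops, pagesz, PROT_READ);	/* table is now read-only */

	set_prot(ops, PROT_READ | PROT_WRITE);	/* "pax_open_kernel()" */
	ops->write = late_write;		/* the one late fixup */
	set_prot(ops, PROT_READ);		/* "pax_close_kernel()" */

	printf("write() returned %zd\n", ops->write("x", 1));
	return 0;
}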
11129 diff -urNp linux-3.0.8/arch/x86/kernel/cpu/mtrr/main.c linux-3.0.8/arch/x86/kernel/cpu/mtrr/main.c
11130 --- linux-3.0.8/arch/x86/kernel/cpu/mtrr/main.c 2011-10-24 08:05:23.000000000 -0400
11131 +++ linux-3.0.8/arch/x86/kernel/cpu/mtrr/main.c 2011-08-29 23:26:21.000000000 -0400
11132 @@ -62,7 +62,7 @@ static DEFINE_MUTEX(mtrr_mutex);
11133 u64 size_or_mask, size_and_mask;
11134 static bool mtrr_aps_delayed_init;
11135
11136 -static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
11137 +static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
11138
11139 const struct mtrr_ops *mtrr_if;
11140
11141 diff -urNp linux-3.0.8/arch/x86/kernel/cpu/mtrr/mtrr.h linux-3.0.8/arch/x86/kernel/cpu/mtrr/mtrr.h
11142 --- linux-3.0.8/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-07-21 22:17:23.000000000 -0400
11143 +++ linux-3.0.8/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-08-26 19:49:56.000000000 -0400
11144 @@ -25,7 +25,7 @@ struct mtrr_ops {
11145 int (*validate_add_page)(unsigned long base, unsigned long size,
11146 unsigned int type);
11147 int (*have_wrcomb)(void);
11148 -};
11149 +} __do_const;
11150
11151 extern int generic_get_free_region(unsigned long base, unsigned long size,
11152 int replace_reg);
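Taken together, the two mtrr hunks push the vendor dispatch data into read-only memory: the mtrr_ops[] table gains __read_only and struct mtrr_ops itself is tagged __do_const, so neither the table slots nor the function pointers inside an ops object can be retargeted after boot. The plain-C shape of that guarantee, as a sketch:

/* Plain C analogue of __do_const / __read_only on a method table: the
 * ops object and the slot that points at it are both const, so a bug
 * (or an attacker) cannot swap the function pointers at run time. */
#include <stdio.h>

struct ops {
	int (*have_wrcomb)(void);
};

static int no_wrcomb(void) { return 0; }

static const struct ops generic_ops = { .have_wrcomb = no_wrcomb };
static const struct ops *const ops_table[] = { &generic_ops };

int main(void)
{
	printf("have_wrcomb() = %d\n", ops_table[0]->have_wrcomb());
	/* ops_table[0] = NULL;                rejected: pointer slot is const      */
	/* ops_table[0]->have_wrcomb = NULL;   rejected: pointed-to struct is const */
	return 0;
}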
11153 diff -urNp linux-3.0.8/arch/x86/kernel/cpu/perf_event.c linux-3.0.8/arch/x86/kernel/cpu/perf_event.c
11154 --- linux-3.0.8/arch/x86/kernel/cpu/perf_event.c 2011-10-24 08:05:30.000000000 -0400
11155 +++ linux-3.0.8/arch/x86/kernel/cpu/perf_event.c 2011-10-16 21:55:27.000000000 -0400
11156 @@ -781,6 +781,8 @@ static int x86_schedule_events(struct cp
11157 int i, j, w, wmax, num = 0;
11158 struct hw_perf_event *hwc;
11159
11160 + pax_track_stack();
11161 +
11162 bitmap_zero(used_mask, X86_PMC_IDX_MAX);
11163
11164 for (i = 0; i < n; i++) {
11165 @@ -1875,7 +1877,7 @@ perf_callchain_user(struct perf_callchai
11166 break;
11167
11168 perf_callchain_store(entry, frame.return_address);
11169 - fp = frame.next_frame;
11170 + fp = (const void __force_user *)frame.next_frame;
11171 }
11172 }
11173
11174 diff -urNp linux-3.0.8/arch/x86/kernel/crash.c linux-3.0.8/arch/x86/kernel/crash.c
11175 --- linux-3.0.8/arch/x86/kernel/crash.c 2011-07-21 22:17:23.000000000 -0400
11176 +++ linux-3.0.8/arch/x86/kernel/crash.c 2011-08-23 21:47:55.000000000 -0400
11177 @@ -42,7 +42,7 @@ static void kdump_nmi_callback(int cpu,
11178 regs = args->regs;
11179
11180 #ifdef CONFIG_X86_32
11181 - if (!user_mode_vm(regs)) {
11182 + if (!user_mode(regs)) {
11183 crash_fixup_ss_esp(&fixed_regs, regs);
11184 regs = &fixed_regs;
11185 }
11186 diff -urNp linux-3.0.8/arch/x86/kernel/doublefault_32.c linux-3.0.8/arch/x86/kernel/doublefault_32.c
11187 --- linux-3.0.8/arch/x86/kernel/doublefault_32.c 2011-07-21 22:17:23.000000000 -0400
11188 +++ linux-3.0.8/arch/x86/kernel/doublefault_32.c 2011-08-23 21:47:55.000000000 -0400
11189 @@ -11,7 +11,7 @@
11190
11191 #define DOUBLEFAULT_STACKSIZE (1024)
11192 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
11193 -#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
11194 +#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
11195
11196 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
11197
11198 @@ -21,7 +21,7 @@ static void doublefault_fn(void)
11199 unsigned long gdt, tss;
11200
11201 store_gdt(&gdt_desc);
11202 - gdt = gdt_desc.address;
11203 + gdt = (unsigned long)gdt_desc.address;
11204
11205 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
11206
11207 @@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cach
11208 /* 0x2 bit is always set */
11209 .flags = X86_EFLAGS_SF | 0x2,
11210 .sp = STACK_START,
11211 - .es = __USER_DS,
11212 + .es = __KERNEL_DS,
11213 .cs = __KERNEL_CS,
11214 .ss = __KERNEL_DS,
11215 - .ds = __USER_DS,
11216 + .ds = __KERNEL_DS,
11217 .fs = __KERNEL_PERCPU,
11218
11219 .__cr3 = __pa_nodebug(swapper_pg_dir),
11220 diff -urNp linux-3.0.8/arch/x86/kernel/dumpstack_32.c linux-3.0.8/arch/x86/kernel/dumpstack_32.c
11221 --- linux-3.0.8/arch/x86/kernel/dumpstack_32.c 2011-07-21 22:17:23.000000000 -0400
11222 +++ linux-3.0.8/arch/x86/kernel/dumpstack_32.c 2011-08-23 21:47:55.000000000 -0400
11223 @@ -38,15 +38,13 @@ void dump_trace(struct task_struct *task
11224 bp = stack_frame(task, regs);
11225
11226 for (;;) {
11227 - struct thread_info *context;
11228 + void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
11229
11230 - context = (struct thread_info *)
11231 - ((unsigned long)stack & (~(THREAD_SIZE - 1)));
11232 - bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
11233 + bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
11234
11235 - stack = (unsigned long *)context->previous_esp;
11236 - if (!stack)
11237 + if (stack_start == task_stack_page(task))
11238 break;
11239 + stack = *(unsigned long **)stack_start;
11240 if (ops->stack(data, "IRQ") < 0)
11241 break;
11242 touch_nmi_watchdog();
11243 @@ -96,21 +94,22 @@ void show_registers(struct pt_regs *regs
11244 * When in-kernel, we also print out the stack and code at the
11245 * time of the fault..
11246 */
11247 - if (!user_mode_vm(regs)) {
11248 + if (!user_mode(regs)) {
11249 unsigned int code_prologue = code_bytes * 43 / 64;
11250 unsigned int code_len = code_bytes;
11251 unsigned char c;
11252 u8 *ip;
11253 + unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
11254
11255 printk(KERN_EMERG "Stack:\n");
11256 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
11257
11258 printk(KERN_EMERG "Code: ");
11259
11260 - ip = (u8 *)regs->ip - code_prologue;
11261 + ip = (u8 *)regs->ip - code_prologue + cs_base;
11262 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
11263 /* try starting at IP */
11264 - ip = (u8 *)regs->ip;
11265 + ip = (u8 *)regs->ip + cs_base;
11266 code_len = code_len - code_prologue + 1;
11267 }
11268 for (i = 0; i < code_len; i++, ip++) {
11269 @@ -119,7 +118,7 @@ void show_registers(struct pt_regs *regs
11270 printk(" Bad EIP value.");
11271 break;
11272 }
11273 - if (ip == (u8 *)regs->ip)
11274 + if (ip == (u8 *)regs->ip + cs_base)
11275 printk("<%02x> ", c);
11276 else
11277 printk("%02x ", c);
11278 @@ -132,6 +131,7 @@ int is_valid_bugaddr(unsigned long ip)
11279 {
11280 unsigned short ud2;
11281
11282 + ip = ktla_ktva(ip);
11283 if (ip < PAGE_OFFSET)
11284 return 0;
11285 if (probe_kernel_address((unsigned short *)ip, ud2))
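The 32-bit dump_trace() rework above stops trusting thread_info for stack chaining: the base of the current stack block is recomputed by masking the stack pointer with ~(THREAD_SIZE-1), the walker is handed that base plus the owning task, and the next stack in the chain is read from the word stored at that base. The masking step relies on kernel stacks being THREAD_SIZE-sized and THREAD_SIZE-aligned; a small sketch (the 8 KiB THREAD_SIZE is an assumption for the demo):

/* "Mask down to the stack base" as used above: for a power-of-two sized,
 * naturally aligned stack, base = sp & ~(SIZE - 1). */
#include <stdint.h>
#include <stdio.h>

#define THREAD_SIZE 8192UL

static _Alignas(THREAD_SIZE) unsigned char fake_stack[THREAD_SIZE];

static void *stack_base(const void *sp)
{
	return (void *)((uintptr_t)sp & ~(THREAD_SIZE - 1));
}

int main(void)
{
	void *sp = fake_stack + THREAD_SIZE - 64;	/* somewhere near the top */

	printf("sp=%p base=%p array=%p\n", sp, stack_base(sp), (void *)fake_stack);
	return 0;
}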
11286 diff -urNp linux-3.0.8/arch/x86/kernel/dumpstack_64.c linux-3.0.8/arch/x86/kernel/dumpstack_64.c
11287 --- linux-3.0.8/arch/x86/kernel/dumpstack_64.c 2011-07-21 22:17:23.000000000 -0400
11288 +++ linux-3.0.8/arch/x86/kernel/dumpstack_64.c 2011-08-23 21:47:55.000000000 -0400
11289 @@ -147,9 +147,9 @@ void dump_trace(struct task_struct *task
11290 unsigned long *irq_stack_end =
11291 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
11292 unsigned used = 0;
11293 - struct thread_info *tinfo;
11294 int graph = 0;
11295 unsigned long dummy;
11296 + void *stack_start;
11297
11298 if (!task)
11299 task = current;
11300 @@ -167,10 +167,10 @@ void dump_trace(struct task_struct *task
11301 * current stack address. If the stacks consist of nested
11302 * exceptions
11303 */
11304 - tinfo = task_thread_info(task);
11305 for (;;) {
11306 char *id;
11307 unsigned long *estack_end;
11308 +
11309 estack_end = in_exception_stack(cpu, (unsigned long)stack,
11310 &used, &id);
11311
11312 @@ -178,7 +178,7 @@ void dump_trace(struct task_struct *task
11313 if (ops->stack(data, id) < 0)
11314 break;
11315
11316 - bp = ops->walk_stack(tinfo, stack, bp, ops,
11317 + bp = ops->walk_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
11318 data, estack_end, &graph);
11319 ops->stack(data, "<EOE>");
11320 /*
11321 @@ -197,7 +197,7 @@ void dump_trace(struct task_struct *task
11322 if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
11323 if (ops->stack(data, "IRQ") < 0)
11324 break;
11325 - bp = ops->walk_stack(tinfo, stack, bp,
11326 + bp = ops->walk_stack(task, irq_stack, stack, bp,
11327 ops, data, irq_stack_end, &graph);
11328 /*
11329 * We link to the next stack (which would be
11330 @@ -218,7 +218,8 @@ void dump_trace(struct task_struct *task
11331 /*
11332 * This handles the process stack:
11333 */
11334 - bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
11335 + stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
11336 + bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
11337 put_cpu();
11338 }
11339 EXPORT_SYMBOL(dump_trace);
11340 diff -urNp linux-3.0.8/arch/x86/kernel/dumpstack.c linux-3.0.8/arch/x86/kernel/dumpstack.c
11341 --- linux-3.0.8/arch/x86/kernel/dumpstack.c 2011-07-21 22:17:23.000000000 -0400
11342 +++ linux-3.0.8/arch/x86/kernel/dumpstack.c 2011-08-23 21:48:14.000000000 -0400
11343 @@ -2,6 +2,9 @@
11344 * Copyright (C) 1991, 1992 Linus Torvalds
11345 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
11346 */
11347 +#ifdef CONFIG_GRKERNSEC_HIDESYM
11348 +#define __INCLUDED_BY_HIDESYM 1
11349 +#endif
11350 #include <linux/kallsyms.h>
11351 #include <linux/kprobes.h>
11352 #include <linux/uaccess.h>
11353 @@ -35,9 +38,8 @@ void printk_address(unsigned long addres
11354 static void
11355 print_ftrace_graph_addr(unsigned long addr, void *data,
11356 const struct stacktrace_ops *ops,
11357 - struct thread_info *tinfo, int *graph)
11358 + struct task_struct *task, int *graph)
11359 {
11360 - struct task_struct *task = tinfo->task;
11361 unsigned long ret_addr;
11362 int index = task->curr_ret_stack;
11363
11364 @@ -58,7 +60,7 @@ print_ftrace_graph_addr(unsigned long ad
11365 static inline void
11366 print_ftrace_graph_addr(unsigned long addr, void *data,
11367 const struct stacktrace_ops *ops,
11368 - struct thread_info *tinfo, int *graph)
11369 + struct task_struct *task, int *graph)
11370 { }
11371 #endif
11372
11373 @@ -69,10 +71,8 @@ print_ftrace_graph_addr(unsigned long ad
11374 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
11375 */
11376
11377 -static inline int valid_stack_ptr(struct thread_info *tinfo,
11378 - void *p, unsigned int size, void *end)
11379 +static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
11380 {
11381 - void *t = tinfo;
11382 if (end) {
11383 if (p < end && p >= (end-THREAD_SIZE))
11384 return 1;
11385 @@ -83,14 +83,14 @@ static inline int valid_stack_ptr(struct
11386 }
11387
11388 unsigned long
11389 -print_context_stack(struct thread_info *tinfo,
11390 +print_context_stack(struct task_struct *task, void *stack_start,
11391 unsigned long *stack, unsigned long bp,
11392 const struct stacktrace_ops *ops, void *data,
11393 unsigned long *end, int *graph)
11394 {
11395 struct stack_frame *frame = (struct stack_frame *)bp;
11396
11397 - while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
11398 + while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
11399 unsigned long addr;
11400
11401 addr = *stack;
11402 @@ -102,7 +102,7 @@ print_context_stack(struct thread_info *
11403 } else {
11404 ops->address(data, addr, 0);
11405 }
11406 - print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
11407 + print_ftrace_graph_addr(addr, data, ops, task, graph);
11408 }
11409 stack++;
11410 }
11411 @@ -111,7 +111,7 @@ print_context_stack(struct thread_info *
11412 EXPORT_SYMBOL_GPL(print_context_stack);
11413
11414 unsigned long
11415 -print_context_stack_bp(struct thread_info *tinfo,
11416 +print_context_stack_bp(struct task_struct *task, void *stack_start,
11417 unsigned long *stack, unsigned long bp,
11418 const struct stacktrace_ops *ops, void *data,
11419 unsigned long *end, int *graph)
11420 @@ -119,7 +119,7 @@ print_context_stack_bp(struct thread_inf
11421 struct stack_frame *frame = (struct stack_frame *)bp;
11422 unsigned long *ret_addr = &frame->return_address;
11423
11424 - while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
11425 + while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
11426 unsigned long addr = *ret_addr;
11427
11428 if (!__kernel_text_address(addr))
11429 @@ -128,7 +128,7 @@ print_context_stack_bp(struct thread_inf
11430 ops->address(data, addr, 1);
11431 frame = frame->next_frame;
11432 ret_addr = &frame->return_address;
11433 - print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
11434 + print_ftrace_graph_addr(addr, data, ops, task, graph);
11435 }
11436
11437 return (unsigned long)frame;
11438 @@ -186,7 +186,7 @@ void dump_stack(void)
11439
11440 bp = stack_frame(current, NULL);
11441 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
11442 - current->pid, current->comm, print_tainted(),
11443 + task_pid_nr(current), current->comm, print_tainted(),
11444 init_utsname()->release,
11445 (int)strcspn(init_utsname()->version, " "),
11446 init_utsname()->version);
11447 @@ -222,6 +222,8 @@ unsigned __kprobes long oops_begin(void)
11448 }
11449 EXPORT_SYMBOL_GPL(oops_begin);
11450
11451 +extern void gr_handle_kernel_exploit(void);
11452 +
11453 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
11454 {
11455 if (regs && kexec_should_crash(current))
11456 @@ -243,7 +245,10 @@ void __kprobes oops_end(unsigned long fl
11457 panic("Fatal exception in interrupt");
11458 if (panic_on_oops)
11459 panic("Fatal exception");
11460 - do_exit(signr);
11461 +
11462 + gr_handle_kernel_exploit();
11463 +
11464 + do_group_exit(signr);
11465 }
11466
11467 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
11468 @@ -269,7 +274,7 @@ int __kprobes __die(const char *str, str
11469
11470 show_registers(regs);
11471 #ifdef CONFIG_X86_32
11472 - if (user_mode_vm(regs)) {
11473 + if (user_mode(regs)) {
11474 sp = regs->sp;
11475 ss = regs->ss & 0xffff;
11476 } else {
11477 @@ -297,7 +302,7 @@ void die(const char *str, struct pt_regs
11478 unsigned long flags = oops_begin();
11479 int sig = SIGSEGV;
11480
11481 - if (!user_mode_vm(regs))
11482 + if (!user_mode(regs))
11483 report_bug(regs->ip, regs);
11484
11485 if (__die(str, regs, err))
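The dumpstack.c rework is the common half of the previous two diffs: the frame printers now receive the task and a precomputed stack base instead of a thread_info pointer, and valid_stack_ptr() bounds each candidate frame against that base (or against the explicit end pointer when walking an exception/IRQ stack). A simplified userspace version of that bounds test (THREAD_SIZE again assumed, and this is not the kernel function itself):

/* Accept a `size`-byte object at `p` only if it fits inside the
 * THREAD_SIZE window starting at stack_start, or inside
 * [end - THREAD_SIZE, end) when an explicit end is given. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define THREAD_SIZE 8192UL

static bool valid_stack_ptr(const void *stack_start, const void *p,
			    size_t size, const void *end)
{
	const char *lo = end ? (const char *)end - THREAD_SIZE
			     : (const char *)stack_start;
	const char *hi = end ? (const char *)end
			     : (const char *)stack_start + THREAD_SIZE;

	return (const char *)p >= lo && (const char *)p + size <= hi;
}

int main(void)
{
	static char stack[THREAD_SIZE];

	printf("inside: %d, past end: %d\n",
	       valid_stack_ptr(stack, stack + 100, 8, NULL),
	       valid_stack_ptr(stack, stack + THREAD_SIZE, 8, NULL));
	return 0;
}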
11486 diff -urNp linux-3.0.8/arch/x86/kernel/early_printk.c linux-3.0.8/arch/x86/kernel/early_printk.c
11487 --- linux-3.0.8/arch/x86/kernel/early_printk.c 2011-07-21 22:17:23.000000000 -0400
11488 +++ linux-3.0.8/arch/x86/kernel/early_printk.c 2011-08-23 21:48:14.000000000 -0400
11489 @@ -7,6 +7,7 @@
11490 #include <linux/pci_regs.h>
11491 #include <linux/pci_ids.h>
11492 #include <linux/errno.h>
11493 +#include <linux/sched.h>
11494 #include <asm/io.h>
11495 #include <asm/processor.h>
11496 #include <asm/fcntl.h>
11497 @@ -179,6 +180,8 @@ asmlinkage void early_printk(const char
11498 int n;
11499 va_list ap;
11500
11501 + pax_track_stack();
11502 +
11503 va_start(ap, fmt);
11504 n = vscnprintf(buf, sizeof(buf), fmt, ap);
11505 early_console->write(early_console, buf, n);
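The early_printk.c hunk only inserts pax_track_stack() ahead of the function's sizeable on-stack buffer; under PAX_MEMORY_STACKLEAK that call keeps the per-thread lowest_stack watermark up to date so the later stack erase knows how deep to scrub. A toy version of the watermark update (the global lowest_stack below is a stand-in for the real per-thread field, and the surrounding harness is invented for the sketch):

/* Take the address of a local as "current stack depth" and remember the
 * lowest (deepest) value seen, so a later erase pass knows how far down
 * to scrub.  x86 stacks grow downwards, hence the < comparison. */
#include <stdint.h>
#include <stdio.h>

static uintptr_t lowest_stack = UINTPTR_MAX;

static void track_stack(void)
{
	char marker;

	if ((uintptr_t)&marker < lowest_stack)
		lowest_stack = (uintptr_t)&marker;
}

static void early_printk_like(void)
{
	char buf[512];				/* the large on-stack buffer */

	track_stack();
	buf[0] = '\0';				/* pretend to format into buf */
	(void)buf;
}

int main(void)
{
	early_printk_like();
	printf("deepest stack address seen: %#lx\n", (unsigned long)lowest_stack);
	return 0;
}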
11506 diff -urNp linux-3.0.8/arch/x86/kernel/entry_32.S linux-3.0.8/arch/x86/kernel/entry_32.S
11507 --- linux-3.0.8/arch/x86/kernel/entry_32.S 2011-07-21 22:17:23.000000000 -0400
11508 +++ linux-3.0.8/arch/x86/kernel/entry_32.S 2011-10-20 04:46:01.000000000 -0400
11509 @@ -185,13 +185,146 @@
11510 /*CFI_REL_OFFSET gs, PT_GS*/
11511 .endm
11512 .macro SET_KERNEL_GS reg
11513 +
11514 +#ifdef CONFIG_CC_STACKPROTECTOR
11515 movl $(__KERNEL_STACK_CANARY), \reg
11516 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
11517 + movl $(__USER_DS), \reg
11518 +#else
11519 + xorl \reg, \reg
11520 +#endif
11521 +
11522 movl \reg, %gs
11523 .endm
11524
11525 #endif /* CONFIG_X86_32_LAZY_GS */
11526
11527 -.macro SAVE_ALL
11528 +.macro pax_enter_kernel
11529 +#ifdef CONFIG_PAX_KERNEXEC
11530 + call pax_enter_kernel
11531 +#endif
11532 +.endm
11533 +
11534 +.macro pax_exit_kernel
11535 +#ifdef CONFIG_PAX_KERNEXEC
11536 + call pax_exit_kernel
11537 +#endif
11538 +.endm
11539 +
11540 +#ifdef CONFIG_PAX_KERNEXEC
11541 +ENTRY(pax_enter_kernel)
11542 +#ifdef CONFIG_PARAVIRT
11543 + pushl %eax
11544 + pushl %ecx
11545 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
11546 + mov %eax, %esi
11547 +#else
11548 + mov %cr0, %esi
11549 +#endif
11550 + bts $16, %esi
11551 + jnc 1f
11552 + mov %cs, %esi
11553 + cmp $__KERNEL_CS, %esi
11554 + jz 3f
11555 + ljmp $__KERNEL_CS, $3f
11556 +1: ljmp $__KERNEXEC_KERNEL_CS, $2f
11557 +2:
11558 +#ifdef CONFIG_PARAVIRT
11559 + mov %esi, %eax
11560 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
11561 +#else
11562 + mov %esi, %cr0
11563 +#endif
11564 +3:
11565 +#ifdef CONFIG_PARAVIRT
11566 + popl %ecx
11567 + popl %eax
11568 +#endif
11569 + ret
11570 +ENDPROC(pax_enter_kernel)
11571 +
11572 +ENTRY(pax_exit_kernel)
11573 +#ifdef CONFIG_PARAVIRT
11574 + pushl %eax
11575 + pushl %ecx
11576 +#endif
11577 + mov %cs, %esi
11578 + cmp $__KERNEXEC_KERNEL_CS, %esi
11579 + jnz 2f
11580 +#ifdef CONFIG_PARAVIRT
11581 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
11582 + mov %eax, %esi
11583 +#else
11584 + mov %cr0, %esi
11585 +#endif
11586 + btr $16, %esi
11587 + ljmp $__KERNEL_CS, $1f
11588 +1:
11589 +#ifdef CONFIG_PARAVIRT
11590 + mov %esi, %eax
11591 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
11592 +#else
11593 + mov %esi, %cr0
11594 +#endif
11595 +2:
11596 +#ifdef CONFIG_PARAVIRT
11597 + popl %ecx
11598 + popl %eax
11599 +#endif
11600 + ret
11601 +ENDPROC(pax_exit_kernel)
11602 +#endif
11603 +
11604 +.macro pax_erase_kstack
11605 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11606 + call pax_erase_kstack
11607 +#endif
11608 +.endm
11609 +
11610 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11611 +/*
11612 + * ebp: thread_info
11613 + * ecx, edx: can be clobbered
11614 + */
11615 +ENTRY(pax_erase_kstack)
11616 + pushl %edi
11617 + pushl %eax
11618 +
11619 + mov TI_lowest_stack(%ebp), %edi
11620 + mov $-0xBEEF, %eax
11621 + std
11622 +
11623 +1: mov %edi, %ecx
11624 + and $THREAD_SIZE_asm - 1, %ecx
11625 + shr $2, %ecx
11626 + repne scasl
11627 + jecxz 2f
11628 +
11629 + cmp $2*16, %ecx
11630 + jc 2f
11631 +
11632 + mov $2*16, %ecx
11633 + repe scasl
11634 + jecxz 2f
11635 + jne 1b
11636 +
11637 +2: cld
11638 + mov %esp, %ecx
11639 + sub %edi, %ecx
11640 + shr $2, %ecx
11641 + rep stosl
11642 +
11643 + mov TI_task_thread_sp0(%ebp), %edi
11644 + sub $128, %edi
11645 + mov %edi, TI_lowest_stack(%ebp)
11646 +
11647 + popl %eax
11648 + popl %edi
11649 + ret
11650 +ENDPROC(pax_erase_kstack)
11651 +#endif
11652 +
11653 +.macro __SAVE_ALL _DS
11654 cld
11655 PUSH_GS
11656 pushl_cfi %fs
11657 @@ -214,7 +347,7 @@
11658 CFI_REL_OFFSET ecx, 0
11659 pushl_cfi %ebx
11660 CFI_REL_OFFSET ebx, 0
11661 - movl $(__USER_DS), %edx
11662 + movl $\_DS, %edx
11663 movl %edx, %ds
11664 movl %edx, %es
11665 movl $(__KERNEL_PERCPU), %edx
11666 @@ -222,6 +355,15 @@
11667 SET_KERNEL_GS %edx
11668 .endm
11669
11670 +.macro SAVE_ALL
11671 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
11672 + __SAVE_ALL __KERNEL_DS
11673 + pax_enter_kernel
11674 +#else
11675 + __SAVE_ALL __USER_DS
11676 +#endif
11677 +.endm
11678 +
11679 .macro RESTORE_INT_REGS
11680 popl_cfi %ebx
11681 CFI_RESTORE ebx
11682 @@ -307,7 +449,7 @@ ENTRY(ret_from_fork)
11683 popfl_cfi
11684 jmp syscall_exit
11685 CFI_ENDPROC
11686 -END(ret_from_fork)
11687 +ENDPROC(ret_from_fork)
11688
11689 /*
11690 * Interrupt exit functions should be protected against kprobes
11691 @@ -332,7 +474,15 @@ check_userspace:
11692 movb PT_CS(%esp), %al
11693 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
11694 cmpl $USER_RPL, %eax
11695 +
11696 +#ifdef CONFIG_PAX_KERNEXEC
11697 + jae resume_userspace
11698 +
11699 + PAX_EXIT_KERNEL
11700 + jmp resume_kernel
11701 +#else
11702 jb resume_kernel # not returning to v8086 or userspace
11703 +#endif
11704
11705 ENTRY(resume_userspace)
11706 LOCKDEP_SYS_EXIT
11707 @@ -344,8 +494,8 @@ ENTRY(resume_userspace)
11708 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
11709 # int/exception return?
11710 jne work_pending
11711 - jmp restore_all
11712 -END(ret_from_exception)
11713 + jmp restore_all_pax
11714 +ENDPROC(ret_from_exception)
11715
11716 #ifdef CONFIG_PREEMPT
11717 ENTRY(resume_kernel)
11718 @@ -360,7 +510,7 @@ need_resched:
11719 jz restore_all
11720 call preempt_schedule_irq
11721 jmp need_resched
11722 -END(resume_kernel)
11723 +ENDPROC(resume_kernel)
11724 #endif
11725 CFI_ENDPROC
11726 /*
11727 @@ -394,23 +544,34 @@ sysenter_past_esp:
11728 /*CFI_REL_OFFSET cs, 0*/
11729 /*
11730 * Push current_thread_info()->sysenter_return to the stack.
11731 - * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
11732 - * pushed above; +8 corresponds to copy_thread's esp0 setting.
11733 */
11734 - pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
11735 + pushl_cfi $0
11736 CFI_REL_OFFSET eip, 0
11737
11738 pushl_cfi %eax
11739 SAVE_ALL
11740 + GET_THREAD_INFO(%ebp)
11741 + movl TI_sysenter_return(%ebp),%ebp
11742 + movl %ebp,PT_EIP(%esp)
11743 ENABLE_INTERRUPTS(CLBR_NONE)
11744
11745 /*
11746 * Load the potential sixth argument from user stack.
11747 * Careful about security.
11748 */
11749 + movl PT_OLDESP(%esp),%ebp
11750 +
11751 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11752 + mov PT_OLDSS(%esp),%ds
11753 +1: movl %ds:(%ebp),%ebp
11754 + push %ss
11755 + pop %ds
11756 +#else
11757 cmpl $__PAGE_OFFSET-3,%ebp
11758 jae syscall_fault
11759 1: movl (%ebp),%ebp
11760 +#endif
11761 +
11762 movl %ebp,PT_EBP(%esp)
11763 .section __ex_table,"a"
11764 .align 4
11765 @@ -433,12 +594,24 @@ sysenter_do_call:
11766 testl $_TIF_ALLWORK_MASK, %ecx
11767 jne sysexit_audit
11768 sysenter_exit:
11769 +
11770 +#ifdef CONFIG_PAX_RANDKSTACK
11771 + pushl_cfi %eax
11772 + movl %esp, %eax
11773 + call pax_randomize_kstack
11774 + popl_cfi %eax
11775 +#endif
11776 +
11777 + pax_erase_kstack
11778 +
11779 /* if something modifies registers it must also disable sysexit */
11780 movl PT_EIP(%esp), %edx
11781 movl PT_OLDESP(%esp), %ecx
11782 xorl %ebp,%ebp
11783 TRACE_IRQS_ON
11784 1: mov PT_FS(%esp), %fs
11785 +2: mov PT_DS(%esp), %ds
11786 +3: mov PT_ES(%esp), %es
11787 PTGS_TO_GS
11788 ENABLE_INTERRUPTS_SYSEXIT
11789
11790 @@ -455,6 +628,9 @@ sysenter_audit:
11791 movl %eax,%edx /* 2nd arg: syscall number */
11792 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
11793 call audit_syscall_entry
11794 +
11795 + pax_erase_kstack
11796 +
11797 pushl_cfi %ebx
11798 movl PT_EAX(%esp),%eax /* reload syscall number */
11799 jmp sysenter_do_call
11800 @@ -481,11 +657,17 @@ sysexit_audit:
11801
11802 CFI_ENDPROC
11803 .pushsection .fixup,"ax"
11804 -2: movl $0,PT_FS(%esp)
11805 +4: movl $0,PT_FS(%esp)
11806 + jmp 1b
11807 +5: movl $0,PT_DS(%esp)
11808 + jmp 1b
11809 +6: movl $0,PT_ES(%esp)
11810 jmp 1b
11811 .section __ex_table,"a"
11812 .align 4
11813 - .long 1b,2b
11814 + .long 1b,4b
11815 + .long 2b,5b
11816 + .long 3b,6b
11817 .popsection
11818 PTGS_TO_GS_EX
11819 ENDPROC(ia32_sysenter_target)
11820 @@ -518,6 +700,15 @@ syscall_exit:
11821 testl $_TIF_ALLWORK_MASK, %ecx # current->work
11822 jne syscall_exit_work
11823
11824 +restore_all_pax:
11825 +
11826 +#ifdef CONFIG_PAX_RANDKSTACK
11827 + movl %esp, %eax
11828 + call pax_randomize_kstack
11829 +#endif
11830 +
11831 + pax_erase_kstack
11832 +
11833 restore_all:
11834 TRACE_IRQS_IRET
11835 restore_all_notrace:
11836 @@ -577,14 +768,34 @@ ldt_ss:
11837 * compensating for the offset by changing to the ESPFIX segment with
11838 * a base address that matches for the difference.
11839 */
11840 -#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
11841 +#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
11842 mov %esp, %edx /* load kernel esp */
11843 mov PT_OLDESP(%esp), %eax /* load userspace esp */
11844 mov %dx, %ax /* eax: new kernel esp */
11845 sub %eax, %edx /* offset (low word is 0) */
11846 +#ifdef CONFIG_SMP
11847 + movl PER_CPU_VAR(cpu_number), %ebx
11848 + shll $PAGE_SHIFT_asm, %ebx
11849 + addl $cpu_gdt_table, %ebx
11850 +#else
11851 + movl $cpu_gdt_table, %ebx
11852 +#endif
11853 shr $16, %edx
11854 - mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
11855 - mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
11856 +
11857 +#ifdef CONFIG_PAX_KERNEXEC
11858 + mov %cr0, %esi
11859 + btr $16, %esi
11860 + mov %esi, %cr0
11861 +#endif
11862 +
11863 + mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
11864 + mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
11865 +
11866 +#ifdef CONFIG_PAX_KERNEXEC
11867 + bts $16, %esi
11868 + mov %esi, %cr0
11869 +#endif
11870 +
11871 pushl_cfi $__ESPFIX_SS
11872 pushl_cfi %eax /* new kernel esp */
11873 /* Disable interrupts, but do not irqtrace this section: we
11874 @@ -613,34 +824,28 @@ work_resched:
11875 movl TI_flags(%ebp), %ecx
11876 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
11877 # than syscall tracing?
11878 - jz restore_all
11879 + jz restore_all_pax
11880 testb $_TIF_NEED_RESCHED, %cl
11881 jnz work_resched
11882
11883 work_notifysig: # deal with pending signals and
11884 # notify-resume requests
11885 + movl %esp, %eax
11886 #ifdef CONFIG_VM86
11887 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
11888 - movl %esp, %eax
11889 - jne work_notifysig_v86 # returning to kernel-space or
11890 + jz 1f # returning to kernel-space or
11891 # vm86-space
11892 - xorl %edx, %edx
11893 - call do_notify_resume
11894 - jmp resume_userspace_sig
11895
11896 - ALIGN
11897 -work_notifysig_v86:
11898 pushl_cfi %ecx # save ti_flags for do_notify_resume
11899 call save_v86_state # %eax contains pt_regs pointer
11900 popl_cfi %ecx
11901 movl %eax, %esp
11902 -#else
11903 - movl %esp, %eax
11904 +1:
11905 #endif
11906 xorl %edx, %edx
11907 call do_notify_resume
11908 jmp resume_userspace_sig
11909 -END(work_pending)
11910 +ENDPROC(work_pending)
11911
11912 # perform syscall exit tracing
11913 ALIGN
11914 @@ -648,11 +853,14 @@ syscall_trace_entry:
11915 movl $-ENOSYS,PT_EAX(%esp)
11916 movl %esp, %eax
11917 call syscall_trace_enter
11918 +
11919 + pax_erase_kstack
11920 +
11921 /* What it returned is what we'll actually use. */
11922 cmpl $(nr_syscalls), %eax
11923 jnae syscall_call
11924 jmp syscall_exit
11925 -END(syscall_trace_entry)
11926 +ENDPROC(syscall_trace_entry)
11927
11928 # perform syscall exit tracing
11929 ALIGN
11930 @@ -665,20 +873,24 @@ syscall_exit_work:
11931 movl %esp, %eax
11932 call syscall_trace_leave
11933 jmp resume_userspace
11934 -END(syscall_exit_work)
11935 +ENDPROC(syscall_exit_work)
11936 CFI_ENDPROC
11937
11938 RING0_INT_FRAME # can't unwind into user space anyway
11939 syscall_fault:
11940 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11941 + push %ss
11942 + pop %ds
11943 +#endif
11944 GET_THREAD_INFO(%ebp)
11945 movl $-EFAULT,PT_EAX(%esp)
11946 jmp resume_userspace
11947 -END(syscall_fault)
11948 +ENDPROC(syscall_fault)
11949
11950 syscall_badsys:
11951 movl $-ENOSYS,PT_EAX(%esp)
11952 jmp resume_userspace
11953 -END(syscall_badsys)
11954 +ENDPROC(syscall_badsys)
11955 CFI_ENDPROC
11956 /*
11957 * End of kprobes section
11958 @@ -752,6 +964,36 @@ ptregs_clone:
11959 CFI_ENDPROC
11960 ENDPROC(ptregs_clone)
11961
11962 + ALIGN;
11963 +ENTRY(kernel_execve)
11964 + CFI_STARTPROC
11965 + pushl_cfi %ebp
11966 + sub $PT_OLDSS+4,%esp
11967 + pushl_cfi %edi
11968 + pushl_cfi %ecx
11969 + pushl_cfi %eax
11970 + lea 3*4(%esp),%edi
11971 + mov $PT_OLDSS/4+1,%ecx
11972 + xorl %eax,%eax
11973 + rep stosl
11974 + popl_cfi %eax
11975 + popl_cfi %ecx
11976 + popl_cfi %edi
11977 + movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
11978 + pushl_cfi %esp
11979 + call sys_execve
11980 + add $4,%esp
11981 + CFI_ADJUST_CFA_OFFSET -4
11982 + GET_THREAD_INFO(%ebp)
11983 + test %eax,%eax
11984 + jz syscall_exit
11985 + add $PT_OLDSS+4,%esp
11986 + CFI_ADJUST_CFA_OFFSET -PT_OLDSS-4
11987 + popl_cfi %ebp
11988 + ret
11989 + CFI_ENDPROC
11990 +ENDPROC(kernel_execve)
11991 +
11992 .macro FIXUP_ESPFIX_STACK
11993 /*
11994 * Switch back for ESPFIX stack to the normal zerobased stack
11995 @@ -761,8 +1003,15 @@ ENDPROC(ptregs_clone)
11996 * normal stack and adjusts ESP with the matching offset.
11997 */
11998 /* fixup the stack */
11999 - mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
12000 - mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
12001 +#ifdef CONFIG_SMP
12002 + movl PER_CPU_VAR(cpu_number), %ebx
12003 + shll $PAGE_SHIFT_asm, %ebx
12004 + addl $cpu_gdt_table, %ebx
12005 +#else
12006 + movl $cpu_gdt_table, %ebx
12007 +#endif
12008 + mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
12009 + mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
12010 shl $16, %eax
12011 addl %esp, %eax /* the adjusted stack pointer */
12012 pushl_cfi $__KERNEL_DS
12013 @@ -815,7 +1064,7 @@ vector=vector+1
12014 .endr
12015 2: jmp common_interrupt
12016 .endr
12017 -END(irq_entries_start)
12018 +ENDPROC(irq_entries_start)
12019
12020 .previous
12021 END(interrupt)
12022 @@ -863,7 +1112,7 @@ ENTRY(coprocessor_error)
12023 pushl_cfi $do_coprocessor_error
12024 jmp error_code
12025 CFI_ENDPROC
12026 -END(coprocessor_error)
12027 +ENDPROC(coprocessor_error)
12028
12029 ENTRY(simd_coprocessor_error)
12030 RING0_INT_FRAME
12031 @@ -889,7 +1138,7 @@ ENTRY(simd_coprocessor_error)
12032 #endif
12033 jmp error_code
12034 CFI_ENDPROC
12035 -END(simd_coprocessor_error)
12036 +ENDPROC(simd_coprocessor_error)
12037
12038 ENTRY(device_not_available)
12039 RING0_INT_FRAME
12040 @@ -897,7 +1146,7 @@ ENTRY(device_not_available)
12041 pushl_cfi $do_device_not_available
12042 jmp error_code
12043 CFI_ENDPROC
12044 -END(device_not_available)
12045 +ENDPROC(device_not_available)
12046
12047 #ifdef CONFIG_PARAVIRT
12048 ENTRY(native_iret)
12049 @@ -906,12 +1155,12 @@ ENTRY(native_iret)
12050 .align 4
12051 .long native_iret, iret_exc
12052 .previous
12053 -END(native_iret)
12054 +ENDPROC(native_iret)
12055
12056 ENTRY(native_irq_enable_sysexit)
12057 sti
12058 sysexit
12059 -END(native_irq_enable_sysexit)
12060 +ENDPROC(native_irq_enable_sysexit)
12061 #endif
12062
12063 ENTRY(overflow)
12064 @@ -920,7 +1169,7 @@ ENTRY(overflow)
12065 pushl_cfi $do_overflow
12066 jmp error_code
12067 CFI_ENDPROC
12068 -END(overflow)
12069 +ENDPROC(overflow)
12070
12071 ENTRY(bounds)
12072 RING0_INT_FRAME
12073 @@ -928,7 +1177,7 @@ ENTRY(bounds)
12074 pushl_cfi $do_bounds
12075 jmp error_code
12076 CFI_ENDPROC
12077 -END(bounds)
12078 +ENDPROC(bounds)
12079
12080 ENTRY(invalid_op)
12081 RING0_INT_FRAME
12082 @@ -936,7 +1185,7 @@ ENTRY(invalid_op)
12083 pushl_cfi $do_invalid_op
12084 jmp error_code
12085 CFI_ENDPROC
12086 -END(invalid_op)
12087 +ENDPROC(invalid_op)
12088
12089 ENTRY(coprocessor_segment_overrun)
12090 RING0_INT_FRAME
12091 @@ -944,35 +1193,35 @@ ENTRY(coprocessor_segment_overrun)
12092 pushl_cfi $do_coprocessor_segment_overrun
12093 jmp error_code
12094 CFI_ENDPROC
12095 -END(coprocessor_segment_overrun)
12096 +ENDPROC(coprocessor_segment_overrun)
12097
12098 ENTRY(invalid_TSS)
12099 RING0_EC_FRAME
12100 pushl_cfi $do_invalid_TSS
12101 jmp error_code
12102 CFI_ENDPROC
12103 -END(invalid_TSS)
12104 +ENDPROC(invalid_TSS)
12105
12106 ENTRY(segment_not_present)
12107 RING0_EC_FRAME
12108 pushl_cfi $do_segment_not_present
12109 jmp error_code
12110 CFI_ENDPROC
12111 -END(segment_not_present)
12112 +ENDPROC(segment_not_present)
12113
12114 ENTRY(stack_segment)
12115 RING0_EC_FRAME
12116 pushl_cfi $do_stack_segment
12117 jmp error_code
12118 CFI_ENDPROC
12119 -END(stack_segment)
12120 +ENDPROC(stack_segment)
12121
12122 ENTRY(alignment_check)
12123 RING0_EC_FRAME
12124 pushl_cfi $do_alignment_check
12125 jmp error_code
12126 CFI_ENDPROC
12127 -END(alignment_check)
12128 +ENDPROC(alignment_check)
12129
12130 ENTRY(divide_error)
12131 RING0_INT_FRAME
12132 @@ -980,7 +1229,7 @@ ENTRY(divide_error)
12133 pushl_cfi $do_divide_error
12134 jmp error_code
12135 CFI_ENDPROC
12136 -END(divide_error)
12137 +ENDPROC(divide_error)
12138
12139 #ifdef CONFIG_X86_MCE
12140 ENTRY(machine_check)
12141 @@ -989,7 +1238,7 @@ ENTRY(machine_check)
12142 pushl_cfi machine_check_vector
12143 jmp error_code
12144 CFI_ENDPROC
12145 -END(machine_check)
12146 +ENDPROC(machine_check)
12147 #endif
12148
12149 ENTRY(spurious_interrupt_bug)
12150 @@ -998,7 +1247,7 @@ ENTRY(spurious_interrupt_bug)
12151 pushl_cfi $do_spurious_interrupt_bug
12152 jmp error_code
12153 CFI_ENDPROC
12154 -END(spurious_interrupt_bug)
12155 +ENDPROC(spurious_interrupt_bug)
12156 /*
12157 * End of kprobes section
12158 */
12159 @@ -1113,7 +1362,7 @@ BUILD_INTERRUPT3(xen_hvm_callback_vector
12160
12161 ENTRY(mcount)
12162 ret
12163 -END(mcount)
12164 +ENDPROC(mcount)
12165
12166 ENTRY(ftrace_caller)
12167 cmpl $0, function_trace_stop
12168 @@ -1142,7 +1391,7 @@ ftrace_graph_call:
12169 .globl ftrace_stub
12170 ftrace_stub:
12171 ret
12172 -END(ftrace_caller)
12173 +ENDPROC(ftrace_caller)
12174
12175 #else /* ! CONFIG_DYNAMIC_FTRACE */
12176
12177 @@ -1178,7 +1427,7 @@ trace:
12178 popl %ecx
12179 popl %eax
12180 jmp ftrace_stub
12181 -END(mcount)
12182 +ENDPROC(mcount)
12183 #endif /* CONFIG_DYNAMIC_FTRACE */
12184 #endif /* CONFIG_FUNCTION_TRACER */
12185
12186 @@ -1199,7 +1448,7 @@ ENTRY(ftrace_graph_caller)
12187 popl %ecx
12188 popl %eax
12189 ret
12190 -END(ftrace_graph_caller)
12191 +ENDPROC(ftrace_graph_caller)
12192
12193 .globl return_to_handler
12194 return_to_handler:
12195 @@ -1213,7 +1462,6 @@ return_to_handler:
12196 jmp *%ecx
12197 #endif
12198
12199 -.section .rodata,"a"
12200 #include "syscall_table_32.S"
12201
12202 syscall_table_size=(.-sys_call_table)
12203 @@ -1259,15 +1507,18 @@ error_code:
12204 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
12205 REG_TO_PTGS %ecx
12206 SET_KERNEL_GS %ecx
12207 - movl $(__USER_DS), %ecx
12208 + movl $(__KERNEL_DS), %ecx
12209 movl %ecx, %ds
12210 movl %ecx, %es
12211 +
12212 + pax_enter_kernel
12213 +
12214 TRACE_IRQS_OFF
12215 movl %esp,%eax # pt_regs pointer
12216 call *%edi
12217 jmp ret_from_exception
12218 CFI_ENDPROC
12219 -END(page_fault)
12220 +ENDPROC(page_fault)
12221
12222 /*
12223 * Debug traps and NMI can happen at the one SYSENTER instruction
12224 @@ -1309,7 +1560,7 @@ debug_stack_correct:
12225 call do_debug
12226 jmp ret_from_exception
12227 CFI_ENDPROC
12228 -END(debug)
12229 +ENDPROC(debug)
12230
12231 /*
12232 * NMI is doubly nasty. It can happen _while_ we're handling
12233 @@ -1346,6 +1597,9 @@ nmi_stack_correct:
12234 xorl %edx,%edx # zero error code
12235 movl %esp,%eax # pt_regs pointer
12236 call do_nmi
12237 +
12238 + pax_exit_kernel
12239 +
12240 jmp restore_all_notrace
12241 CFI_ENDPROC
12242
12243 @@ -1382,12 +1636,15 @@ nmi_espfix_stack:
12244 FIXUP_ESPFIX_STACK # %eax == %esp
12245 xorl %edx,%edx # zero error code
12246 call do_nmi
12247 +
12248 + pax_exit_kernel
12249 +
12250 RESTORE_REGS
12251 lss 12+4(%esp), %esp # back to espfix stack
12252 CFI_ADJUST_CFA_OFFSET -24
12253 jmp irq_return
12254 CFI_ENDPROC
12255 -END(nmi)
12256 +ENDPROC(nmi)
12257
12258 ENTRY(int3)
12259 RING0_INT_FRAME
12260 @@ -1399,14 +1656,14 @@ ENTRY(int3)
12261 call do_int3
12262 jmp ret_from_exception
12263 CFI_ENDPROC
12264 -END(int3)
12265 +ENDPROC(int3)
12266
12267 ENTRY(general_protection)
12268 RING0_EC_FRAME
12269 pushl_cfi $do_general_protection
12270 jmp error_code
12271 CFI_ENDPROC
12272 -END(general_protection)
12273 +ENDPROC(general_protection)
12274
12275 #ifdef CONFIG_KVM_GUEST
12276 ENTRY(async_page_fault)
12277 @@ -1414,7 +1671,7 @@ ENTRY(async_page_fault)
12278 pushl_cfi $do_async_page_fault
12279 jmp error_code
12280 CFI_ENDPROC
12281 -END(async_page_fault)
12282 +ENDPROC(async_page_fault)
12283 #endif
12284
12285 /*
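Beyond the KERNEXEC segment switching and the UDEREF segment reloads, the largest addition in entry_32.S above is pax_erase_kstack: on the return path it fills the kernel stack from the recorded lowest_stack watermark up to the current stack pointer with a poison word, so data left behind by the syscall cannot leak later. The fill phase, restated as C (the lowest_stack name and the 0xBEEF-based poison come from the assembly; the fake stack and harness are invented for the sketch):

/* Everything between the lowest point the stack reached and the current
 * stack pointer is overwritten with the poison word the assembly loads
 * (mov $-0xBEEF -> 0xffff4111). */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define STACK_WORDS	2048
#define POISON		((uint32_t)-0xBEEF)	/* 0xffff4111 */

static uint32_t fake_kstack[STACK_WORDS];

static void erase_kstack(uint32_t *lowest_stack, uint32_t *current_sp)
{
	for (uint32_t *p = lowest_stack; p < current_sp; p++)
		*p = POISON;			/* the "rep stosl" of the asm */
}

int main(void)
{
	/* Pretend a syscall dirtied words 100..899. */
	memset(fake_kstack + 100, 0x41, 800 * sizeof(uint32_t));

	erase_kstack(fake_kstack + 100, fake_kstack + 900);
	printf("word 100 after erase: %#x\n", (unsigned)fake_kstack[100]);
	return 0;
}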
12286 diff -urNp linux-3.0.8/arch/x86/kernel/entry_64.S linux-3.0.8/arch/x86/kernel/entry_64.S
12287 --- linux-3.0.8/arch/x86/kernel/entry_64.S 2011-07-21 22:17:23.000000000 -0400
12288 +++ linux-3.0.8/arch/x86/kernel/entry_64.S 2011-10-20 04:46:01.000000000 -0400
12289 @@ -53,6 +53,8 @@
12290 #include <asm/paravirt.h>
12291 #include <asm/ftrace.h>
12292 #include <asm/percpu.h>
12293 +#include <asm/pgtable.h>
12294 +#include <asm/alternative-asm.h>
12295
12296 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
12297 #include <linux/elf-em.h>
12298 @@ -66,8 +68,9 @@
12299 #ifdef CONFIG_FUNCTION_TRACER
12300 #ifdef CONFIG_DYNAMIC_FTRACE
12301 ENTRY(mcount)
12302 + pax_force_retaddr
12303 retq
12304 -END(mcount)
12305 +ENDPROC(mcount)
12306
12307 ENTRY(ftrace_caller)
12308 cmpl $0, function_trace_stop
12309 @@ -90,8 +93,9 @@ GLOBAL(ftrace_graph_call)
12310 #endif
12311
12312 GLOBAL(ftrace_stub)
12313 + pax_force_retaddr
12314 retq
12315 -END(ftrace_caller)
12316 +ENDPROC(ftrace_caller)
12317
12318 #else /* ! CONFIG_DYNAMIC_FTRACE */
12319 ENTRY(mcount)
12320 @@ -110,6 +114,7 @@ ENTRY(mcount)
12321 #endif
12322
12323 GLOBAL(ftrace_stub)
12324 + pax_force_retaddr
12325 retq
12326
12327 trace:
12328 @@ -119,12 +124,13 @@ trace:
12329 movq 8(%rbp), %rsi
12330 subq $MCOUNT_INSN_SIZE, %rdi
12331
12332 + pax_force_fptr ftrace_trace_function
12333 call *ftrace_trace_function
12334
12335 MCOUNT_RESTORE_FRAME
12336
12337 jmp ftrace_stub
12338 -END(mcount)
12339 +ENDPROC(mcount)
12340 #endif /* CONFIG_DYNAMIC_FTRACE */
12341 #endif /* CONFIG_FUNCTION_TRACER */
12342
12343 @@ -144,8 +150,9 @@ ENTRY(ftrace_graph_caller)
12344
12345 MCOUNT_RESTORE_FRAME
12346
12347 + pax_force_retaddr
12348 retq
12349 -END(ftrace_graph_caller)
12350 +ENDPROC(ftrace_graph_caller)
12351
12352 GLOBAL(return_to_handler)
12353 subq $24, %rsp
12354 @@ -161,6 +168,7 @@ GLOBAL(return_to_handler)
12355 movq 8(%rsp), %rdx
12356 movq (%rsp), %rax
12357 addq $24, %rsp
12358 + pax_force_fptr %rdi
12359 jmp *%rdi
12360 #endif
12361
12362 @@ -176,6 +184,269 @@ ENTRY(native_usergs_sysret64)
12363 ENDPROC(native_usergs_sysret64)
12364 #endif /* CONFIG_PARAVIRT */
12365
12366 + .macro ljmpq sel, off
12367 +#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
12368 + .byte 0x48; ljmp *1234f(%rip)
12369 + .pushsection .rodata
12370 + .align 16
12371 + 1234: .quad \off; .word \sel
12372 + .popsection
12373 +#else
12374 + pushq $\sel
12375 + pushq $\off
12376 + lretq
12377 +#endif
12378 + .endm
12379 +
12380 + .macro pax_enter_kernel
12381 +#ifdef CONFIG_PAX_KERNEXEC
12382 + call pax_enter_kernel
12383 +#endif
12384 + .endm
12385 +
12386 + .macro pax_exit_kernel
12387 +#ifdef CONFIG_PAX_KERNEXEC
12388 + call pax_exit_kernel
12389 +#endif
12390 + .endm
12391 +
12392 +#ifdef CONFIG_PAX_KERNEXEC
12393 +ENTRY(pax_enter_kernel)
12394 + pushq %rdi
12395 +
12396 +#ifdef CONFIG_PARAVIRT
12397 + PV_SAVE_REGS(CLBR_RDI)
12398 +#endif
12399 +
12400 + GET_CR0_INTO_RDI
12401 + bts $16,%rdi
12402 + jnc 1f
12403 + mov %cs,%edi
12404 + cmp $__KERNEL_CS,%edi
12405 + jz 3f
12406 + ljmpq __KERNEL_CS,3f
12407 +1: ljmpq __KERNEXEC_KERNEL_CS,2f
12408 +2: SET_RDI_INTO_CR0
12409 +3:
12410 +
12411 +#ifdef CONFIG_PARAVIRT
12412 + PV_RESTORE_REGS(CLBR_RDI)
12413 +#endif
12414 +
12415 + popq %rdi
12416 + pax_force_retaddr
12417 + retq
12418 +ENDPROC(pax_enter_kernel)
12419 +
12420 +ENTRY(pax_exit_kernel)
12421 + pushq %rdi
12422 +
12423 +#ifdef CONFIG_PARAVIRT
12424 + PV_SAVE_REGS(CLBR_RDI)
12425 +#endif
12426 +
12427 + mov %cs,%rdi
12428 + cmp $__KERNEXEC_KERNEL_CS,%edi
12429 + jnz 2f
12430 + GET_CR0_INTO_RDI
12431 + btr $16,%rdi
12432 + ljmpq __KERNEL_CS,1f
12433 +1: SET_RDI_INTO_CR0
12434 +2:
12435 +
12436 +#ifdef CONFIG_PARAVIRT
12437 + PV_RESTORE_REGS(CLBR_RDI);
12438 +#endif
12439 +
12440 + popq %rdi
12441 + pax_force_retaddr
12442 + retq
12443 +ENDPROC(pax_exit_kernel)
12444 +#endif
12445 +
12446 + .macro pax_enter_kernel_user
12447 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12448 + call pax_enter_kernel_user
12449 +#endif
12450 + .endm
12451 +
12452 + .macro pax_exit_kernel_user
12453 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12454 + call pax_exit_kernel_user
12455 +#endif
12456 +#ifdef CONFIG_PAX_RANDKSTACK
12457 + push %rax
12458 + call pax_randomize_kstack
12459 + pop %rax
12460 +#endif
12461 + .endm
12462 +
12463 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12464 +ENTRY(pax_enter_kernel_user)
12465 + pushq %rdi
12466 + pushq %rbx
12467 +
12468 +#ifdef CONFIG_PARAVIRT
12469 + PV_SAVE_REGS(CLBR_RDI)
12470 +#endif
12471 +
12472 + GET_CR3_INTO_RDI
12473 + mov %rdi,%rbx
12474 + add $__START_KERNEL_map,%rbx
12475 + sub phys_base(%rip),%rbx
12476 +
12477 +#ifdef CONFIG_PARAVIRT
12478 + pushq %rdi
12479 + cmpl $0, pv_info+PARAVIRT_enabled
12480 + jz 1f
12481 + i = 0
12482 + .rept USER_PGD_PTRS
12483 + mov i*8(%rbx),%rsi
12484 + mov $0,%sil
12485 + lea i*8(%rbx),%rdi
12486 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
12487 + i = i + 1
12488 + .endr
12489 + jmp 2f
12490 +1:
12491 +#endif
12492 +
12493 + i = 0
12494 + .rept USER_PGD_PTRS
12495 + movb $0,i*8(%rbx)
12496 + i = i + 1
12497 + .endr
12498 +
12499 +#ifdef CONFIG_PARAVIRT
12500 +2: popq %rdi
12501 +#endif
12502 + SET_RDI_INTO_CR3
12503 +
12504 +#ifdef CONFIG_PAX_KERNEXEC
12505 + GET_CR0_INTO_RDI
12506 + bts $16,%rdi
12507 + SET_RDI_INTO_CR0
12508 +#endif
12509 +
12510 +#ifdef CONFIG_PARAVIRT
12511 + PV_RESTORE_REGS(CLBR_RDI)
12512 +#endif
12513 +
12514 + popq %rbx
12515 + popq %rdi
12516 + pax_force_retaddr
12517 + retq
12518 +ENDPROC(pax_enter_kernel_user)
12519 +
12520 +ENTRY(pax_exit_kernel_user)
12521 + push %rdi
12522 +
12523 +#ifdef CONFIG_PARAVIRT
12524 + pushq %rbx
12525 + PV_SAVE_REGS(CLBR_RDI)
12526 +#endif
12527 +
12528 +#ifdef CONFIG_PAX_KERNEXEC
12529 + GET_CR0_INTO_RDI
12530 + btr $16,%rdi
12531 + SET_RDI_INTO_CR0
12532 +#endif
12533 +
12534 + GET_CR3_INTO_RDI
12535 + add $__START_KERNEL_map,%rdi
12536 + sub phys_base(%rip),%rdi
12537 +
12538 +#ifdef CONFIG_PARAVIRT
12539 + cmpl $0, pv_info+PARAVIRT_enabled
12540 + jz 1f
12541 + mov %rdi,%rbx
12542 + i = 0
12543 + .rept USER_PGD_PTRS
12544 + mov i*8(%rbx),%rsi
12545 + mov $0x67,%sil
12546 + lea i*8(%rbx),%rdi
12547 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
12548 + i = i + 1
12549 + .endr
12550 + jmp 2f
12551 +1:
12552 +#endif
12553 +
12554 + i = 0
12555 + .rept USER_PGD_PTRS
12556 + movb $0x67,i*8(%rdi)
12557 + i = i + 1
12558 + .endr
12559 +
12560 +#ifdef CONFIG_PARAVIRT
12561 +2: PV_RESTORE_REGS(CLBR_RDI)
12562 + popq %rbx
12563 +#endif
12564 +
12565 + popq %rdi
12566 + pax_force_retaddr
12567 + retq
12568 +ENDPROC(pax_exit_kernel_user)
12569 +#endif
12570 +
12571 + .macro pax_erase_kstack
12572 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12573 + call pax_erase_kstack
12574 +#endif
12575 + .endm
12576 +
12577 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12578 +/*
12579 + * r10: thread_info
12580 + * rcx, rdx: can be clobbered
12581 + */
12582 +ENTRY(pax_erase_kstack)
12583 + pushq %rdi
12584 + pushq %rax
12585 + pushq %r10
12586 +
12587 + GET_THREAD_INFO(%r10)
12588 + mov TI_lowest_stack(%r10), %rdi
12589 + mov $-0xBEEF, %rax
12590 + std
12591 +
12592 +1: mov %edi, %ecx
12593 + and $THREAD_SIZE_asm - 1, %ecx
12594 + shr $3, %ecx
12595 + repne scasq
12596 + jecxz 2f
12597 +
12598 + cmp $2*8, %ecx
12599 + jc 2f
12600 +
12601 + mov $2*8, %ecx
12602 + repe scasq
12603 + jecxz 2f
12604 + jne 1b
12605 +
12606 +2: cld
12607 + mov %esp, %ecx
12608 + sub %edi, %ecx
12609 +
12610 + cmp $THREAD_SIZE_asm, %rcx
12611 + jb 3f
12612 + ud2
12613 +3:
12614 +
12615 + shr $3, %ecx
12616 + rep stosq
12617 +
12618 + mov TI_task_thread_sp0(%r10), %rdi
12619 + sub $256, %rdi
12620 + mov %rdi, TI_lowest_stack(%r10)
12621 +
12622 + popq %r10
12623 + popq %rax
12624 + popq %rdi
12625 + pax_force_retaddr
12626 + ret
12627 +ENDPROC(pax_erase_kstack)
12628 +#endif
12629
12630 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
12631 #ifdef CONFIG_TRACE_IRQFLAGS
12632 @@ -318,7 +589,7 @@ ENTRY(save_args)
12633 leaq -RBP+8(%rsp),%rdi /* arg1 for handler */
12634 movq_cfi rbp, 8 /* push %rbp */
12635 leaq 8(%rsp), %rbp /* mov %rsp, %ebp */
12636 - testl $3, CS(%rdi)
12637 + testb $3, CS(%rdi)
12638 je 1f
12639 SWAPGS
12640 /*
12641 @@ -338,9 +609,10 @@ ENTRY(save_args)
12642 * We entered an interrupt context - irqs are off:
12643 */
12644 2: TRACE_IRQS_OFF
12645 + pax_force_retaddr
12646 ret
12647 CFI_ENDPROC
12648 -END(save_args)
12649 +ENDPROC(save_args)
12650 .popsection
12651
12652 ENTRY(save_rest)
12653 @@ -354,9 +626,10 @@ ENTRY(save_rest)
12654 movq_cfi r15, R15+16
12655 movq %r11, 8(%rsp) /* return address */
12656 FIXUP_TOP_OF_STACK %r11, 16
12657 + pax_force_retaddr
12658 ret
12659 CFI_ENDPROC
12660 -END(save_rest)
12661 +ENDPROC(save_rest)
12662
12663 /* save complete stack frame */
12664 .pushsection .kprobes.text, "ax"
12665 @@ -385,9 +658,10 @@ ENTRY(save_paranoid)
12666 js 1f /* negative -> in kernel */
12667 SWAPGS
12668 xorl %ebx,%ebx
12669 -1: ret
12670 +1: pax_force_retaddr
12671 + ret
12672 CFI_ENDPROC
12673 -END(save_paranoid)
12674 +ENDPROC(save_paranoid)
12675 .popsection
12676
12677 /*
12678 @@ -409,7 +683,7 @@ ENTRY(ret_from_fork)
12679
12680 RESTORE_REST
12681
12682 - testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
12683 + testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
12684 je int_ret_from_sys_call
12685
12686 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
12687 @@ -419,7 +693,7 @@ ENTRY(ret_from_fork)
12688 jmp ret_from_sys_call # go to the SYSRET fastpath
12689
12690 CFI_ENDPROC
12691 -END(ret_from_fork)
12692 +ENDPROC(ret_from_fork)
12693
12694 /*
12695 * System call entry. Up to 6 arguments in registers are supported.
12696 @@ -455,7 +729,7 @@ END(ret_from_fork)
12697 ENTRY(system_call)
12698 CFI_STARTPROC simple
12699 CFI_SIGNAL_FRAME
12700 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
12701 + CFI_DEF_CFA rsp,0
12702 CFI_REGISTER rip,rcx
12703 /*CFI_REGISTER rflags,r11*/
12704 SWAPGS_UNSAFE_STACK
12705 @@ -468,12 +742,13 @@ ENTRY(system_call_after_swapgs)
12706
12707 movq %rsp,PER_CPU_VAR(old_rsp)
12708 movq PER_CPU_VAR(kernel_stack),%rsp
12709 + pax_enter_kernel_user
12710 /*
12711 * No need to follow this irqs off/on section - it's straight
12712 * and short:
12713 */
12714 ENABLE_INTERRUPTS(CLBR_NONE)
12715 - SAVE_ARGS 8,1
12716 + SAVE_ARGS 8*6,1
12717 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
12718 movq %rcx,RIP-ARGOFFSET(%rsp)
12719 CFI_REL_OFFSET rip,RIP-ARGOFFSET
12720 @@ -502,6 +777,8 @@ sysret_check:
12721 andl %edi,%edx
12722 jnz sysret_careful
12723 CFI_REMEMBER_STATE
12724 + pax_exit_kernel_user
12725 + pax_erase_kstack
12726 /*
12727 * sysretq will re-enable interrupts:
12728 */
12729 @@ -560,6 +837,9 @@ auditsys:
12730 movq %rax,%rsi /* 2nd arg: syscall number */
12731 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
12732 call audit_syscall_entry
12733 +
12734 + pax_erase_kstack
12735 +
12736 LOAD_ARGS 0 /* reload call-clobbered registers */
12737 jmp system_call_fastpath
12738
12739 @@ -590,6 +870,9 @@ tracesys:
12740 FIXUP_TOP_OF_STACK %rdi
12741 movq %rsp,%rdi
12742 call syscall_trace_enter
12743 +
12744 + pax_erase_kstack
12745 +
12746 /*
12747 * Reload arg registers from stack in case ptrace changed them.
12748 * We don't reload %rax because syscall_trace_enter() returned
12749 @@ -611,7 +894,7 @@ tracesys:
12750 GLOBAL(int_ret_from_sys_call)
12751 DISABLE_INTERRUPTS(CLBR_NONE)
12752 TRACE_IRQS_OFF
12753 - testl $3,CS-ARGOFFSET(%rsp)
12754 + testb $3,CS-ARGOFFSET(%rsp)
12755 je retint_restore_args
12756 movl $_TIF_ALLWORK_MASK,%edi
12757 /* edi: mask to check */
12758 @@ -668,7 +951,7 @@ int_restore_rest:
12759 TRACE_IRQS_OFF
12760 jmp int_with_check
12761 CFI_ENDPROC
12762 -END(system_call)
12763 +ENDPROC(system_call)
12764
12765 /*
12766 * Certain special system calls that need to save a complete full stack frame.
12767 @@ -684,7 +967,7 @@ ENTRY(\label)
12768 call \func
12769 jmp ptregscall_common
12770 CFI_ENDPROC
12771 -END(\label)
12772 +ENDPROC(\label)
12773 .endm
12774
12775 PTREGSCALL stub_clone, sys_clone, %r8
12776 @@ -702,9 +985,10 @@ ENTRY(ptregscall_common)
12777 movq_cfi_restore R12+8, r12
12778 movq_cfi_restore RBP+8, rbp
12779 movq_cfi_restore RBX+8, rbx
12780 + pax_force_retaddr
12781 ret $REST_SKIP /* pop extended registers */
12782 CFI_ENDPROC
12783 -END(ptregscall_common)
12784 +ENDPROC(ptregscall_common)
12785
12786 ENTRY(stub_execve)
12787 CFI_STARTPROC
12788 @@ -719,7 +1003,7 @@ ENTRY(stub_execve)
12789 RESTORE_REST
12790 jmp int_ret_from_sys_call
12791 CFI_ENDPROC
12792 -END(stub_execve)
12793 +ENDPROC(stub_execve)
12794
12795 /*
12796 * sigreturn is special because it needs to restore all registers on return.
12797 @@ -737,7 +1021,7 @@ ENTRY(stub_rt_sigreturn)
12798 RESTORE_REST
12799 jmp int_ret_from_sys_call
12800 CFI_ENDPROC
12801 -END(stub_rt_sigreturn)
12802 +ENDPROC(stub_rt_sigreturn)
12803
12804 /*
12805 * Build the entry stubs and pointer table with some assembler magic.
12806 @@ -772,7 +1056,7 @@ vector=vector+1
12807 2: jmp common_interrupt
12808 .endr
12809 CFI_ENDPROC
12810 -END(irq_entries_start)
12811 +ENDPROC(irq_entries_start)
12812
12813 .previous
12814 END(interrupt)
12815 @@ -793,6 +1077,16 @@ END(interrupt)
12816 CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
12817 call save_args
12818 PARTIAL_FRAME 0
12819 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12820 + testb $3, CS(%rdi)
12821 + jnz 1f
12822 + pax_enter_kernel
12823 + jmp 2f
12824 +1: pax_enter_kernel_user
12825 +2:
12826 +#else
12827 + pax_enter_kernel
12828 +#endif
12829 call \func
12830 .endm
12831
12832 @@ -825,7 +1119,7 @@ ret_from_intr:
12833 CFI_ADJUST_CFA_OFFSET -8
12834 exit_intr:
12835 GET_THREAD_INFO(%rcx)
12836 - testl $3,CS-ARGOFFSET(%rsp)
12837 + testb $3,CS-ARGOFFSET(%rsp)
12838 je retint_kernel
12839
12840 /* Interrupt came from user space */
12841 @@ -847,12 +1141,16 @@ retint_swapgs: /* return to user-space
12842 * The iretq could re-enable interrupts:
12843 */
12844 DISABLE_INTERRUPTS(CLBR_ANY)
12845 + pax_exit_kernel_user
12846 + pax_erase_kstack
12847 TRACE_IRQS_IRETQ
12848 SWAPGS
12849 jmp restore_args
12850
12851 retint_restore_args: /* return to kernel space */
12852 DISABLE_INTERRUPTS(CLBR_ANY)
12853 + pax_exit_kernel
12854 + pax_force_retaddr RIP-ARGOFFSET
12855 /*
12856 * The iretq could re-enable interrupts:
12857 */
12858 @@ -941,7 +1239,7 @@ ENTRY(retint_kernel)
12859 #endif
12860
12861 CFI_ENDPROC
12862 -END(common_interrupt)
12863 +ENDPROC(common_interrupt)
12864 /*
12865 * End of kprobes section
12866 */
12867 @@ -957,7 +1255,7 @@ ENTRY(\sym)
12868 interrupt \do_sym
12869 jmp ret_from_intr
12870 CFI_ENDPROC
12871 -END(\sym)
12872 +ENDPROC(\sym)
12873 .endm
12874
12875 #ifdef CONFIG_SMP
12876 @@ -1027,12 +1325,22 @@ ENTRY(\sym)
12877 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12878 call error_entry
12879 DEFAULT_FRAME 0
12880 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12881 + testb $3, CS(%rsp)
12882 + jnz 1f
12883 + pax_enter_kernel
12884 + jmp 2f
12885 +1: pax_enter_kernel_user
12886 +2:
12887 +#else
12888 + pax_enter_kernel
12889 +#endif
12890 movq %rsp,%rdi /* pt_regs pointer */
12891 xorl %esi,%esi /* no error code */
12892 call \do_sym
12893 jmp error_exit /* %ebx: no swapgs flag */
12894 CFI_ENDPROC
12895 -END(\sym)
12896 +ENDPROC(\sym)
12897 .endm
12898
12899 .macro paranoidzeroentry sym do_sym
12900 @@ -1044,15 +1352,25 @@ ENTRY(\sym)
12901 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12902 call save_paranoid
12903 TRACE_IRQS_OFF
12904 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12905 + testb $3, CS(%rsp)
12906 + jnz 1f
12907 + pax_enter_kernel
12908 + jmp 2f
12909 +1: pax_enter_kernel_user
12910 +2:
12911 +#else
12912 + pax_enter_kernel
12913 +#endif
12914 movq %rsp,%rdi /* pt_regs pointer */
12915 xorl %esi,%esi /* no error code */
12916 call \do_sym
12917 jmp paranoid_exit /* %ebx: no swapgs flag */
12918 CFI_ENDPROC
12919 -END(\sym)
12920 +ENDPROC(\sym)
12921 .endm
12922
12923 -#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
12924 +#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r12)
12925 .macro paranoidzeroentry_ist sym do_sym ist
12926 ENTRY(\sym)
12927 INTR_FRAME
12928 @@ -1062,14 +1380,30 @@ ENTRY(\sym)
12929 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12930 call save_paranoid
12931 TRACE_IRQS_OFF
12932 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12933 + testb $3, CS(%rsp)
12934 + jnz 1f
12935 + pax_enter_kernel
12936 + jmp 2f
12937 +1: pax_enter_kernel_user
12938 +2:
12939 +#else
12940 + pax_enter_kernel
12941 +#endif
12942 movq %rsp,%rdi /* pt_regs pointer */
12943 xorl %esi,%esi /* no error code */
12944 +#ifdef CONFIG_SMP
12945 + imul $TSS_size, PER_CPU_VAR(cpu_number), %r12d
12946 + lea init_tss(%r12), %r12
12947 +#else
12948 + lea init_tss(%rip), %r12
12949 +#endif
12950 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
12951 call \do_sym
12952 addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
12953 jmp paranoid_exit /* %ebx: no swapgs flag */
12954 CFI_ENDPROC
12955 -END(\sym)
12956 +ENDPROC(\sym)
12957 .endm
12958
12959 .macro errorentry sym do_sym
12960 @@ -1080,13 +1414,23 @@ ENTRY(\sym)
12961 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12962 call error_entry
12963 DEFAULT_FRAME 0
12964 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12965 + testb $3, CS(%rsp)
12966 + jnz 1f
12967 + pax_enter_kernel
12968 + jmp 2f
12969 +1: pax_enter_kernel_user
12970 +2:
12971 +#else
12972 + pax_enter_kernel
12973 +#endif
12974 movq %rsp,%rdi /* pt_regs pointer */
12975 movq ORIG_RAX(%rsp),%rsi /* get error code */
12976 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
12977 call \do_sym
12978 jmp error_exit /* %ebx: no swapgs flag */
12979 CFI_ENDPROC
12980 -END(\sym)
12981 +ENDPROC(\sym)
12982 .endm
12983
12984 /* error code is on the stack already */
12985 @@ -1099,13 +1443,23 @@ ENTRY(\sym)
12986 call save_paranoid
12987 DEFAULT_FRAME 0
12988 TRACE_IRQS_OFF
12989 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12990 + testb $3, CS(%rsp)
12991 + jnz 1f
12992 + pax_enter_kernel
12993 + jmp 2f
12994 +1: pax_enter_kernel_user
12995 +2:
12996 +#else
12997 + pax_enter_kernel
12998 +#endif
12999 movq %rsp,%rdi /* pt_regs pointer */
13000 movq ORIG_RAX(%rsp),%rsi /* get error code */
13001 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
13002 call \do_sym
13003 jmp paranoid_exit /* %ebx: no swapgs flag */
13004 CFI_ENDPROC
13005 -END(\sym)
13006 +ENDPROC(\sym)
13007 .endm
13008
13009 zeroentry divide_error do_divide_error
13010 @@ -1134,9 +1488,10 @@ gs_change:
13011 2: mfence /* workaround */
13012 SWAPGS
13013 popfq_cfi
13014 + pax_force_retaddr
13015 ret
13016 CFI_ENDPROC
13017 -END(native_load_gs_index)
13018 +ENDPROC(native_load_gs_index)
13019
13020 .section __ex_table,"a"
13021 .align 8
13022 @@ -1158,13 +1513,14 @@ ENTRY(kernel_thread_helper)
13023 * Here we are in the child and the registers are set as they were
13024 * at kernel_thread() invocation in the parent.
13025 */
13026 + pax_force_fptr %rsi
13027 call *%rsi
13028 # exit
13029 mov %eax, %edi
13030 call do_exit
13031 ud2 # padding for call trace
13032 CFI_ENDPROC
13033 -END(kernel_thread_helper)
13034 +ENDPROC(kernel_thread_helper)
13035
13036 /*
13037 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
13038 @@ -1193,9 +1549,10 @@ ENTRY(kernel_execve)
13039 je int_ret_from_sys_call
13040 RESTORE_ARGS
13041 UNFAKE_STACK_FRAME
13042 + pax_force_retaddr
13043 ret
13044 CFI_ENDPROC
13045 -END(kernel_execve)
13046 +ENDPROC(kernel_execve)
13047
13048 /* Call softirq on interrupt stack. Interrupts are off. */
13049 ENTRY(call_softirq)
13050 @@ -1213,9 +1570,10 @@ ENTRY(call_softirq)
13051 CFI_DEF_CFA_REGISTER rsp
13052 CFI_ADJUST_CFA_OFFSET -8
13053 decl PER_CPU_VAR(irq_count)
13054 + pax_force_retaddr
13055 ret
13056 CFI_ENDPROC
13057 -END(call_softirq)
13058 +ENDPROC(call_softirq)
13059
13060 #ifdef CONFIG_XEN
13061 zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
13062 @@ -1253,7 +1611,7 @@ ENTRY(xen_do_hypervisor_callback) # do
13063 decl PER_CPU_VAR(irq_count)
13064 jmp error_exit
13065 CFI_ENDPROC
13066 -END(xen_do_hypervisor_callback)
13067 +ENDPROC(xen_do_hypervisor_callback)
13068
13069 /*
13070 * Hypervisor uses this for application faults while it executes.
13071 @@ -1312,7 +1670,7 @@ ENTRY(xen_failsafe_callback)
13072 SAVE_ALL
13073 jmp error_exit
13074 CFI_ENDPROC
13075 -END(xen_failsafe_callback)
13076 +ENDPROC(xen_failsafe_callback)
13077
13078 apicinterrupt XEN_HVM_EVTCHN_CALLBACK \
13079 xen_hvm_callback_vector xen_evtchn_do_upcall
13080 @@ -1361,16 +1719,31 @@ ENTRY(paranoid_exit)
13081 TRACE_IRQS_OFF
13082 testl %ebx,%ebx /* swapgs needed? */
13083 jnz paranoid_restore
13084 - testl $3,CS(%rsp)
13085 + testb $3,CS(%rsp)
13086 jnz paranoid_userspace
13087 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13088 + pax_exit_kernel
13089 + TRACE_IRQS_IRETQ 0
13090 + SWAPGS_UNSAFE_STACK
13091 + RESTORE_ALL 8
13092 + pax_force_retaddr
13093 + jmp irq_return
13094 +#endif
13095 paranoid_swapgs:
13096 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13097 + pax_exit_kernel_user
13098 +#else
13099 + pax_exit_kernel
13100 +#endif
13101 TRACE_IRQS_IRETQ 0
13102 SWAPGS_UNSAFE_STACK
13103 RESTORE_ALL 8
13104 jmp irq_return
13105 paranoid_restore:
13106 + pax_exit_kernel
13107 TRACE_IRQS_IRETQ 0
13108 RESTORE_ALL 8
13109 + pax_force_retaddr
13110 jmp irq_return
13111 paranoid_userspace:
13112 GET_THREAD_INFO(%rcx)
13113 @@ -1399,7 +1772,7 @@ paranoid_schedule:
13114 TRACE_IRQS_OFF
13115 jmp paranoid_userspace
13116 CFI_ENDPROC
13117 -END(paranoid_exit)
13118 +ENDPROC(paranoid_exit)
13119
13120 /*
13121 * Exception entry point. This expects an error code/orig_rax on the stack.
13122 @@ -1426,12 +1799,13 @@ ENTRY(error_entry)
13123 movq_cfi r14, R14+8
13124 movq_cfi r15, R15+8
13125 xorl %ebx,%ebx
13126 - testl $3,CS+8(%rsp)
13127 + testb $3,CS+8(%rsp)
13128 je error_kernelspace
13129 error_swapgs:
13130 SWAPGS
13131 error_sti:
13132 TRACE_IRQS_OFF
13133 + pax_force_retaddr
13134 ret
13135
13136 /*
13137 @@ -1458,7 +1832,7 @@ bstep_iret:
13138 movq %rcx,RIP+8(%rsp)
13139 jmp error_swapgs
13140 CFI_ENDPROC
13141 -END(error_entry)
13142 +ENDPROC(error_entry)
13143
13144
13145 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
13146 @@ -1478,7 +1852,7 @@ ENTRY(error_exit)
13147 jnz retint_careful
13148 jmp retint_swapgs
13149 CFI_ENDPROC
13150 -END(error_exit)
13151 +ENDPROC(error_exit)
13152
13153
13154 /* runs on exception stack */
13155 @@ -1490,6 +1864,16 @@ ENTRY(nmi)
13156 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
13157 call save_paranoid
13158 DEFAULT_FRAME 0
13159 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13160 + testb $3, CS(%rsp)
13161 + jnz 1f
13162 + pax_enter_kernel
13163 + jmp 2f
13164 +1: pax_enter_kernel_user
13165 +2:
13166 +#else
13167 + pax_enter_kernel
13168 +#endif
13169 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
13170 movq %rsp,%rdi
13171 movq $-1,%rsi
13172 @@ -1500,12 +1884,28 @@ ENTRY(nmi)
13173 DISABLE_INTERRUPTS(CLBR_NONE)
13174 testl %ebx,%ebx /* swapgs needed? */
13175 jnz nmi_restore
13176 - testl $3,CS(%rsp)
13177 + testb $3,CS(%rsp)
13178 jnz nmi_userspace
13179 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13180 + pax_exit_kernel
13181 + SWAPGS_UNSAFE_STACK
13182 + RESTORE_ALL 8
13183 + pax_force_retaddr
13184 + jmp irq_return
13185 +#endif
13186 nmi_swapgs:
13187 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13188 + pax_exit_kernel_user
13189 +#else
13190 + pax_exit_kernel
13191 +#endif
13192 SWAPGS_UNSAFE_STACK
13193 + RESTORE_ALL 8
13194 + jmp irq_return
13195 nmi_restore:
13196 + pax_exit_kernel
13197 RESTORE_ALL 8
13198 + pax_force_retaddr
13199 jmp irq_return
13200 nmi_userspace:
13201 GET_THREAD_INFO(%rcx)
13202 @@ -1534,14 +1934,14 @@ nmi_schedule:
13203 jmp paranoid_exit
13204 CFI_ENDPROC
13205 #endif
13206 -END(nmi)
13207 +ENDPROC(nmi)
13208
13209 ENTRY(ignore_sysret)
13210 CFI_STARTPROC
13211 mov $-ENOSYS,%eax
13212 sysret
13213 CFI_ENDPROC
13214 -END(ignore_sysret)
13215 +ENDPROC(ignore_sysret)
13216
13217 /*
13218 * End of kprobes section
13219 diff -urNp linux-3.0.8/arch/x86/kernel/ftrace.c linux-3.0.8/arch/x86/kernel/ftrace.c
13220 --- linux-3.0.8/arch/x86/kernel/ftrace.c 2011-07-21 22:17:23.000000000 -0400
13221 +++ linux-3.0.8/arch/x86/kernel/ftrace.c 2011-08-23 21:47:55.000000000 -0400
13222 @@ -126,7 +126,7 @@ static void *mod_code_ip; /* holds the
13223 static const void *mod_code_newcode; /* holds the text to write to the IP */
13224
13225 static unsigned nmi_wait_count;
13226 -static atomic_t nmi_update_count = ATOMIC_INIT(0);
13227 +static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
13228
13229 int ftrace_arch_read_dyn_info(char *buf, int size)
13230 {
13231 @@ -134,7 +134,7 @@ int ftrace_arch_read_dyn_info(char *buf,
13232
13233 r = snprintf(buf, size, "%u %u",
13234 nmi_wait_count,
13235 - atomic_read(&nmi_update_count));
13236 + atomic_read_unchecked(&nmi_update_count));
13237 return r;
13238 }
13239
13240 @@ -177,8 +177,10 @@ void ftrace_nmi_enter(void)
13241
13242 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
13243 smp_rmb();
13244 + pax_open_kernel();
13245 ftrace_mod_code();
13246 - atomic_inc(&nmi_update_count);
13247 + pax_close_kernel();
13248 + atomic_inc_unchecked(&nmi_update_count);
13249 }
13250 /* Must have previous changes seen before executions */
13251 smp_mb();
13252 @@ -271,6 +273,8 @@ ftrace_modify_code(unsigned long ip, uns
13253 {
13254 unsigned char replaced[MCOUNT_INSN_SIZE];
13255
13256 + ip = ktla_ktva(ip);
13257 +
13258 /*
13259 * Note: Due to modules and __init, code can
13260 * disappear and change, we need to protect against faulting
13261 @@ -327,7 +331,7 @@ int ftrace_update_ftrace_func(ftrace_fun
13262 unsigned char old[MCOUNT_INSN_SIZE], *new;
13263 int ret;
13264
13265 - memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
13266 + memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
13267 new = ftrace_call_replace(ip, (unsigned long)func);
13268 ret = ftrace_modify_code(ip, old, new);
13269
13270 @@ -353,6 +357,8 @@ static int ftrace_mod_jmp(unsigned long
13271 {
13272 unsigned char code[MCOUNT_INSN_SIZE];
13273
13274 + ip = ktla_ktva(ip);
13275 +
13276 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
13277 return -EFAULT;
13278
13279 diff -urNp linux-3.0.8/arch/x86/kernel/head32.c linux-3.0.8/arch/x86/kernel/head32.c
13280 --- linux-3.0.8/arch/x86/kernel/head32.c 2011-07-21 22:17:23.000000000 -0400
13281 +++ linux-3.0.8/arch/x86/kernel/head32.c 2011-08-23 21:47:55.000000000 -0400
13282 @@ -19,6 +19,7 @@
13283 #include <asm/io_apic.h>
13284 #include <asm/bios_ebda.h>
13285 #include <asm/tlbflush.h>
13286 +#include <asm/boot.h>
13287
13288 static void __init i386_default_early_setup(void)
13289 {
13290 @@ -33,7 +34,7 @@ void __init i386_start_kernel(void)
13291 {
13292 memblock_init();
13293
13294 - memblock_x86_reserve_range(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
13295 + memblock_x86_reserve_range(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop), "TEXT DATA BSS");
13296
13297 #ifdef CONFIG_BLK_DEV_INITRD
13298 /* Reserve INITRD */
13299 diff -urNp linux-3.0.8/arch/x86/kernel/head_32.S linux-3.0.8/arch/x86/kernel/head_32.S
13300 --- linux-3.0.8/arch/x86/kernel/head_32.S 2011-07-21 22:17:23.000000000 -0400
13301 +++ linux-3.0.8/arch/x86/kernel/head_32.S 2011-08-23 21:47:55.000000000 -0400
13302 @@ -25,6 +25,12 @@
13303 /* Physical address */
13304 #define pa(X) ((X) - __PAGE_OFFSET)
13305
13306 +#ifdef CONFIG_PAX_KERNEXEC
13307 +#define ta(X) (X)
13308 +#else
13309 +#define ta(X) ((X) - __PAGE_OFFSET)
13310 +#endif
13311 +
13312 /*
13313 * References to members of the new_cpu_data structure.
13314 */
13315 @@ -54,11 +60,7 @@
13316 * and small than max_low_pfn, otherwise will waste some page table entries
13317 */
13318
13319 -#if PTRS_PER_PMD > 1
13320 -#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
13321 -#else
13322 -#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
13323 -#endif
13324 +#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
13325
13326 /* Number of possible pages in the lowmem region */
13327 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
13328 @@ -77,6 +79,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_P
13329 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
13330
13331 /*
13332 + * Real beginning of normal "text" segment
13333 + */
13334 +ENTRY(stext)
13335 +ENTRY(_stext)
13336 +
13337 +/*
13338 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
13339 * %esi points to the real-mode code as a 32-bit pointer.
13340 * CS and DS must be 4 GB flat segments, but we don't depend on
13341 @@ -84,6 +92,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
13342 * can.
13343 */
13344 __HEAD
13345 +
13346 +#ifdef CONFIG_PAX_KERNEXEC
13347 + jmp startup_32
13348 +/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
13349 +.fill PAGE_SIZE-5,1,0xcc
13350 +#endif
13351 +
13352 ENTRY(startup_32)
13353 movl pa(stack_start),%ecx
13354
13355 @@ -105,6 +120,57 @@ ENTRY(startup_32)
13356 2:
13357 leal -__PAGE_OFFSET(%ecx),%esp
13358
13359 +#ifdef CONFIG_SMP
13360 + movl $pa(cpu_gdt_table),%edi
13361 + movl $__per_cpu_load,%eax
13362 + movw %ax,__KERNEL_PERCPU + 2(%edi)
13363 + rorl $16,%eax
13364 + movb %al,__KERNEL_PERCPU + 4(%edi)
13365 + movb %ah,__KERNEL_PERCPU + 7(%edi)
13366 + movl $__per_cpu_end - 1,%eax
13367 + subl $__per_cpu_start,%eax
13368 + movw %ax,__KERNEL_PERCPU + 0(%edi)
13369 +#endif
13370 +
13371 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13372 + movl $NR_CPUS,%ecx
13373 + movl $pa(cpu_gdt_table),%edi
13374 +1:
13375 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
13376 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
13377 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
13378 + addl $PAGE_SIZE_asm,%edi
13379 + loop 1b
13380 +#endif
13381 +
13382 +#ifdef CONFIG_PAX_KERNEXEC
13383 + movl $pa(boot_gdt),%edi
13384 + movl $__LOAD_PHYSICAL_ADDR,%eax
13385 + movw %ax,__BOOT_CS + 2(%edi)
13386 + rorl $16,%eax
13387 + movb %al,__BOOT_CS + 4(%edi)
13388 + movb %ah,__BOOT_CS + 7(%edi)
13389 + rorl $16,%eax
13390 +
13391 + ljmp $(__BOOT_CS),$1f
13392 +1:
13393 +
13394 + movl $NR_CPUS,%ecx
13395 + movl $pa(cpu_gdt_table),%edi
13396 + addl $__PAGE_OFFSET,%eax
13397 +1:
13398 + movw %ax,__KERNEL_CS + 2(%edi)
13399 + movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
13400 + rorl $16,%eax
13401 + movb %al,__KERNEL_CS + 4(%edi)
13402 + movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
13403 + movb %ah,__KERNEL_CS + 7(%edi)
13404 + movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
13405 + rorl $16,%eax
13406 + addl $PAGE_SIZE_asm,%edi
13407 + loop 1b
13408 +#endif
13409 +
13410 /*
13411 * Clear BSS first so that there are no surprises...
13412 */
13413 @@ -195,8 +261,11 @@ ENTRY(startup_32)
13414 movl %eax, pa(max_pfn_mapped)
13415
13416 /* Do early initialization of the fixmap area */
13417 - movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
13418 - movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
13419 +#ifdef CONFIG_COMPAT_VDSO
13420 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
13421 +#else
13422 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
13423 +#endif
13424 #else /* Not PAE */
13425
13426 page_pde_offset = (__PAGE_OFFSET >> 20);
13427 @@ -226,8 +295,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
13428 movl %eax, pa(max_pfn_mapped)
13429
13430 /* Do early initialization of the fixmap area */
13431 - movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
13432 - movl %eax,pa(initial_page_table+0xffc)
13433 +#ifdef CONFIG_COMPAT_VDSO
13434 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
13435 +#else
13436 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
13437 +#endif
13438 #endif
13439
13440 #ifdef CONFIG_PARAVIRT
13441 @@ -241,9 +313,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
13442 cmpl $num_subarch_entries, %eax
13443 jae bad_subarch
13444
13445 - movl pa(subarch_entries)(,%eax,4), %eax
13446 - subl $__PAGE_OFFSET, %eax
13447 - jmp *%eax
13448 + jmp *pa(subarch_entries)(,%eax,4)
13449
13450 bad_subarch:
13451 WEAK(lguest_entry)
13452 @@ -255,10 +325,10 @@ WEAK(xen_entry)
13453 __INITDATA
13454
13455 subarch_entries:
13456 - .long default_entry /* normal x86/PC */
13457 - .long lguest_entry /* lguest hypervisor */
13458 - .long xen_entry /* Xen hypervisor */
13459 - .long default_entry /* Moorestown MID */
13460 + .long ta(default_entry) /* normal x86/PC */
13461 + .long ta(lguest_entry) /* lguest hypervisor */
13462 + .long ta(xen_entry) /* Xen hypervisor */
13463 + .long ta(default_entry) /* Moorestown MID */
13464 num_subarch_entries = (. - subarch_entries) / 4
13465 .previous
13466 #else
13467 @@ -312,6 +382,7 @@ default_entry:
13468 orl %edx,%eax
13469 movl %eax,%cr4
13470
13471 +#ifdef CONFIG_X86_PAE
13472 testb $X86_CR4_PAE, %al # check if PAE is enabled
13473 jz 6f
13474
13475 @@ -340,6 +411,9 @@ default_entry:
13476 /* Make changes effective */
13477 wrmsr
13478
13479 + btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
13480 +#endif
13481 +
13482 6:
13483
13484 /*
13485 @@ -443,7 +517,7 @@ is386: movl $2,%ecx # set MP
13486 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
13487 movl %eax,%ss # after changing gdt.
13488
13489 - movl $(__USER_DS),%eax # DS/ES contains default USER segment
13490 +# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
13491 movl %eax,%ds
13492 movl %eax,%es
13493
13494 @@ -457,15 +531,22 @@ is386: movl $2,%ecx # set MP
13495 */
13496 cmpb $0,ready
13497 jne 1f
13498 - movl $gdt_page,%eax
13499 + movl $cpu_gdt_table,%eax
13500 movl $stack_canary,%ecx
13501 +#ifdef CONFIG_SMP
13502 + addl $__per_cpu_load,%ecx
13503 +#endif
13504 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
13505 shrl $16, %ecx
13506 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
13507 movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
13508 1:
13509 -#endif
13510 movl $(__KERNEL_STACK_CANARY),%eax
13511 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
13512 + movl $(__USER_DS),%eax
13513 +#else
13514 + xorl %eax,%eax
13515 +#endif
13516 movl %eax,%gs
13517
13518 xorl %eax,%eax # Clear LDT
13519 @@ -558,22 +639,22 @@ early_page_fault:
13520 jmp early_fault
13521
13522 early_fault:
13523 - cld
13524 #ifdef CONFIG_PRINTK
13525 + cmpl $1,%ss:early_recursion_flag
13526 + je hlt_loop
13527 + incl %ss:early_recursion_flag
13528 + cld
13529 pusha
13530 movl $(__KERNEL_DS),%eax
13531 movl %eax,%ds
13532 movl %eax,%es
13533 - cmpl $2,early_recursion_flag
13534 - je hlt_loop
13535 - incl early_recursion_flag
13536 movl %cr2,%eax
13537 pushl %eax
13538 pushl %edx /* trapno */
13539 pushl $fault_msg
13540 call printk
13541 +; call dump_stack
13542 #endif
13543 - call dump_stack
13544 hlt_loop:
13545 hlt
13546 jmp hlt_loop
13547 @@ -581,8 +662,11 @@ hlt_loop:
13548 /* This is the default interrupt "handler" :-) */
13549 ALIGN
13550 ignore_int:
13551 - cld
13552 #ifdef CONFIG_PRINTK
13553 + cmpl $2,%ss:early_recursion_flag
13554 + je hlt_loop
13555 + incl %ss:early_recursion_flag
13556 + cld
13557 pushl %eax
13558 pushl %ecx
13559 pushl %edx
13560 @@ -591,9 +675,6 @@ ignore_int:
13561 movl $(__KERNEL_DS),%eax
13562 movl %eax,%ds
13563 movl %eax,%es
13564 - cmpl $2,early_recursion_flag
13565 - je hlt_loop
13566 - incl early_recursion_flag
13567 pushl 16(%esp)
13568 pushl 24(%esp)
13569 pushl 32(%esp)
13570 @@ -622,29 +703,43 @@ ENTRY(initial_code)
13571 /*
13572 * BSS section
13573 */
13574 -__PAGE_ALIGNED_BSS
13575 - .align PAGE_SIZE
13576 #ifdef CONFIG_X86_PAE
13577 +.section .initial_pg_pmd,"a",@progbits
13578 initial_pg_pmd:
13579 .fill 1024*KPMDS,4,0
13580 #else
13581 +.section .initial_page_table,"a",@progbits
13582 ENTRY(initial_page_table)
13583 .fill 1024,4,0
13584 #endif
13585 +.section .initial_pg_fixmap,"a",@progbits
13586 initial_pg_fixmap:
13587 .fill 1024,4,0
13588 +.section .empty_zero_page,"a",@progbits
13589 ENTRY(empty_zero_page)
13590 .fill 4096,1,0
13591 +.section .swapper_pg_dir,"a",@progbits
13592 ENTRY(swapper_pg_dir)
13593 +#ifdef CONFIG_X86_PAE
13594 + .fill 4,8,0
13595 +#else
13596 .fill 1024,4,0
13597 +#endif
13598 +
13599 +/*
13600 + * The IDT has to be page-aligned to simplify the Pentium
13601 + * F0 0F bug workaround.. We have a special link segment
13602 + * for this.
13603 + */
13604 +.section .idt,"a",@progbits
13605 +ENTRY(idt_table)
13606 + .fill 256,8,0
13607
13608 /*
13609 * This starts the data section.
13610 */
13611 #ifdef CONFIG_X86_PAE
13612 -__PAGE_ALIGNED_DATA
13613 - /* Page-aligned for the benefit of paravirt? */
13614 - .align PAGE_SIZE
13615 +.section .initial_page_table,"a",@progbits
13616 ENTRY(initial_page_table)
13617 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
13618 # if KPMDS == 3
13619 @@ -663,18 +758,27 @@ ENTRY(initial_page_table)
13620 # error "Kernel PMDs should be 1, 2 or 3"
13621 # endif
13622 .align PAGE_SIZE /* needs to be page-sized too */
13623 +
13624 +#ifdef CONFIG_PAX_PER_CPU_PGD
13625 +ENTRY(cpu_pgd)
13626 + .rept NR_CPUS
13627 + .fill 4,8,0
13628 + .endr
13629 +#endif
13630 +
13631 #endif
13632
13633 .data
13634 .balign 4
13635 ENTRY(stack_start)
13636 - .long init_thread_union+THREAD_SIZE
13637 + .long init_thread_union+THREAD_SIZE-8
13638 +
13639 +ready: .byte 0
13640
13641 +.section .rodata,"a",@progbits
13642 early_recursion_flag:
13643 .long 0
13644
13645 -ready: .byte 0
13646 -
13647 int_msg:
13648 .asciz "Unknown interrupt or fault at: %p %p %p\n"
13649
13650 @@ -707,7 +811,7 @@ fault_msg:
13651 .word 0 # 32 bit align gdt_desc.address
13652 boot_gdt_descr:
13653 .word __BOOT_DS+7
13654 - .long boot_gdt - __PAGE_OFFSET
13655 + .long pa(boot_gdt)
13656
13657 .word 0 # 32-bit align idt_desc.address
13658 idt_descr:
13659 @@ -718,7 +822,7 @@ idt_descr:
13660 .word 0 # 32 bit align gdt_desc.address
13661 ENTRY(early_gdt_descr)
13662 .word GDT_ENTRIES*8-1
13663 - .long gdt_page /* Overwritten for secondary CPUs */
13664 + .long cpu_gdt_table /* Overwritten for secondary CPUs */
13665
13666 /*
13667 * The boot_gdt must mirror the equivalent in setup.S and is
13668 @@ -727,5 +831,65 @@ ENTRY(early_gdt_descr)
13669 .align L1_CACHE_BYTES
13670 ENTRY(boot_gdt)
13671 .fill GDT_ENTRY_BOOT_CS,8,0
13672 - .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
13673 - .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
13674 + .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
13675 + .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
13676 +
13677 + .align PAGE_SIZE_asm
13678 +ENTRY(cpu_gdt_table)
13679 + .rept NR_CPUS
13680 + .quad 0x0000000000000000 /* NULL descriptor */
13681 + .quad 0x0000000000000000 /* 0x0b reserved */
13682 + .quad 0x0000000000000000 /* 0x13 reserved */
13683 + .quad 0x0000000000000000 /* 0x1b reserved */
13684 +
13685 +#ifdef CONFIG_PAX_KERNEXEC
13686 + .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
13687 +#else
13688 + .quad 0x0000000000000000 /* 0x20 unused */
13689 +#endif
13690 +
13691 + .quad 0x0000000000000000 /* 0x28 unused */
13692 + .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
13693 + .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
13694 + .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
13695 + .quad 0x0000000000000000 /* 0x4b reserved */
13696 + .quad 0x0000000000000000 /* 0x53 reserved */
13697 + .quad 0x0000000000000000 /* 0x5b reserved */
13698 +
13699 + .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
13700 + .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
13701 + .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
13702 + .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
13703 +
13704 + .quad 0x0000000000000000 /* 0x80 TSS descriptor */
13705 + .quad 0x0000000000000000 /* 0x88 LDT descriptor */
13706 +
13707 + /*
13708 + * Segments used for calling PnP BIOS have byte granularity.
13709 + * The code segments and data segments have fixed 64k limits,
13710 + * the transfer segment sizes are set at run time.
13711 + */
13712 + .quad 0x00409b000000ffff /* 0x90 32-bit code */
13713 + .quad 0x00009b000000ffff /* 0x98 16-bit code */
13714 + .quad 0x000093000000ffff /* 0xa0 16-bit data */
13715 + .quad 0x0000930000000000 /* 0xa8 16-bit data */
13716 + .quad 0x0000930000000000 /* 0xb0 16-bit data */
13717 +
13718 + /*
13719 + * The APM segments have byte granularity and their bases
13720 + * are set at run time. All have 64k limits.
13721 + */
13722 + .quad 0x00409b000000ffff /* 0xb8 APM CS code */
13723 + .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
13724 + .quad 0x004093000000ffff /* 0xc8 APM DS data */
13725 +
13726 + .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
13727 + .quad 0x0040930000000000 /* 0xd8 - PERCPU */
13728 + .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
13729 + .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
13730 + .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
13731 + .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
13732 +
13733 + /* Be sure this is zeroed to avoid false validations in Xen */
13734 + .fill PAGE_SIZE_asm - GDT_SIZE,1,0
13735 + .endr
13736 diff -urNp linux-3.0.8/arch/x86/kernel/head_64.S linux-3.0.8/arch/x86/kernel/head_64.S
13737 --- linux-3.0.8/arch/x86/kernel/head_64.S 2011-07-21 22:17:23.000000000 -0400
13738 +++ linux-3.0.8/arch/x86/kernel/head_64.S 2011-08-23 21:47:55.000000000 -0400
13739 @@ -19,6 +19,7 @@
13740 #include <asm/cache.h>
13741 #include <asm/processor-flags.h>
13742 #include <asm/percpu.h>
13743 +#include <asm/cpufeature.h>
13744
13745 #ifdef CONFIG_PARAVIRT
13746 #include <asm/asm-offsets.h>
13747 @@ -38,6 +39,10 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET
13748 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
13749 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
13750 L3_START_KERNEL = pud_index(__START_KERNEL_map)
13751 +L4_VMALLOC_START = pgd_index(VMALLOC_START)
13752 +L3_VMALLOC_START = pud_index(VMALLOC_START)
13753 +L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
13754 +L3_VMEMMAP_START = pud_index(VMEMMAP_START)
13755
13756 .text
13757 __HEAD
13758 @@ -85,35 +90,22 @@ startup_64:
13759 */
13760 addq %rbp, init_level4_pgt + 0(%rip)
13761 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
13762 + addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
13763 + addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
13764 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
13765
13766 addq %rbp, level3_ident_pgt + 0(%rip)
13767 +#ifndef CONFIG_XEN
13768 + addq %rbp, level3_ident_pgt + 8(%rip)
13769 +#endif
13770
13771 - addq %rbp, level3_kernel_pgt + (510*8)(%rip)
13772 - addq %rbp, level3_kernel_pgt + (511*8)(%rip)
13773 + addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
13774
13775 - addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
13776 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
13777 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
13778
13779 - /* Add an Identity mapping if I am above 1G */
13780 - leaq _text(%rip), %rdi
13781 - andq $PMD_PAGE_MASK, %rdi
13782 -
13783 - movq %rdi, %rax
13784 - shrq $PUD_SHIFT, %rax
13785 - andq $(PTRS_PER_PUD - 1), %rax
13786 - jz ident_complete
13787 -
13788 - leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
13789 - leaq level3_ident_pgt(%rip), %rbx
13790 - movq %rdx, 0(%rbx, %rax, 8)
13791 -
13792 - movq %rdi, %rax
13793 - shrq $PMD_SHIFT, %rax
13794 - andq $(PTRS_PER_PMD - 1), %rax
13795 - leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
13796 - leaq level2_spare_pgt(%rip), %rbx
13797 - movq %rdx, 0(%rbx, %rax, 8)
13798 -ident_complete:
13799 + addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
13800 + addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
13801
13802 /*
13803 * Fixup the kernel text+data virtual addresses. Note that
13804 @@ -160,8 +152,8 @@ ENTRY(secondary_startup_64)
13805 * after the boot processor executes this code.
13806 */
13807
13808 - /* Enable PAE mode and PGE */
13809 - movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
13810 + /* Enable PAE mode and PSE/PGE */
13811 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
13812 movq %rax, %cr4
13813
13814 /* Setup early boot stage 4 level pagetables. */
13815 @@ -183,9 +175,14 @@ ENTRY(secondary_startup_64)
13816 movl $MSR_EFER, %ecx
13817 rdmsr
13818 btsl $_EFER_SCE, %eax /* Enable System Call */
13819 - btl $20,%edi /* No Execute supported? */
13820 + btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
13821 jnc 1f
13822 btsl $_EFER_NX, %eax
13823 + leaq init_level4_pgt(%rip), %rdi
13824 + btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
13825 + btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
13826 + btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
13827 + btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
13828 1: wrmsr /* Make changes effective */
13829
13830 /* Setup cr0 */
13831 @@ -269,7 +266,7 @@ ENTRY(secondary_startup_64)
13832 bad_address:
13833 jmp bad_address
13834
13835 - .section ".init.text","ax"
13836 + __INIT
13837 #ifdef CONFIG_EARLY_PRINTK
13838 .globl early_idt_handlers
13839 early_idt_handlers:
13840 @@ -314,18 +311,23 @@ ENTRY(early_idt_handler)
13841 #endif /* EARLY_PRINTK */
13842 1: hlt
13843 jmp 1b
13844 + .previous
13845
13846 #ifdef CONFIG_EARLY_PRINTK
13847 + __INITDATA
13848 early_recursion_flag:
13849 .long 0
13850 + .previous
13851
13852 + .section .rodata,"a",@progbits
13853 early_idt_msg:
13854 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
13855 early_idt_ripmsg:
13856 .asciz "RIP %s\n"
13857 -#endif /* CONFIG_EARLY_PRINTK */
13858 .previous
13859 +#endif /* CONFIG_EARLY_PRINTK */
13860
13861 + .section .rodata,"a",@progbits
13862 #define NEXT_PAGE(name) \
13863 .balign PAGE_SIZE; \
13864 ENTRY(name)
13865 @@ -338,7 +340,6 @@ ENTRY(name)
13866 i = i + 1 ; \
13867 .endr
13868
13869 - .data
13870 /*
13871 * This default setting generates an ident mapping at address 0x100000
13872 * and a mapping for the kernel that precisely maps virtual address
13873 @@ -349,13 +350,36 @@ NEXT_PAGE(init_level4_pgt)
13874 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
13875 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
13876 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
13877 + .org init_level4_pgt + L4_VMALLOC_START*8, 0
13878 + .quad level3_vmalloc_pgt - __START_KERNEL_map + _KERNPG_TABLE
13879 + .org init_level4_pgt + L4_VMEMMAP_START*8, 0
13880 + .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
13881 .org init_level4_pgt + L4_START_KERNEL*8, 0
13882 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
13883 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
13884
13885 +#ifdef CONFIG_PAX_PER_CPU_PGD
13886 +NEXT_PAGE(cpu_pgd)
13887 + .rept NR_CPUS
13888 + .fill 512,8,0
13889 + .endr
13890 +#endif
13891 +
13892 NEXT_PAGE(level3_ident_pgt)
13893 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
13894 +#ifdef CONFIG_XEN
13895 .fill 511,8,0
13896 +#else
13897 + .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
13898 + .fill 510,8,0
13899 +#endif
13900 +
13901 +NEXT_PAGE(level3_vmalloc_pgt)
13902 + .fill 512,8,0
13903 +
13904 +NEXT_PAGE(level3_vmemmap_pgt)
13905 + .fill L3_VMEMMAP_START,8,0
13906 + .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
13907
13908 NEXT_PAGE(level3_kernel_pgt)
13909 .fill L3_START_KERNEL,8,0
13910 @@ -363,20 +387,23 @@ NEXT_PAGE(level3_kernel_pgt)
13911 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
13912 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
13913
13914 +NEXT_PAGE(level2_vmemmap_pgt)
13915 + .fill 512,8,0
13916 +
13917 NEXT_PAGE(level2_fixmap_pgt)
13918 - .fill 506,8,0
13919 - .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
13920 - /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
13921 - .fill 5,8,0
13922 + .fill 507,8,0
13923 + .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
13924 + /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
13925 + .fill 4,8,0
13926
13927 -NEXT_PAGE(level1_fixmap_pgt)
13928 +NEXT_PAGE(level1_vsyscall_pgt)
13929 .fill 512,8,0
13930
13931 -NEXT_PAGE(level2_ident_pgt)
13932 - /* Since I easily can, map the first 1G.
13933 + /* Since I easily can, map the first 2G.
13934 * Don't set NX because code runs from these pages.
13935 */
13936 - PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
13937 +NEXT_PAGE(level2_ident_pgt)
13938 + PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
13939
13940 NEXT_PAGE(level2_kernel_pgt)
13941 /*
13942 @@ -389,33 +416,55 @@ NEXT_PAGE(level2_kernel_pgt)
13943 * If you want to increase this then increase MODULES_VADDR
13944 * too.)
13945 */
13946 - PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
13947 - KERNEL_IMAGE_SIZE/PMD_SIZE)
13948 -
13949 -NEXT_PAGE(level2_spare_pgt)
13950 - .fill 512, 8, 0
13951 + PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
13952
13953 #undef PMDS
13954 #undef NEXT_PAGE
13955
13956 - .data
13957 + .align PAGE_SIZE
13958 +ENTRY(cpu_gdt_table)
13959 + .rept NR_CPUS
13960 + .quad 0x0000000000000000 /* NULL descriptor */
13961 + .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
13962 + .quad 0x00af9b000000ffff /* __KERNEL_CS */
13963 + .quad 0x00cf93000000ffff /* __KERNEL_DS */
13964 + .quad 0x00cffb000000ffff /* __USER32_CS */
13965 + .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
13966 + .quad 0x00affb000000ffff /* __USER_CS */
13967 +
13968 +#ifdef CONFIG_PAX_KERNEXEC
13969 + .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
13970 +#else
13971 + .quad 0x0 /* unused */
13972 +#endif
13973 +
13974 + .quad 0,0 /* TSS */
13975 + .quad 0,0 /* LDT */
13976 + .quad 0,0,0 /* three TLS descriptors */
13977 + .quad 0x0000f40000000000 /* node/CPU stored in limit */
13978 + /* asm/segment.h:GDT_ENTRIES must match this */
13979 +
13980 + /* zero the remaining page */
13981 + .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
13982 + .endr
13983 +
13984 .align 16
13985 .globl early_gdt_descr
13986 early_gdt_descr:
13987 .word GDT_ENTRIES*8-1
13988 early_gdt_descr_base:
13989 - .quad INIT_PER_CPU_VAR(gdt_page)
13990 + .quad cpu_gdt_table
13991
13992 ENTRY(phys_base)
13993 /* This must match the first entry in level2_kernel_pgt */
13994 .quad 0x0000000000000000
13995
13996 #include "../../x86/xen/xen-head.S"
13997 -
13998 - .section .bss, "aw", @nobits
13999 +
14000 + .section .rodata,"a",@progbits
14001 .align L1_CACHE_BYTES
14002 ENTRY(idt_table)
14003 - .skip IDT_ENTRIES * 16
14004 + .fill 512,8,0
14005
14006 __PAGE_ALIGNED_BSS
14007 .align PAGE_SIZE
14008 diff -urNp linux-3.0.8/arch/x86/kernel/i386_ksyms_32.c linux-3.0.8/arch/x86/kernel/i386_ksyms_32.c
14009 --- linux-3.0.8/arch/x86/kernel/i386_ksyms_32.c 2011-07-21 22:17:23.000000000 -0400
14010 +++ linux-3.0.8/arch/x86/kernel/i386_ksyms_32.c 2011-08-23 21:47:55.000000000 -0400
14011 @@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
14012 EXPORT_SYMBOL(cmpxchg8b_emu);
14013 #endif
14014
14015 +EXPORT_SYMBOL_GPL(cpu_gdt_table);
14016 +
14017 /* Networking helper routines. */
14018 EXPORT_SYMBOL(csum_partial_copy_generic);
14019 +EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
14020 +EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
14021
14022 EXPORT_SYMBOL(__get_user_1);
14023 EXPORT_SYMBOL(__get_user_2);
14024 @@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
14025
14026 EXPORT_SYMBOL(csum_partial);
14027 EXPORT_SYMBOL(empty_zero_page);
14028 +
14029 +#ifdef CONFIG_PAX_KERNEXEC
14030 +EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
14031 +#endif
14032 diff -urNp linux-3.0.8/arch/x86/kernel/i8259.c linux-3.0.8/arch/x86/kernel/i8259.c
14033 --- linux-3.0.8/arch/x86/kernel/i8259.c 2011-07-21 22:17:23.000000000 -0400
14034 +++ linux-3.0.8/arch/x86/kernel/i8259.c 2011-08-23 21:47:55.000000000 -0400
14035 @@ -210,7 +210,7 @@ spurious_8259A_irq:
14036 "spurious 8259A interrupt: IRQ%d.\n", irq);
14037 spurious_irq_mask |= irqmask;
14038 }
14039 - atomic_inc(&irq_err_count);
14040 + atomic_inc_unchecked(&irq_err_count);
14041 /*
14042 * Theoretically we do not have to handle this IRQ,
14043 * but in Linux this does not cause problems and is
14044 diff -urNp linux-3.0.8/arch/x86/kernel/init_task.c linux-3.0.8/arch/x86/kernel/init_task.c
14045 --- linux-3.0.8/arch/x86/kernel/init_task.c 2011-07-21 22:17:23.000000000 -0400
14046 +++ linux-3.0.8/arch/x86/kernel/init_task.c 2011-08-23 21:47:55.000000000 -0400
14047 @@ -20,8 +20,7 @@ static struct sighand_struct init_sighan
14048 * way process stacks are handled. This is done by having a special
14049 * "init_task" linker map entry..
14050 */
14051 -union thread_union init_thread_union __init_task_data =
14052 - { INIT_THREAD_INFO(init_task) };
14053 +union thread_union init_thread_union __init_task_data;
14054
14055 /*
14056 * Initial task structure.
14057 @@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
14058 * section. Since TSS's are completely CPU-local, we want them
14059 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
14060 */
14061 -DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
14062 -
14063 +struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
14064 +EXPORT_SYMBOL(init_tss);
14065 diff -urNp linux-3.0.8/arch/x86/kernel/ioport.c linux-3.0.8/arch/x86/kernel/ioport.c
14066 --- linux-3.0.8/arch/x86/kernel/ioport.c 2011-07-21 22:17:23.000000000 -0400
14067 +++ linux-3.0.8/arch/x86/kernel/ioport.c 2011-08-23 21:48:14.000000000 -0400
14068 @@ -6,6 +6,7 @@
14069 #include <linux/sched.h>
14070 #include <linux/kernel.h>
14071 #include <linux/capability.h>
14072 +#include <linux/security.h>
14073 #include <linux/errno.h>
14074 #include <linux/types.h>
14075 #include <linux/ioport.h>
14076 @@ -28,6 +29,12 @@ asmlinkage long sys_ioperm(unsigned long
14077
14078 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
14079 return -EINVAL;
14080 +#ifdef CONFIG_GRKERNSEC_IO
14081 + if (turn_on && grsec_disable_privio) {
14082 + gr_handle_ioperm();
14083 + return -EPERM;
14084 + }
14085 +#endif
14086 if (turn_on && !capable(CAP_SYS_RAWIO))
14087 return -EPERM;
14088
14089 @@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long
14090 * because the ->io_bitmap_max value must match the bitmap
14091 * contents:
14092 */
14093 - tss = &per_cpu(init_tss, get_cpu());
14094 + tss = init_tss + get_cpu();
14095
14096 if (turn_on)
14097 bitmap_clear(t->io_bitmap_ptr, from, num);
14098 @@ -102,6 +109,12 @@ long sys_iopl(unsigned int level, struct
14099 return -EINVAL;
14100 /* Trying to gain more privileges? */
14101 if (level > old) {
14102 +#ifdef CONFIG_GRKERNSEC_IO
14103 + if (grsec_disable_privio) {
14104 + gr_handle_iopl();
14105 + return -EPERM;
14106 + }
14107 +#endif
14108 if (!capable(CAP_SYS_RAWIO))
14109 return -EPERM;
14110 }
14111 diff -urNp linux-3.0.8/arch/x86/kernel/irq_32.c linux-3.0.8/arch/x86/kernel/irq_32.c
14112 --- linux-3.0.8/arch/x86/kernel/irq_32.c 2011-07-21 22:17:23.000000000 -0400
14113 +++ linux-3.0.8/arch/x86/kernel/irq_32.c 2011-08-23 21:47:55.000000000 -0400
14114 @@ -36,7 +36,7 @@ static int check_stack_overflow(void)
14115 __asm__ __volatile__("andl %%esp,%0" :
14116 "=r" (sp) : "0" (THREAD_SIZE - 1));
14117
14118 - return sp < (sizeof(struct thread_info) + STACK_WARN);
14119 + return sp < STACK_WARN;
14120 }
14121
14122 static void print_stack_overflow(void)
14123 @@ -54,8 +54,8 @@ static inline void print_stack_overflow(
14124 * per-CPU IRQ handling contexts (thread information and stack)
14125 */
14126 union irq_ctx {
14127 - struct thread_info tinfo;
14128 - u32 stack[THREAD_SIZE/sizeof(u32)];
14129 + unsigned long previous_esp;
14130 + u32 stack[THREAD_SIZE/sizeof(u32)];
14131 } __attribute__((aligned(THREAD_SIZE)));
14132
14133 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
14134 @@ -75,10 +75,9 @@ static void call_on_stack(void *func, vo
14135 static inline int
14136 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
14137 {
14138 - union irq_ctx *curctx, *irqctx;
14139 + union irq_ctx *irqctx;
14140 u32 *isp, arg1, arg2;
14141
14142 - curctx = (union irq_ctx *) current_thread_info();
14143 irqctx = __this_cpu_read(hardirq_ctx);
14144
14145 /*
14146 @@ -87,21 +86,16 @@ execute_on_irq_stack(int overflow, struc
14147 * handler) we can't do that and just have to keep using the
14148 * current stack (which is the irq stack already after all)
14149 */
14150 - if (unlikely(curctx == irqctx))
14151 + if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
14152 return 0;
14153
14154 /* build the stack frame on the IRQ stack */
14155 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
14156 - irqctx->tinfo.task = curctx->tinfo.task;
14157 - irqctx->tinfo.previous_esp = current_stack_pointer;
14158 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
14159 + irqctx->previous_esp = current_stack_pointer;
14160
14161 - /*
14162 - * Copy the softirq bits in preempt_count so that the
14163 - * softirq checks work in the hardirq context.
14164 - */
14165 - irqctx->tinfo.preempt_count =
14166 - (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
14167 - (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
14168 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14169 + __set_fs(MAKE_MM_SEG(0));
14170 +#endif
14171
14172 if (unlikely(overflow))
14173 call_on_stack(print_stack_overflow, isp);
14174 @@ -113,6 +107,11 @@ execute_on_irq_stack(int overflow, struc
14175 : "0" (irq), "1" (desc), "2" (isp),
14176 "D" (desc->handle_irq)
14177 : "memory", "cc", "ecx");
14178 +
14179 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14180 + __set_fs(current_thread_info()->addr_limit);
14181 +#endif
14182 +
14183 return 1;
14184 }
14185
14186 @@ -121,29 +120,11 @@ execute_on_irq_stack(int overflow, struc
14187 */
14188 void __cpuinit irq_ctx_init(int cpu)
14189 {
14190 - union irq_ctx *irqctx;
14191 -
14192 if (per_cpu(hardirq_ctx, cpu))
14193 return;
14194
14195 - irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
14196 - THREAD_FLAGS,
14197 - THREAD_ORDER));
14198 - memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
14199 - irqctx->tinfo.cpu = cpu;
14200 - irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
14201 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
14202 -
14203 - per_cpu(hardirq_ctx, cpu) = irqctx;
14204 -
14205 - irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
14206 - THREAD_FLAGS,
14207 - THREAD_ORDER));
14208 - memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
14209 - irqctx->tinfo.cpu = cpu;
14210 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
14211 -
14212 - per_cpu(softirq_ctx, cpu) = irqctx;
14213 + per_cpu(hardirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
14214 + per_cpu(softirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
14215
14216 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
14217 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
14218 @@ -152,7 +133,6 @@ void __cpuinit irq_ctx_init(int cpu)
14219 asmlinkage void do_softirq(void)
14220 {
14221 unsigned long flags;
14222 - struct thread_info *curctx;
14223 union irq_ctx *irqctx;
14224 u32 *isp;
14225
14226 @@ -162,15 +142,22 @@ asmlinkage void do_softirq(void)
14227 local_irq_save(flags);
14228
14229 if (local_softirq_pending()) {
14230 - curctx = current_thread_info();
14231 irqctx = __this_cpu_read(softirq_ctx);
14232 - irqctx->tinfo.task = curctx->task;
14233 - irqctx->tinfo.previous_esp = current_stack_pointer;
14234 + irqctx->previous_esp = current_stack_pointer;
14235
14236 /* build the stack frame on the softirq stack */
14237 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
14238 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
14239 +
14240 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14241 + __set_fs(MAKE_MM_SEG(0));
14242 +#endif
14243
14244 call_on_stack(__do_softirq, isp);
14245 +
14246 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14247 + __set_fs(current_thread_info()->addr_limit);
14248 +#endif
14249 +
14250 /*
14251 * Shouldn't happen, we returned above if in_interrupt():
14252 */
14253 diff -urNp linux-3.0.8/arch/x86/kernel/irq.c linux-3.0.8/arch/x86/kernel/irq.c
14254 --- linux-3.0.8/arch/x86/kernel/irq.c 2011-07-21 22:17:23.000000000 -0400
14255 +++ linux-3.0.8/arch/x86/kernel/irq.c 2011-08-23 21:47:55.000000000 -0400
14256 @@ -17,7 +17,7 @@
14257 #include <asm/mce.h>
14258 #include <asm/hw_irq.h>
14259
14260 -atomic_t irq_err_count;
14261 +atomic_unchecked_t irq_err_count;
14262
14263 /* Function pointer for generic interrupt vector handling */
14264 void (*x86_platform_ipi_callback)(void) = NULL;
14265 @@ -116,9 +116,9 @@ int arch_show_interrupts(struct seq_file
14266 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
14267 seq_printf(p, " Machine check polls\n");
14268 #endif
14269 - seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
14270 + seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
14271 #if defined(CONFIG_X86_IO_APIC)
14272 - seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
14273 + seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
14274 #endif
14275 return 0;
14276 }
14277 @@ -158,10 +158,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
14278
14279 u64 arch_irq_stat(void)
14280 {
14281 - u64 sum = atomic_read(&irq_err_count);
14282 + u64 sum = atomic_read_unchecked(&irq_err_count);
14283
14284 #ifdef CONFIG_X86_IO_APIC
14285 - sum += atomic_read(&irq_mis_count);
14286 + sum += atomic_read_unchecked(&irq_mis_count);
14287 #endif
14288 return sum;
14289 }
14290 diff -urNp linux-3.0.8/arch/x86/kernel/kgdb.c linux-3.0.8/arch/x86/kernel/kgdb.c
14291 --- linux-3.0.8/arch/x86/kernel/kgdb.c 2011-07-21 22:17:23.000000000 -0400
14292 +++ linux-3.0.8/arch/x86/kernel/kgdb.c 2011-08-23 21:47:55.000000000 -0400
14293 @@ -124,11 +124,11 @@ char *dbg_get_reg(int regno, void *mem,
14294 #ifdef CONFIG_X86_32
14295 switch (regno) {
14296 case GDB_SS:
14297 - if (!user_mode_vm(regs))
14298 + if (!user_mode(regs))
14299 *(unsigned long *)mem = __KERNEL_DS;
14300 break;
14301 case GDB_SP:
14302 - if (!user_mode_vm(regs))
14303 + if (!user_mode(regs))
14304 *(unsigned long *)mem = kernel_stack_pointer(regs);
14305 break;
14306 case GDB_GS:
14307 @@ -473,12 +473,12 @@ int kgdb_arch_handle_exception(int e_vec
14308 case 'k':
14309 /* clear the trace bit */
14310 linux_regs->flags &= ~X86_EFLAGS_TF;
14311 - atomic_set(&kgdb_cpu_doing_single_step, -1);
14312 + atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
14313
14314 /* set the trace bit if we're stepping */
14315 if (remcomInBuffer[0] == 's') {
14316 linux_regs->flags |= X86_EFLAGS_TF;
14317 - atomic_set(&kgdb_cpu_doing_single_step,
14318 + atomic_set_unchecked(&kgdb_cpu_doing_single_step,
14319 raw_smp_processor_id());
14320 }
14321
14322 @@ -534,7 +534,7 @@ static int __kgdb_notify(struct die_args
14323 return NOTIFY_DONE;
14324
14325 case DIE_DEBUG:
14326 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
14327 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
14328 if (user_mode(regs))
14329 return single_step_cont(regs, args);
14330 break;
14331 diff -urNp linux-3.0.8/arch/x86/kernel/kprobes.c linux-3.0.8/arch/x86/kernel/kprobes.c
14332 --- linux-3.0.8/arch/x86/kernel/kprobes.c 2011-07-21 22:17:23.000000000 -0400
14333 +++ linux-3.0.8/arch/x86/kernel/kprobes.c 2011-10-11 10:44:33.000000000 -0400
14334 @@ -115,8 +115,11 @@ static void __kprobes __synthesize_relat
14335 } __attribute__((packed)) *insn;
14336
14337 insn = (struct __arch_relative_insn *)from;
14338 +
14339 + pax_open_kernel();
14340 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
14341 insn->op = op;
14342 + pax_close_kernel();
14343 }
14344
14345 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
14346 @@ -153,7 +156,7 @@ static int __kprobes can_boost(kprobe_op
14347 kprobe_opcode_t opcode;
14348 kprobe_opcode_t *orig_opcodes = opcodes;
14349
14350 - if (search_exception_tables((unsigned long)opcodes))
14351 + if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
14352 return 0; /* Page fault may occur on this address. */
14353
14354 retry:
14355 @@ -314,7 +317,9 @@ static int __kprobes __copy_instruction(
14356 }
14357 }
14358 insn_get_length(&insn);
14359 + pax_open_kernel();
14360 memcpy(dest, insn.kaddr, insn.length);
14361 + pax_close_kernel();
14362
14363 #ifdef CONFIG_X86_64
14364 if (insn_rip_relative(&insn)) {
14365 @@ -338,7 +343,9 @@ static int __kprobes __copy_instruction(
14366 (u8 *) dest;
14367 BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */
14368 disp = (u8 *) dest + insn_offset_displacement(&insn);
14369 + pax_open_kernel();
14370 *(s32 *) disp = (s32) newdisp;
14371 + pax_close_kernel();
14372 }
14373 #endif
14374 return insn.length;
14375 @@ -352,12 +359,12 @@ static void __kprobes arch_copy_kprobe(s
14376 */
14377 __copy_instruction(p->ainsn.insn, p->addr, 0);
14378
14379 - if (can_boost(p->addr))
14380 + if (can_boost(ktla_ktva(p->addr)))
14381 p->ainsn.boostable = 0;
14382 else
14383 p->ainsn.boostable = -1;
14384
14385 - p->opcode = *p->addr;
14386 + p->opcode = *(ktla_ktva(p->addr));
14387 }
14388
14389 int __kprobes arch_prepare_kprobe(struct kprobe *p)
14390 @@ -474,7 +481,7 @@ static void __kprobes setup_singlestep(s
14391 * nor set current_kprobe, because it doesn't use single
14392 * stepping.
14393 */
14394 - regs->ip = (unsigned long)p->ainsn.insn;
14395 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
14396 preempt_enable_no_resched();
14397 return;
14398 }
14399 @@ -493,7 +500,7 @@ static void __kprobes setup_singlestep(s
14400 if (p->opcode == BREAKPOINT_INSTRUCTION)
14401 regs->ip = (unsigned long)p->addr;
14402 else
14403 - regs->ip = (unsigned long)p->ainsn.insn;
14404 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
14405 }
14406
14407 /*
14408 @@ -572,7 +579,7 @@ static int __kprobes kprobe_handler(stru
14409 setup_singlestep(p, regs, kcb, 0);
14410 return 1;
14411 }
14412 - } else if (*addr != BREAKPOINT_INSTRUCTION) {
14413 + } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
14414 /*
14415 * The breakpoint instruction was removed right
14416 * after we hit it. Another cpu has removed
14417 @@ -680,6 +687,9 @@ static void __used __kprobes kretprobe_t
14418 " movq %rax, 152(%rsp)\n"
14419 RESTORE_REGS_STRING
14420 " popfq\n"
14421 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
14422 + " btsq $63,(%rsp)\n"
14423 +#endif
14424 #else
14425 " pushf\n"
14426 SAVE_REGS_STRING
14427 @@ -817,7 +827,7 @@ static void __kprobes resume_execution(s
14428 struct pt_regs *regs, struct kprobe_ctlblk *kcb)
14429 {
14430 unsigned long *tos = stack_addr(regs);
14431 - unsigned long copy_ip = (unsigned long)p->ainsn.insn;
14432 + unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
14433 unsigned long orig_ip = (unsigned long)p->addr;
14434 kprobe_opcode_t *insn = p->ainsn.insn;
14435
14436 @@ -999,7 +1009,7 @@ int __kprobes kprobe_exceptions_notify(s
14437 struct die_args *args = data;
14438 int ret = NOTIFY_DONE;
14439
14440 - if (args->regs && user_mode_vm(args->regs))
14441 + if (args->regs && user_mode(args->regs))
14442 return ret;
14443
14444 switch (val) {
14445 @@ -1381,7 +1391,7 @@ int __kprobes arch_prepare_optimized_kpr
14446 * Verify if the address gap is in 2GB range, because this uses
14447 * a relative jump.
14448 */
14449 - rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
14450 + rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
14451 if (abs(rel) > 0x7fffffff)
14452 return -ERANGE;
14453
14454 @@ -1402,11 +1412,11 @@ int __kprobes arch_prepare_optimized_kpr
14455 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
14456
14457 /* Set probe function call */
14458 - synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
14459 + synthesize_relcall(buf + TMPL_CALL_IDX, ktla_ktva(optimized_callback));
14460
14461 /* Set returning jmp instruction at the tail of out-of-line buffer */
14462 synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
14463 - (u8 *)op->kp.addr + op->optinsn.size);
14464 + (u8 *)ktla_ktva(op->kp.addr) + op->optinsn.size);
14465
14466 flush_icache_range((unsigned long) buf,
14467 (unsigned long) buf + TMPL_END_IDX +
14468 @@ -1428,7 +1438,7 @@ static void __kprobes setup_optimize_kpr
14469 ((long)op->kp.addr + RELATIVEJUMP_SIZE));
14470
14471 /* Backup instructions which will be replaced by jump address */
14472 - memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
14473 + memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
14474 RELATIVE_ADDR_SIZE);
14475
14476 insn_buf[0] = RELATIVEJUMP_OPCODE;
14477 diff -urNp linux-3.0.8/arch/x86/kernel/kvm.c linux-3.0.8/arch/x86/kernel/kvm.c
14478 --- linux-3.0.8/arch/x86/kernel/kvm.c 2011-07-21 22:17:23.000000000 -0400
14479 +++ linux-3.0.8/arch/x86/kernel/kvm.c 2011-08-24 18:10:12.000000000 -0400
14480 @@ -426,6 +426,7 @@ static void __init paravirt_ops_setup(vo
14481 pv_mmu_ops.set_pud = kvm_set_pud;
14482 #if PAGETABLE_LEVELS == 4
14483 pv_mmu_ops.set_pgd = kvm_set_pgd;
14484 + pv_mmu_ops.set_pgd_batched = kvm_set_pgd;
14485 #endif
14486 #endif
14487 pv_mmu_ops.flush_tlb_user = kvm_flush_tlb;
14488 diff -urNp linux-3.0.8/arch/x86/kernel/ldt.c linux-3.0.8/arch/x86/kernel/ldt.c
14489 --- linux-3.0.8/arch/x86/kernel/ldt.c 2011-07-21 22:17:23.000000000 -0400
14490 +++ linux-3.0.8/arch/x86/kernel/ldt.c 2011-08-23 21:47:55.000000000 -0400
14491 @@ -67,13 +67,13 @@ static int alloc_ldt(mm_context_t *pc, i
14492 if (reload) {
14493 #ifdef CONFIG_SMP
14494 preempt_disable();
14495 - load_LDT(pc);
14496 + load_LDT_nolock(pc);
14497 if (!cpumask_equal(mm_cpumask(current->mm),
14498 cpumask_of(smp_processor_id())))
14499 smp_call_function(flush_ldt, current->mm, 1);
14500 preempt_enable();
14501 #else
14502 - load_LDT(pc);
14503 + load_LDT_nolock(pc);
14504 #endif
14505 }
14506 if (oldsize) {
14507 @@ -95,7 +95,7 @@ static inline int copy_ldt(mm_context_t
14508 return err;
14509
14510 for (i = 0; i < old->size; i++)
14511 - write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
14512 + write_ldt_entry(new->ldt, i, old->ldt + i);
14513 return 0;
14514 }
14515
14516 @@ -116,6 +116,24 @@ int init_new_context(struct task_struct
14517 retval = copy_ldt(&mm->context, &old_mm->context);
14518 mutex_unlock(&old_mm->context.lock);
14519 }
14520 +
14521 + if (tsk == current) {
14522 + mm->context.vdso = 0;
14523 +
14524 +#ifdef CONFIG_X86_32
14525 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
14526 + mm->context.user_cs_base = 0UL;
14527 + mm->context.user_cs_limit = ~0UL;
14528 +
14529 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
14530 + cpus_clear(mm->context.cpu_user_cs_mask);
14531 +#endif
14532 +
14533 +#endif
14534 +#endif
14535 +
14536 + }
14537 +
14538 return retval;
14539 }
14540
14541 @@ -230,6 +248,13 @@ static int write_ldt(void __user *ptr, u
14542 }
14543 }
14544
14545 +#ifdef CONFIG_PAX_SEGMEXEC
14546 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
14547 + error = -EINVAL;
14548 + goto out_unlock;
14549 + }
14550 +#endif
14551 +
14552 fill_ldt(&ldt, &ldt_info);
14553 if (oldmode)
14554 ldt.avl = 0;
14555 diff -urNp linux-3.0.8/arch/x86/kernel/machine_kexec_32.c linux-3.0.8/arch/x86/kernel/machine_kexec_32.c
14556 --- linux-3.0.8/arch/x86/kernel/machine_kexec_32.c 2011-07-21 22:17:23.000000000 -0400
14557 +++ linux-3.0.8/arch/x86/kernel/machine_kexec_32.c 2011-08-23 21:47:55.000000000 -0400
14558 @@ -27,7 +27,7 @@
14559 #include <asm/cacheflush.h>
14560 #include <asm/debugreg.h>
14561
14562 -static void set_idt(void *newidt, __u16 limit)
14563 +static void set_idt(struct desc_struct *newidt, __u16 limit)
14564 {
14565 struct desc_ptr curidt;
14566
14567 @@ -39,7 +39,7 @@ static void set_idt(void *newidt, __u16
14568 }
14569
14570
14571 -static void set_gdt(void *newgdt, __u16 limit)
14572 +static void set_gdt(struct desc_struct *newgdt, __u16 limit)
14573 {
14574 struct desc_ptr curgdt;
14575
14576 @@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image)
14577 }
14578
14579 control_page = page_address(image->control_code_page);
14580 - memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
14581 + memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
14582
14583 relocate_kernel_ptr = control_page;
14584 page_list[PA_CONTROL_PAGE] = __pa(control_page);
14585 diff -urNp linux-3.0.8/arch/x86/kernel/microcode_intel.c linux-3.0.8/arch/x86/kernel/microcode_intel.c
14586 --- linux-3.0.8/arch/x86/kernel/microcode_intel.c 2011-07-21 22:17:23.000000000 -0400
14587 +++ linux-3.0.8/arch/x86/kernel/microcode_intel.c 2011-10-06 04:17:55.000000000 -0400
14588 @@ -440,13 +440,13 @@ static enum ucode_state request_microcod
14589
14590 static int get_ucode_user(void *to, const void *from, size_t n)
14591 {
14592 - return copy_from_user(to, from, n);
14593 + return copy_from_user(to, (const void __force_user *)from, n);
14594 }
14595
14596 static enum ucode_state
14597 request_microcode_user(int cpu, const void __user *buf, size_t size)
14598 {
14599 - return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
14600 + return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
14601 }
14602
14603 static void microcode_fini_cpu(int cpu)
14604 diff -urNp linux-3.0.8/arch/x86/kernel/module.c linux-3.0.8/arch/x86/kernel/module.c
14605 --- linux-3.0.8/arch/x86/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
14606 +++ linux-3.0.8/arch/x86/kernel/module.c 2011-08-23 21:47:55.000000000 -0400
14607 @@ -36,21 +36,66 @@
14608 #define DEBUGP(fmt...)
14609 #endif
14610
14611 -void *module_alloc(unsigned long size)
14612 +static inline void *__module_alloc(unsigned long size, pgprot_t prot)
14613 {
14614 if (PAGE_ALIGN(size) > MODULES_LEN)
14615 return NULL;
14616 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
14617 - GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
14618 + GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot,
14619 -1, __builtin_return_address(0));
14620 }
14621
14622 +void *module_alloc(unsigned long size)
14623 +{
14624 +
14625 +#ifdef CONFIG_PAX_KERNEXEC
14626 + return __module_alloc(size, PAGE_KERNEL);
14627 +#else
14628 + return __module_alloc(size, PAGE_KERNEL_EXEC);
14629 +#endif
14630 +
14631 +}
14632 +
14633 /* Free memory returned from module_alloc */
14634 void module_free(struct module *mod, void *module_region)
14635 {
14636 vfree(module_region);
14637 }
14638
14639 +#ifdef CONFIG_PAX_KERNEXEC
14640 +#ifdef CONFIG_X86_32
14641 +void *module_alloc_exec(unsigned long size)
14642 +{
14643 + struct vm_struct *area;
14644 +
14645 + if (size == 0)
14646 + return NULL;
14647 +
14648 + area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
14649 + return area ? area->addr : NULL;
14650 +}
14651 +EXPORT_SYMBOL(module_alloc_exec);
14652 +
14653 +void module_free_exec(struct module *mod, void *module_region)
14654 +{
14655 + vunmap(module_region);
14656 +}
14657 +EXPORT_SYMBOL(module_free_exec);
14658 +#else
14659 +void module_free_exec(struct module *mod, void *module_region)
14660 +{
14661 + module_free(mod, module_region);
14662 +}
14663 +EXPORT_SYMBOL(module_free_exec);
14664 +
14665 +void *module_alloc_exec(unsigned long size)
14666 +{
14667 + return __module_alloc(size, PAGE_KERNEL_RX);
14668 +}
14669 +EXPORT_SYMBOL(module_alloc_exec);
14670 +#endif
14671 +#endif
14672 +
14673 /* We don't need anything special. */
14674 int module_frob_arch_sections(Elf_Ehdr *hdr,
14675 Elf_Shdr *sechdrs,
14676 @@ -70,14 +115,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
14677 unsigned int i;
14678 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
14679 Elf32_Sym *sym;
14680 - uint32_t *location;
14681 + uint32_t *plocation, location;
14682
14683 DEBUGP("Applying relocate section %u to %u\n", relsec,
14684 sechdrs[relsec].sh_info);
14685 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
14686 /* This is where to make the change */
14687 - location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
14688 - + rel[i].r_offset;
14689 + plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
14690 + location = (uint32_t)plocation;
14691 + if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
14692 + plocation = ktla_ktva((void *)plocation);
14693 /* This is the symbol it is referring to. Note that all
14694 undefined symbols have been resolved. */
14695 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
14696 @@ -86,11 +133,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
14697 switch (ELF32_R_TYPE(rel[i].r_info)) {
14698 case R_386_32:
14699 /* We add the value into the location given */
14700 - *location += sym->st_value;
14701 + pax_open_kernel();
14702 + *plocation += sym->st_value;
14703 + pax_close_kernel();
14704 break;
14705 case R_386_PC32:
14706 /* Add the value, subtract its postition */
14707 - *location += sym->st_value - (uint32_t)location;
14708 + pax_open_kernel();
14709 + *plocation += sym->st_value - location;
14710 + pax_close_kernel();
14711 break;
14712 default:
14713 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
14714 @@ -146,21 +197,30 @@ int apply_relocate_add(Elf64_Shdr *sechd
14715 case R_X86_64_NONE:
14716 break;
14717 case R_X86_64_64:
14718 + pax_open_kernel();
14719 *(u64 *)loc = val;
14720 + pax_close_kernel();
14721 break;
14722 case R_X86_64_32:
14723 + pax_open_kernel();
14724 *(u32 *)loc = val;
14725 + pax_close_kernel();
14726 if (val != *(u32 *)loc)
14727 goto overflow;
14728 break;
14729 case R_X86_64_32S:
14730 + pax_open_kernel();
14731 *(s32 *)loc = val;
14732 + pax_close_kernel();
14733 if ((s64)val != *(s32 *)loc)
14734 goto overflow;
14735 break;
14736 case R_X86_64_PC32:
14737 val -= (u64)loc;
14738 + pax_open_kernel();
14739 *(u32 *)loc = val;
14740 + pax_close_kernel();
14741 +
14742 #if 0
14743 if ((s64)val != *(s32 *)loc)
14744 goto overflow;
14745 diff -urNp linux-3.0.8/arch/x86/kernel/paravirt.c linux-3.0.8/arch/x86/kernel/paravirt.c
14746 --- linux-3.0.8/arch/x86/kernel/paravirt.c 2011-07-21 22:17:23.000000000 -0400
14747 +++ linux-3.0.8/arch/x86/kernel/paravirt.c 2011-08-23 21:48:14.000000000 -0400
14748 @@ -53,6 +53,9 @@ u64 _paravirt_ident_64(u64 x)
14749 {
14750 return x;
14751 }
14752 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
14753 +PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
14754 +#endif
14755
14756 void __init default_banner(void)
14757 {
14758 @@ -122,7 +125,7 @@ unsigned paravirt_patch_jmp(void *insnbu
14759 * corresponding structure. */
14760 static void *get_call_destination(u8 type)
14761 {
14762 - struct paravirt_patch_template tmpl = {
14763 + const struct paravirt_patch_template tmpl = {
14764 .pv_init_ops = pv_init_ops,
14765 .pv_time_ops = pv_time_ops,
14766 .pv_cpu_ops = pv_cpu_ops,
14767 @@ -133,6 +136,9 @@ static void *get_call_destination(u8 typ
14768 .pv_lock_ops = pv_lock_ops,
14769 #endif
14770 };
14771 +
14772 + pax_track_stack();
14773 +
14774 return *((void **)&tmpl + type);
14775 }
14776
14777 @@ -145,15 +151,19 @@ unsigned paravirt_patch_default(u8 type,
14778 if (opfunc == NULL)
14779 /* If there's no function, patch it with a ud2a (BUG) */
14780 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
14781 - else if (opfunc == _paravirt_nop)
14782 + else if (opfunc == (void *)_paravirt_nop)
14783 /* If the operation is a nop, then nop the callsite */
14784 ret = paravirt_patch_nop();
14785
14786 /* identity functions just return their single argument */
14787 - else if (opfunc == _paravirt_ident_32)
14788 + else if (opfunc == (void *)_paravirt_ident_32)
14789 ret = paravirt_patch_ident_32(insnbuf, len);
14790 - else if (opfunc == _paravirt_ident_64)
14791 + else if (opfunc == (void *)_paravirt_ident_64)
14792 ret = paravirt_patch_ident_64(insnbuf, len);
14793 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
14794 + else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
14795 + ret = paravirt_patch_ident_64(insnbuf, len);
14796 +#endif
14797
14798 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
14799 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
14800 @@ -178,7 +188,7 @@ unsigned paravirt_patch_insns(void *insn
14801 if (insn_len > len || start == NULL)
14802 insn_len = len;
14803 else
14804 - memcpy(insnbuf, start, insn_len);
14805 + memcpy(insnbuf, ktla_ktva(start), insn_len);
14806
14807 return insn_len;
14808 }
14809 @@ -294,22 +304,22 @@ void arch_flush_lazy_mmu_mode(void)
14810 preempt_enable();
14811 }
14812
14813 -struct pv_info pv_info = {
14814 +struct pv_info pv_info __read_only = {
14815 .name = "bare hardware",
14816 .paravirt_enabled = 0,
14817 .kernel_rpl = 0,
14818 .shared_kernel_pmd = 1, /* Only used when CONFIG_X86_PAE is set */
14819 };
14820
14821 -struct pv_init_ops pv_init_ops = {
14822 +struct pv_init_ops pv_init_ops __read_only = {
14823 .patch = native_patch,
14824 };
14825
14826 -struct pv_time_ops pv_time_ops = {
14827 +struct pv_time_ops pv_time_ops __read_only = {
14828 .sched_clock = native_sched_clock,
14829 };
14830
14831 -struct pv_irq_ops pv_irq_ops = {
14832 +struct pv_irq_ops pv_irq_ops __read_only = {
14833 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
14834 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
14835 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
14836 @@ -321,7 +331,7 @@ struct pv_irq_ops pv_irq_ops = {
14837 #endif
14838 };
14839
14840 -struct pv_cpu_ops pv_cpu_ops = {
14841 +struct pv_cpu_ops pv_cpu_ops __read_only = {
14842 .cpuid = native_cpuid,
14843 .get_debugreg = native_get_debugreg,
14844 .set_debugreg = native_set_debugreg,
14845 @@ -382,21 +392,26 @@ struct pv_cpu_ops pv_cpu_ops = {
14846 .end_context_switch = paravirt_nop,
14847 };
14848
14849 -struct pv_apic_ops pv_apic_ops = {
14850 +struct pv_apic_ops pv_apic_ops __read_only = {
14851 #ifdef CONFIG_X86_LOCAL_APIC
14852 .startup_ipi_hook = paravirt_nop,
14853 #endif
14854 };
14855
14856 -#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
14857 +#ifdef CONFIG_X86_32
14858 +#ifdef CONFIG_X86_PAE
14859 +/* 64-bit pagetable entries */
14860 +#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
14861 +#else
14862 /* 32-bit pagetable entries */
14863 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
14864 +#endif
14865 #else
14866 /* 64-bit pagetable entries */
14867 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
14868 #endif
14869
14870 -struct pv_mmu_ops pv_mmu_ops = {
14871 +struct pv_mmu_ops pv_mmu_ops __read_only = {
14872
14873 .read_cr2 = native_read_cr2,
14874 .write_cr2 = native_write_cr2,
14875 @@ -446,6 +461,7 @@ struct pv_mmu_ops pv_mmu_ops = {
14876 .make_pud = PTE_IDENT,
14877
14878 .set_pgd = native_set_pgd,
14879 + .set_pgd_batched = native_set_pgd_batched,
14880 #endif
14881 #endif /* PAGETABLE_LEVELS >= 3 */
14882
14883 @@ -465,6 +481,12 @@ struct pv_mmu_ops pv_mmu_ops = {
14884 },
14885
14886 .set_fixmap = native_set_fixmap,
14887 +
14888 +#ifdef CONFIG_PAX_KERNEXEC
14889 + .pax_open_kernel = native_pax_open_kernel,
14890 + .pax_close_kernel = native_pax_close_kernel,
14891 +#endif
14892 +
14893 };
14894
14895 EXPORT_SYMBOL_GPL(pv_time_ops);
14896 diff -urNp linux-3.0.8/arch/x86/kernel/paravirt-spinlocks.c linux-3.0.8/arch/x86/kernel/paravirt-spinlocks.c
14897 --- linux-3.0.8/arch/x86/kernel/paravirt-spinlocks.c 2011-07-21 22:17:23.000000000 -0400
14898 +++ linux-3.0.8/arch/x86/kernel/paravirt-spinlocks.c 2011-08-23 21:47:55.000000000 -0400
14899 @@ -13,7 +13,7 @@ default_spin_lock_flags(arch_spinlock_t
14900 arch_spin_lock(lock);
14901 }
14902
14903 -struct pv_lock_ops pv_lock_ops = {
14904 +struct pv_lock_ops pv_lock_ops __read_only = {
14905 #ifdef CONFIG_SMP
14906 .spin_is_locked = __ticket_spin_is_locked,
14907 .spin_is_contended = __ticket_spin_is_contended,
14908 diff -urNp linux-3.0.8/arch/x86/kernel/pci-iommu_table.c linux-3.0.8/arch/x86/kernel/pci-iommu_table.c
14909 --- linux-3.0.8/arch/x86/kernel/pci-iommu_table.c 2011-07-21 22:17:23.000000000 -0400
14910 +++ linux-3.0.8/arch/x86/kernel/pci-iommu_table.c 2011-08-23 21:48:14.000000000 -0400
14911 @@ -2,7 +2,7 @@
14912 #include <asm/iommu_table.h>
14913 #include <linux/string.h>
14914 #include <linux/kallsyms.h>
14915 -
14916 +#include <linux/sched.h>
14917
14918 #define DEBUG 1
14919
14920 @@ -51,6 +51,8 @@ void __init check_iommu_entries(struct i
14921 {
14922 struct iommu_table_entry *p, *q, *x;
14923
14924 + pax_track_stack();
14925 +
14926 /* Simple cyclic dependency checker. */
14927 for (p = start; p < finish; p++) {
14928 q = find_dependents_of(start, finish, p);
14929 diff -urNp linux-3.0.8/arch/x86/kernel/process_32.c linux-3.0.8/arch/x86/kernel/process_32.c
14930 --- linux-3.0.8/arch/x86/kernel/process_32.c 2011-07-21 22:17:23.000000000 -0400
14931 +++ linux-3.0.8/arch/x86/kernel/process_32.c 2011-08-23 21:47:55.000000000 -0400
14932 @@ -65,6 +65,7 @@ asmlinkage void ret_from_fork(void) __as
14933 unsigned long thread_saved_pc(struct task_struct *tsk)
14934 {
14935 return ((unsigned long *)tsk->thread.sp)[3];
14936 +//XXX return tsk->thread.eip;
14937 }
14938
14939 #ifndef CONFIG_SMP
14940 @@ -126,15 +127,14 @@ void __show_regs(struct pt_regs *regs, i
14941 unsigned long sp;
14942 unsigned short ss, gs;
14943
14944 - if (user_mode_vm(regs)) {
14945 + if (user_mode(regs)) {
14946 sp = regs->sp;
14947 ss = regs->ss & 0xffff;
14948 - gs = get_user_gs(regs);
14949 } else {
14950 sp = kernel_stack_pointer(regs);
14951 savesegment(ss, ss);
14952 - savesegment(gs, gs);
14953 }
14954 + gs = get_user_gs(regs);
14955
14956 show_regs_common();
14957
14958 @@ -196,13 +196,14 @@ int copy_thread(unsigned long clone_flag
14959 struct task_struct *tsk;
14960 int err;
14961
14962 - childregs = task_pt_regs(p);
14963 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
14964 *childregs = *regs;
14965 childregs->ax = 0;
14966 childregs->sp = sp;
14967
14968 p->thread.sp = (unsigned long) childregs;
14969 p->thread.sp0 = (unsigned long) (childregs+1);
14970 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
14971
14972 p->thread.ip = (unsigned long) ret_from_fork;
14973
14974 @@ -292,7 +293,7 @@ __switch_to(struct task_struct *prev_p,
14975 struct thread_struct *prev = &prev_p->thread,
14976 *next = &next_p->thread;
14977 int cpu = smp_processor_id();
14978 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
14979 + struct tss_struct *tss = init_tss + cpu;
14980 bool preload_fpu;
14981
14982 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
14983 @@ -327,6 +328,10 @@ __switch_to(struct task_struct *prev_p,
14984 */
14985 lazy_save_gs(prev->gs);
14986
14987 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14988 + __set_fs(task_thread_info(next_p)->addr_limit);
14989 +#endif
14990 +
14991 /*
14992 * Load the per-thread Thread-Local Storage descriptor.
14993 */
14994 @@ -362,6 +367,9 @@ __switch_to(struct task_struct *prev_p,
14995 */
14996 arch_end_context_switch(next_p);
14997
14998 + percpu_write(current_task, next_p);
14999 + percpu_write(current_tinfo, &next_p->tinfo);
15000 +
15001 if (preload_fpu)
15002 __math_state_restore();
15003
15004 @@ -371,8 +379,6 @@ __switch_to(struct task_struct *prev_p,
15005 if (prev->gs | next->gs)
15006 lazy_load_gs(next->gs);
15007
15008 - percpu_write(current_task, next_p);
15009 -
15010 return prev_p;
15011 }
15012
15013 @@ -402,4 +408,3 @@ unsigned long get_wchan(struct task_stru
15014 } while (count++ < 16);
15015 return 0;
15016 }
15017 -
15018 diff -urNp linux-3.0.8/arch/x86/kernel/process_64.c linux-3.0.8/arch/x86/kernel/process_64.c
15019 --- linux-3.0.8/arch/x86/kernel/process_64.c 2011-07-21 22:17:23.000000000 -0400
15020 +++ linux-3.0.8/arch/x86/kernel/process_64.c 2011-08-23 21:47:55.000000000 -0400
15021 @@ -87,7 +87,7 @@ static void __exit_idle(void)
15022 void exit_idle(void)
15023 {
15024 /* idle loop has pid 0 */
15025 - if (current->pid)
15026 + if (task_pid_nr(current))
15027 return;
15028 __exit_idle();
15029 }
15030 @@ -260,8 +260,7 @@ int copy_thread(unsigned long clone_flag
15031 struct pt_regs *childregs;
15032 struct task_struct *me = current;
15033
15034 - childregs = ((struct pt_regs *)
15035 - (THREAD_SIZE + task_stack_page(p))) - 1;
15036 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
15037 *childregs = *regs;
15038
15039 childregs->ax = 0;
15040 @@ -273,6 +272,7 @@ int copy_thread(unsigned long clone_flag
15041 p->thread.sp = (unsigned long) childregs;
15042 p->thread.sp0 = (unsigned long) (childregs+1);
15043 p->thread.usersp = me->thread.usersp;
15044 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
15045
15046 set_tsk_thread_flag(p, TIF_FORK);
15047
15048 @@ -375,7 +375,7 @@ __switch_to(struct task_struct *prev_p,
15049 struct thread_struct *prev = &prev_p->thread;
15050 struct thread_struct *next = &next_p->thread;
15051 int cpu = smp_processor_id();
15052 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
15053 + struct tss_struct *tss = init_tss + cpu;
15054 unsigned fsindex, gsindex;
15055 bool preload_fpu;
15056
15057 @@ -471,10 +471,9 @@ __switch_to(struct task_struct *prev_p,
15058 prev->usersp = percpu_read(old_rsp);
15059 percpu_write(old_rsp, next->usersp);
15060 percpu_write(current_task, next_p);
15061 + percpu_write(current_tinfo, &next_p->tinfo);
15062
15063 - percpu_write(kernel_stack,
15064 - (unsigned long)task_stack_page(next_p) +
15065 - THREAD_SIZE - KERNEL_STACK_OFFSET);
15066 + percpu_write(kernel_stack, next->sp0);
15067
15068 /*
15069 * Now maybe reload the debug registers and handle I/O bitmaps
15070 @@ -536,12 +535,11 @@ unsigned long get_wchan(struct task_stru
15071 if (!p || p == current || p->state == TASK_RUNNING)
15072 return 0;
15073 stack = (unsigned long)task_stack_page(p);
15074 - if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
15075 + if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
15076 return 0;
15077 fp = *(u64 *)(p->thread.sp);
15078 do {
15079 - if (fp < (unsigned long)stack ||
15080 - fp >= (unsigned long)stack+THREAD_SIZE)
15081 + if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
15082 return 0;
15083 ip = *(u64 *)(fp+8);
15084 if (!in_sched_functions(ip))
15085 diff -urNp linux-3.0.8/arch/x86/kernel/process.c linux-3.0.8/arch/x86/kernel/process.c
15086 --- linux-3.0.8/arch/x86/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
15087 +++ linux-3.0.8/arch/x86/kernel/process.c 2011-08-30 18:23:52.000000000 -0400
15088 @@ -48,16 +48,33 @@ void free_thread_xstate(struct task_stru
15089
15090 void free_thread_info(struct thread_info *ti)
15091 {
15092 - free_thread_xstate(ti->task);
15093 free_pages((unsigned long)ti, get_order(THREAD_SIZE));
15094 }
15095
15096 +static struct kmem_cache *task_struct_cachep;
15097 +
15098 void arch_task_cache_init(void)
15099 {
15100 - task_xstate_cachep =
15101 - kmem_cache_create("task_xstate", xstate_size,
15102 + /* create a slab on which task_structs can be allocated */
15103 + task_struct_cachep =
15104 + kmem_cache_create("task_struct", sizeof(struct task_struct),
15105 + ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
15106 +
15107 + task_xstate_cachep =
15108 + kmem_cache_create("task_xstate", xstate_size,
15109 __alignof__(union thread_xstate),
15110 - SLAB_PANIC | SLAB_NOTRACK, NULL);
15111 + SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
15112 +}
15113 +
15114 +struct task_struct *alloc_task_struct_node(int node)
15115 +{
15116 + return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
15117 +}
15118 +
15119 +void free_task_struct(struct task_struct *task)
15120 +{
15121 + free_thread_xstate(task);
15122 + kmem_cache_free(task_struct_cachep, task);
15123 }
15124
15125 /*
15126 @@ -70,7 +87,7 @@ void exit_thread(void)
15127 unsigned long *bp = t->io_bitmap_ptr;
15128
15129 if (bp) {
15130 - struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
15131 + struct tss_struct *tss = init_tss + get_cpu();
15132
15133 t->io_bitmap_ptr = NULL;
15134 clear_thread_flag(TIF_IO_BITMAP);
15135 @@ -106,7 +123,7 @@ void show_regs_common(void)
15136
15137 printk(KERN_CONT "\n");
15138 printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s",
15139 - current->pid, current->comm, print_tainted(),
15140 + task_pid_nr(current), current->comm, print_tainted(),
15141 init_utsname()->release,
15142 (int)strcspn(init_utsname()->version, " "),
15143 init_utsname()->version);
15144 @@ -120,6 +137,9 @@ void flush_thread(void)
15145 {
15146 struct task_struct *tsk = current;
15147
15148 +#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
15149 + loadsegment(gs, 0);
15150 +#endif
15151 flush_ptrace_hw_breakpoint(tsk);
15152 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
15153 /*
15154 @@ -282,10 +302,10 @@ int kernel_thread(int (*fn)(void *), voi
15155 regs.di = (unsigned long) arg;
15156
15157 #ifdef CONFIG_X86_32
15158 - regs.ds = __USER_DS;
15159 - regs.es = __USER_DS;
15160 + regs.ds = __KERNEL_DS;
15161 + regs.es = __KERNEL_DS;
15162 regs.fs = __KERNEL_PERCPU;
15163 - regs.gs = __KERNEL_STACK_CANARY;
15164 + savesegment(gs, regs.gs);
15165 #else
15166 regs.ss = __KERNEL_DS;
15167 #endif
15168 @@ -403,7 +423,7 @@ void default_idle(void)
15169 EXPORT_SYMBOL(default_idle);
15170 #endif
15171
15172 -void stop_this_cpu(void *dummy)
15173 +__noreturn void stop_this_cpu(void *dummy)
15174 {
15175 local_irq_disable();
15176 /*
15177 @@ -668,16 +688,37 @@ static int __init idle_setup(char *str)
15178 }
15179 early_param("idle", idle_setup);
15180
15181 -unsigned long arch_align_stack(unsigned long sp)
15182 +#ifdef CONFIG_PAX_RANDKSTACK
15183 +void pax_randomize_kstack(struct pt_regs *regs)
15184 {
15185 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
15186 - sp -= get_random_int() % 8192;
15187 - return sp & ~0xf;
15188 -}
15189 + struct thread_struct *thread = &current->thread;
15190 + unsigned long time;
15191
15192 -unsigned long arch_randomize_brk(struct mm_struct *mm)
15193 -{
15194 - unsigned long range_end = mm->brk + 0x02000000;
15195 - return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
15196 -}
15197 + if (!randomize_va_space)
15198 + return;
15199 +
15200 + if (v8086_mode(regs))
15201 + return;
15202
15203 + rdtscl(time);
15204 +
15205 + /* P4 seems to return a 0 LSB, ignore it */
15206 +#ifdef CONFIG_MPENTIUM4
15207 + time &= 0x3EUL;
15208 + time <<= 2;
15209 +#elif defined(CONFIG_X86_64)
15210 + time &= 0xFUL;
15211 + time <<= 4;
15212 +#else
15213 + time &= 0x1FUL;
15214 + time <<= 3;
15215 +#endif
15216 +
15217 + thread->sp0 ^= time;
15218 + load_sp0(init_tss + smp_processor_id(), thread);
15219 +
15220 +#ifdef CONFIG_X86_64
15221 + percpu_write(kernel_stack, thread->sp0);
15222 +#endif
15223 +}
15224 +#endif
15225 diff -urNp linux-3.0.8/arch/x86/kernel/ptrace.c linux-3.0.8/arch/x86/kernel/ptrace.c
15226 --- linux-3.0.8/arch/x86/kernel/ptrace.c 2011-07-21 22:17:23.000000000 -0400
15227 +++ linux-3.0.8/arch/x86/kernel/ptrace.c 2011-08-23 21:47:55.000000000 -0400
15228 @@ -821,7 +821,7 @@ long arch_ptrace(struct task_struct *chi
15229 unsigned long addr, unsigned long data)
15230 {
15231 int ret;
15232 - unsigned long __user *datap = (unsigned long __user *)data;
15233 + unsigned long __user *datap = (__force unsigned long __user *)data;
15234
15235 switch (request) {
15236 /* read the word at location addr in the USER area. */
15237 @@ -906,14 +906,14 @@ long arch_ptrace(struct task_struct *chi
15238 if ((int) addr < 0)
15239 return -EIO;
15240 ret = do_get_thread_area(child, addr,
15241 - (struct user_desc __user *)data);
15242 + (__force struct user_desc __user *) data);
15243 break;
15244
15245 case PTRACE_SET_THREAD_AREA:
15246 if ((int) addr < 0)
15247 return -EIO;
15248 ret = do_set_thread_area(child, addr,
15249 - (struct user_desc __user *)data, 0);
15250 + (__force struct user_desc __user *) data, 0);
15251 break;
15252 #endif
15253
15254 @@ -1330,7 +1330,7 @@ static void fill_sigtrap_info(struct tas
15255 memset(info, 0, sizeof(*info));
15256 info->si_signo = SIGTRAP;
15257 info->si_code = si_code;
15258 - info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
15259 + info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
15260 }
15261
15262 void user_single_step_siginfo(struct task_struct *tsk,
15263 diff -urNp linux-3.0.8/arch/x86/kernel/pvclock.c linux-3.0.8/arch/x86/kernel/pvclock.c
15264 --- linux-3.0.8/arch/x86/kernel/pvclock.c 2011-07-21 22:17:23.000000000 -0400
15265 +++ linux-3.0.8/arch/x86/kernel/pvclock.c 2011-08-23 21:47:55.000000000 -0400
15266 @@ -81,11 +81,11 @@ unsigned long pvclock_tsc_khz(struct pvc
15267 return pv_tsc_khz;
15268 }
15269
15270 -static atomic64_t last_value = ATOMIC64_INIT(0);
15271 +static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
15272
15273 void pvclock_resume(void)
15274 {
15275 - atomic64_set(&last_value, 0);
15276 + atomic64_set_unchecked(&last_value, 0);
15277 }
15278
15279 cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
15280 @@ -121,11 +121,11 @@ cycle_t pvclock_clocksource_read(struct
15281 * updating at the same time, and one of them could be slightly behind,
15282 * making the assumption that last_value always go forward fail to hold.
15283 */
15284 - last = atomic64_read(&last_value);
15285 + last = atomic64_read_unchecked(&last_value);
15286 do {
15287 if (ret < last)
15288 return last;
15289 - last = atomic64_cmpxchg(&last_value, last, ret);
15290 + last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
15291 } while (unlikely(last != ret));
15292
15293 return ret;
15294 diff -urNp linux-3.0.8/arch/x86/kernel/reboot.c linux-3.0.8/arch/x86/kernel/reboot.c
15295 --- linux-3.0.8/arch/x86/kernel/reboot.c 2011-07-21 22:17:23.000000000 -0400
15296 +++ linux-3.0.8/arch/x86/kernel/reboot.c 2011-08-23 21:47:55.000000000 -0400
15297 @@ -35,7 +35,7 @@ void (*pm_power_off)(void);
15298 EXPORT_SYMBOL(pm_power_off);
15299
15300 static const struct desc_ptr no_idt = {};
15301 -static int reboot_mode;
15302 +static unsigned short reboot_mode;
15303 enum reboot_type reboot_type = BOOT_ACPI;
15304 int reboot_force;
15305
15306 @@ -315,13 +315,17 @@ core_initcall(reboot_init);
15307 extern const unsigned char machine_real_restart_asm[];
15308 extern const u64 machine_real_restart_gdt[3];
15309
15310 -void machine_real_restart(unsigned int type)
15311 +__noreturn void machine_real_restart(unsigned int type)
15312 {
15313 void *restart_va;
15314 unsigned long restart_pa;
15315 - void (*restart_lowmem)(unsigned int);
15316 + void (* __noreturn restart_lowmem)(unsigned int);
15317 u64 *lowmem_gdt;
15318
15319 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
15320 + struct desc_struct *gdt;
15321 +#endif
15322 +
15323 local_irq_disable();
15324
15325 /* Write zero to CMOS register number 0x0f, which the BIOS POST
15326 @@ -347,14 +351,14 @@ void machine_real_restart(unsigned int t
15327 boot)". This seems like a fairly standard thing that gets set by
15328 REBOOT.COM programs, and the previous reset routine did this
15329 too. */
15330 - *((unsigned short *)0x472) = reboot_mode;
15331 + *(unsigned short *)(__va(0x472)) = reboot_mode;
15332
15333 /* Patch the GDT in the low memory trampoline */
15334 lowmem_gdt = TRAMPOLINE_SYM(machine_real_restart_gdt);
15335
15336 restart_va = TRAMPOLINE_SYM(machine_real_restart_asm);
15337 restart_pa = virt_to_phys(restart_va);
15338 - restart_lowmem = (void (*)(unsigned int))restart_pa;
15339 + restart_lowmem = (void *)restart_pa;
15340
15341 /* GDT[0]: GDT self-pointer */
15342 lowmem_gdt[0] =
15343 @@ -365,7 +369,33 @@ void machine_real_restart(unsigned int t
15344 GDT_ENTRY(0x009b, restart_pa, 0xffff);
15345
15346 /* Jump to the identity-mapped low memory code */
15347 +
15348 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
15349 + gdt = get_cpu_gdt_table(smp_processor_id());
15350 + pax_open_kernel();
15351 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15352 + gdt[GDT_ENTRY_KERNEL_DS].type = 3;
15353 + gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
15354 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
15355 +#endif
15356 +#ifdef CONFIG_PAX_KERNEXEC
15357 + gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
15358 + gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
15359 + gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
15360 + gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
15361 + gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
15362 + gdt[GDT_ENTRY_KERNEL_CS].g = 1;
15363 +#endif
15364 + pax_close_kernel();
15365 +#endif
15366 +
15367 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
15368 + asm volatile("push %0; push %1; lret\n" : : "i" (__KERNEL_CS), "rm" (restart_lowmem), "a" (type));
15369 + unreachable();
15370 +#else
15371 restart_lowmem(type);
15372 +#endif
15373 +
15374 }
15375 #ifdef CONFIG_APM_MODULE
15376 EXPORT_SYMBOL(machine_real_restart);
15377 @@ -523,7 +553,7 @@ void __attribute__((weak)) mach_reboot_f
15378 * try to force a triple fault and then cycle between hitting the keyboard
15379 * controller and doing that
15380 */
15381 -static void native_machine_emergency_restart(void)
15382 +__noreturn static void native_machine_emergency_restart(void)
15383 {
15384 int i;
15385 int attempt = 0;
15386 @@ -647,13 +677,13 @@ void native_machine_shutdown(void)
15387 #endif
15388 }
15389
15390 -static void __machine_emergency_restart(int emergency)
15391 +static __noreturn void __machine_emergency_restart(int emergency)
15392 {
15393 reboot_emergency = emergency;
15394 machine_ops.emergency_restart();
15395 }
15396
15397 -static void native_machine_restart(char *__unused)
15398 +static __noreturn void native_machine_restart(char *__unused)
15399 {
15400 printk("machine restart\n");
15401
15402 @@ -662,7 +692,7 @@ static void native_machine_restart(char
15403 __machine_emergency_restart(0);
15404 }
15405
15406 -static void native_machine_halt(void)
15407 +static __noreturn void native_machine_halt(void)
15408 {
15409 /* stop other cpus and apics */
15410 machine_shutdown();
15411 @@ -673,7 +703,7 @@ static void native_machine_halt(void)
15412 stop_this_cpu(NULL);
15413 }
15414
15415 -static void native_machine_power_off(void)
15416 +__noreturn static void native_machine_power_off(void)
15417 {
15418 if (pm_power_off) {
15419 if (!reboot_force)
15420 @@ -682,6 +712,7 @@ static void native_machine_power_off(voi
15421 }
15422 /* a fallback in case there is no PM info available */
15423 tboot_shutdown(TB_SHUTDOWN_HALT);
15424 + unreachable();
15425 }
15426
15427 struct machine_ops machine_ops = {
15428 diff -urNp linux-3.0.8/arch/x86/kernel/setup.c linux-3.0.8/arch/x86/kernel/setup.c
15429 --- linux-3.0.8/arch/x86/kernel/setup.c 2011-07-21 22:17:23.000000000 -0400
15430 +++ linux-3.0.8/arch/x86/kernel/setup.c 2011-10-06 04:17:55.000000000 -0400
15431 @@ -447,7 +447,7 @@ static void __init parse_setup_data(void
15432
15433 switch (data->type) {
15434 case SETUP_E820_EXT:
15435 - parse_e820_ext(data);
15436 + parse_e820_ext((struct setup_data __force_kernel *)data);
15437 break;
15438 case SETUP_DTB:
15439 add_dtb(pa_data);
15440 @@ -650,7 +650,7 @@ static void __init trim_bios_range(void)
15441 * area (640->1Mb) as ram even though it is not.
15442 * take them out.
15443 */
15444 - e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
15445 + e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
15446 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
15447 }
15448
15449 @@ -773,14 +773,14 @@ void __init setup_arch(char **cmdline_p)
15450
15451 if (!boot_params.hdr.root_flags)
15452 root_mountflags &= ~MS_RDONLY;
15453 - init_mm.start_code = (unsigned long) _text;
15454 - init_mm.end_code = (unsigned long) _etext;
15455 + init_mm.start_code = ktla_ktva((unsigned long) _text);
15456 + init_mm.end_code = ktla_ktva((unsigned long) _etext);
15457 init_mm.end_data = (unsigned long) _edata;
15458 init_mm.brk = _brk_end;
15459
15460 - code_resource.start = virt_to_phys(_text);
15461 - code_resource.end = virt_to_phys(_etext)-1;
15462 - data_resource.start = virt_to_phys(_etext);
15463 + code_resource.start = virt_to_phys(ktla_ktva(_text));
15464 + code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
15465 + data_resource.start = virt_to_phys(_sdata);
15466 data_resource.end = virt_to_phys(_edata)-1;
15467 bss_resource.start = virt_to_phys(&__bss_start);
15468 bss_resource.end = virt_to_phys(&__bss_stop)-1;
15469 diff -urNp linux-3.0.8/arch/x86/kernel/setup_percpu.c linux-3.0.8/arch/x86/kernel/setup_percpu.c
15470 --- linux-3.0.8/arch/x86/kernel/setup_percpu.c 2011-07-21 22:17:23.000000000 -0400
15471 +++ linux-3.0.8/arch/x86/kernel/setup_percpu.c 2011-08-23 21:47:55.000000000 -0400
15472 @@ -21,19 +21,17 @@
15473 #include <asm/cpu.h>
15474 #include <asm/stackprotector.h>
15475
15476 -DEFINE_PER_CPU(int, cpu_number);
15477 +#ifdef CONFIG_SMP
15478 +DEFINE_PER_CPU(unsigned int, cpu_number);
15479 EXPORT_PER_CPU_SYMBOL(cpu_number);
15480 +#endif
15481
15482 -#ifdef CONFIG_X86_64
15483 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
15484 -#else
15485 -#define BOOT_PERCPU_OFFSET 0
15486 -#endif
15487
15488 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
15489 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
15490
15491 -unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
15492 +unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
15493 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
15494 };
15495 EXPORT_SYMBOL(__per_cpu_offset);
15496 @@ -155,10 +153,10 @@ static inline void setup_percpu_segment(
15497 {
15498 #ifdef CONFIG_X86_32
15499 struct desc_struct gdt;
15500 + unsigned long base = per_cpu_offset(cpu);
15501
15502 - pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
15503 - 0x2 | DESCTYPE_S, 0x8);
15504 - gdt.s = 1;
15505 + pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
15506 + 0x83 | DESCTYPE_S, 0xC);
15507 write_gdt_entry(get_cpu_gdt_table(cpu),
15508 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
15509 #endif
15510 @@ -207,6 +205,11 @@ void __init setup_per_cpu_areas(void)
15511 /* alrighty, percpu areas up and running */
15512 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
15513 for_each_possible_cpu(cpu) {
15514 +#ifdef CONFIG_CC_STACKPROTECTOR
15515 +#ifdef CONFIG_X86_32
15516 + unsigned long canary = per_cpu(stack_canary.canary, cpu);
15517 +#endif
15518 +#endif
15519 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
15520 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
15521 per_cpu(cpu_number, cpu) = cpu;
15522 @@ -247,6 +250,12 @@ void __init setup_per_cpu_areas(void)
15523 */
15524 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
15525 #endif
15526 +#ifdef CONFIG_CC_STACKPROTECTOR
15527 +#ifdef CONFIG_X86_32
15528 + if (!cpu)
15529 + per_cpu(stack_canary.canary, cpu) = canary;
15530 +#endif
15531 +#endif
15532 /*
15533 * Up to this point, the boot CPU has been using .init.data
15534 * area. Reload any changed state for the boot CPU.
15535 diff -urNp linux-3.0.8/arch/x86/kernel/signal.c linux-3.0.8/arch/x86/kernel/signal.c
15536 --- linux-3.0.8/arch/x86/kernel/signal.c 2011-07-21 22:17:23.000000000 -0400
15537 +++ linux-3.0.8/arch/x86/kernel/signal.c 2011-08-23 21:48:14.000000000 -0400
15538 @@ -198,7 +198,7 @@ static unsigned long align_sigframe(unsi
15539 * Align the stack pointer according to the i386 ABI,
15540 * i.e. so that on function entry ((sp + 4) & 15) == 0.
15541 */
15542 - sp = ((sp + 4) & -16ul) - 4;
15543 + sp = ((sp - 12) & -16ul) - 4;
15544 #else /* !CONFIG_X86_32 */
15545 sp = round_down(sp, 16) - 8;
15546 #endif
15547 @@ -249,11 +249,11 @@ get_sigframe(struct k_sigaction *ka, str
15548 * Return an always-bogus address instead so we will die with SIGSEGV.
15549 */
15550 if (onsigstack && !likely(on_sig_stack(sp)))
15551 - return (void __user *)-1L;
15552 + return (__force void __user *)-1L;
15553
15554 /* save i387 state */
15555 if (used_math() && save_i387_xstate(*fpstate) < 0)
15556 - return (void __user *)-1L;
15557 + return (__force void __user *)-1L;
15558
15559 return (void __user *)sp;
15560 }
15561 @@ -308,9 +308,9 @@ __setup_frame(int sig, struct k_sigactio
15562 }
15563
15564 if (current->mm->context.vdso)
15565 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
15566 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
15567 else
15568 - restorer = &frame->retcode;
15569 + restorer = (void __user *)&frame->retcode;
15570 if (ka->sa.sa_flags & SA_RESTORER)
15571 restorer = ka->sa.sa_restorer;
15572
15573 @@ -324,7 +324,7 @@ __setup_frame(int sig, struct k_sigactio
15574 * reasons and because gdb uses it as a signature to notice
15575 * signal handler stack frames.
15576 */
15577 - err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
15578 + err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
15579
15580 if (err)
15581 return -EFAULT;
15582 @@ -378,7 +378,10 @@ static int __setup_rt_frame(int sig, str
15583 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
15584
15585 /* Set up to return from userspace. */
15586 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
15587 + if (current->mm->context.vdso)
15588 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
15589 + else
15590 + restorer = (void __user *)&frame->retcode;
15591 if (ka->sa.sa_flags & SA_RESTORER)
15592 restorer = ka->sa.sa_restorer;
15593 put_user_ex(restorer, &frame->pretcode);
15594 @@ -390,7 +393,7 @@ static int __setup_rt_frame(int sig, str
15595 * reasons and because gdb uses it as a signature to notice
15596 * signal handler stack frames.
15597 */
15598 - put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
15599 + put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
15600 } put_user_catch(err);
15601
15602 if (err)
15603 @@ -769,6 +772,8 @@ static void do_signal(struct pt_regs *re
15604 int signr;
15605 sigset_t *oldset;
15606
15607 + pax_track_stack();
15608 +
15609 /*
15610 * We want the common case to go fast, which is why we may in certain
15611 * cases get here from kernel mode. Just return without doing anything
15612 @@ -776,7 +781,7 @@ static void do_signal(struct pt_regs *re
15613 * X86_32: vm86 regs switched out by assembly code before reaching
15614 * here, so testing against kernel CS suffices.
15615 */
15616 - if (!user_mode(regs))
15617 + if (!user_mode_novm(regs))
15618 return;
15619
15620 if (current_thread_info()->status & TS_RESTORE_SIGMASK)
15621 diff -urNp linux-3.0.8/arch/x86/kernel/smpboot.c linux-3.0.8/arch/x86/kernel/smpboot.c
15622 --- linux-3.0.8/arch/x86/kernel/smpboot.c 2011-07-21 22:17:23.000000000 -0400
15623 +++ linux-3.0.8/arch/x86/kernel/smpboot.c 2011-08-23 21:47:55.000000000 -0400
15624 @@ -709,17 +709,20 @@ static int __cpuinit do_boot_cpu(int api
15625 set_idle_for_cpu(cpu, c_idle.idle);
15626 do_rest:
15627 per_cpu(current_task, cpu) = c_idle.idle;
15628 + per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
15629 #ifdef CONFIG_X86_32
15630 /* Stack for startup_32 can be just as for start_secondary onwards */
15631 irq_ctx_init(cpu);
15632 #else
15633 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
15634 initial_gs = per_cpu_offset(cpu);
15635 - per_cpu(kernel_stack, cpu) =
15636 - (unsigned long)task_stack_page(c_idle.idle) -
15637 - KERNEL_STACK_OFFSET + THREAD_SIZE;
15638 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
15639 #endif
15640 +
15641 + pax_open_kernel();
15642 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
15643 + pax_close_kernel();
15644 +
15645 initial_code = (unsigned long)start_secondary;
15646 stack_start = c_idle.idle->thread.sp;
15647
15648 @@ -861,6 +864,12 @@ int __cpuinit native_cpu_up(unsigned int
15649
15650 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
15651
15652 +#ifdef CONFIG_PAX_PER_CPU_PGD
15653 + clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
15654 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
15655 + KERNEL_PGD_PTRS);
15656 +#endif
15657 +
15658 err = do_boot_cpu(apicid, cpu);
15659 if (err) {
15660 pr_debug("do_boot_cpu failed %d\n", err);
15661 diff -urNp linux-3.0.8/arch/x86/kernel/step.c linux-3.0.8/arch/x86/kernel/step.c
15662 --- linux-3.0.8/arch/x86/kernel/step.c 2011-07-21 22:17:23.000000000 -0400
15663 +++ linux-3.0.8/arch/x86/kernel/step.c 2011-08-23 21:47:55.000000000 -0400
15664 @@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struc
15665 struct desc_struct *desc;
15666 unsigned long base;
15667
15668 - seg &= ~7UL;
15669 + seg >>= 3;
15670
15671 mutex_lock(&child->mm->context.lock);
15672 - if (unlikely((seg >> 3) >= child->mm->context.size))
15673 + if (unlikely(seg >= child->mm->context.size))
15674 addr = -1L; /* bogus selector, access would fault */
15675 else {
15676 desc = child->mm->context.ldt + seg;
15677 @@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struc
15678 addr += base;
15679 }
15680 mutex_unlock(&child->mm->context.lock);
15681 - }
15682 + } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
15683 + addr = ktla_ktva(addr);
15684
15685 return addr;
15686 }
15687 @@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct t
15688 unsigned char opcode[15];
15689 unsigned long addr = convert_ip_to_linear(child, regs);
15690
15691 + if (addr == -EINVAL)
15692 + return 0;
15693 +
15694 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
15695 for (i = 0; i < copied; i++) {
15696 switch (opcode[i]) {
15697 @@ -74,7 +78,7 @@ static int is_setting_trap_flag(struct t
15698
15699 #ifdef CONFIG_X86_64
15700 case 0x40 ... 0x4f:
15701 - if (regs->cs != __USER_CS)
15702 + if ((regs->cs & 0xffff) != __USER_CS)
15703 /* 32-bit mode: register increment */
15704 return 0;
15705 /* 64-bit mode: REX prefix */
15706 diff -urNp linux-3.0.8/arch/x86/kernel/syscall_table_32.S linux-3.0.8/arch/x86/kernel/syscall_table_32.S
15707 --- linux-3.0.8/arch/x86/kernel/syscall_table_32.S 2011-07-21 22:17:23.000000000 -0400
15708 +++ linux-3.0.8/arch/x86/kernel/syscall_table_32.S 2011-08-23 21:47:55.000000000 -0400
15709 @@ -1,3 +1,4 @@
15710 +.section .rodata,"a",@progbits
15711 ENTRY(sys_call_table)
15712 .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
15713 .long sys_exit
15714 diff -urNp linux-3.0.8/arch/x86/kernel/sys_i386_32.c linux-3.0.8/arch/x86/kernel/sys_i386_32.c
15715 --- linux-3.0.8/arch/x86/kernel/sys_i386_32.c 2011-07-21 22:17:23.000000000 -0400
15716 +++ linux-3.0.8/arch/x86/kernel/sys_i386_32.c 2011-08-23 21:47:55.000000000 -0400
15717 @@ -24,17 +24,224 @@
15718
15719 #include <asm/syscalls.h>
15720
15721 -/*
15722 - * Do a system call from kernel instead of calling sys_execve so we
15723 - * end up with proper pt_regs.
15724 - */
15725 -int kernel_execve(const char *filename,
15726 - const char *const argv[],
15727 - const char *const envp[])
15728 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
15729 {
15730 - long __res;
15731 - asm volatile ("int $0x80"
15732 - : "=a" (__res)
15733 - : "0" (__NR_execve), "b" (filename), "c" (argv), "d" (envp) : "memory");
15734 - return __res;
15735 + unsigned long pax_task_size = TASK_SIZE;
15736 +
15737 +#ifdef CONFIG_PAX_SEGMEXEC
15738 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
15739 + pax_task_size = SEGMEXEC_TASK_SIZE;
15740 +#endif
15741 +
15742 + if (len > pax_task_size || addr > pax_task_size - len)
15743 + return -EINVAL;
15744 +
15745 + return 0;
15746 +}
15747 +
15748 +unsigned long
15749 +arch_get_unmapped_area(struct file *filp, unsigned long addr,
15750 + unsigned long len, unsigned long pgoff, unsigned long flags)
15751 +{
15752 + struct mm_struct *mm = current->mm;
15753 + struct vm_area_struct *vma;
15754 + unsigned long start_addr, pax_task_size = TASK_SIZE;
15755 +
15756 +#ifdef CONFIG_PAX_SEGMEXEC
15757 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
15758 + pax_task_size = SEGMEXEC_TASK_SIZE;
15759 +#endif
15760 +
15761 + pax_task_size -= PAGE_SIZE;
15762 +
15763 + if (len > pax_task_size)
15764 + return -ENOMEM;
15765 +
15766 + if (flags & MAP_FIXED)
15767 + return addr;
15768 +
15769 +#ifdef CONFIG_PAX_RANDMMAP
15770 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
15771 +#endif
15772 +
15773 + if (addr) {
15774 + addr = PAGE_ALIGN(addr);
15775 + if (pax_task_size - len >= addr) {
15776 + vma = find_vma(mm, addr);
15777 + if (check_heap_stack_gap(vma, addr, len))
15778 + return addr;
15779 + }
15780 + }
15781 + if (len > mm->cached_hole_size) {
15782 + start_addr = addr = mm->free_area_cache;
15783 + } else {
15784 + start_addr = addr = mm->mmap_base;
15785 + mm->cached_hole_size = 0;
15786 + }
15787 +
15788 +#ifdef CONFIG_PAX_PAGEEXEC
15789 + if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
15790 + start_addr = 0x00110000UL;
15791 +
15792 +#ifdef CONFIG_PAX_RANDMMAP
15793 + if (mm->pax_flags & MF_PAX_RANDMMAP)
15794 + start_addr += mm->delta_mmap & 0x03FFF000UL;
15795 +#endif
15796 +
15797 + if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
15798 + start_addr = addr = mm->mmap_base;
15799 + else
15800 + addr = start_addr;
15801 + }
15802 +#endif
15803 +
15804 +full_search:
15805 + for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
15806 + /* At this point: (!vma || addr < vma->vm_end). */
15807 + if (pax_task_size - len < addr) {
15808 + /*
15809 + * Start a new search - just in case we missed
15810 + * some holes.
15811 + */
15812 + if (start_addr != mm->mmap_base) {
15813 + start_addr = addr = mm->mmap_base;
15814 + mm->cached_hole_size = 0;
15815 + goto full_search;
15816 + }
15817 + return -ENOMEM;
15818 + }
15819 + if (check_heap_stack_gap(vma, addr, len))
15820 + break;
15821 + if (addr + mm->cached_hole_size < vma->vm_start)
15822 + mm->cached_hole_size = vma->vm_start - addr;
15823 + addr = vma->vm_end;
15824 + if (mm->start_brk <= addr && addr < mm->mmap_base) {
15825 + start_addr = addr = mm->mmap_base;
15826 + mm->cached_hole_size = 0;
15827 + goto full_search;
15828 + }
15829 + }
15830 +
15831 + /*
15832 + * Remember the place where we stopped the search:
15833 + */
15834 + mm->free_area_cache = addr + len;
15835 + return addr;
15836 +}
15837 +
15838 +unsigned long
15839 +arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
15840 + const unsigned long len, const unsigned long pgoff,
15841 + const unsigned long flags)
15842 +{
15843 + struct vm_area_struct *vma;
15844 + struct mm_struct *mm = current->mm;
15845 + unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
15846 +
15847 +#ifdef CONFIG_PAX_SEGMEXEC
15848 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
15849 + pax_task_size = SEGMEXEC_TASK_SIZE;
15850 +#endif
15851 +
15852 + pax_task_size -= PAGE_SIZE;
15853 +
15854 + /* requested length too big for entire address space */
15855 + if (len > pax_task_size)
15856 + return -ENOMEM;
15857 +
15858 + if (flags & MAP_FIXED)
15859 + return addr;
15860 +
15861 +#ifdef CONFIG_PAX_PAGEEXEC
15862 + if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
15863 + goto bottomup;
15864 +#endif
15865 +
15866 +#ifdef CONFIG_PAX_RANDMMAP
15867 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
15868 +#endif
15869 +
15870 + /* requesting a specific address */
15871 + if (addr) {
15872 + addr = PAGE_ALIGN(addr);
15873 + if (pax_task_size - len >= addr) {
15874 + vma = find_vma(mm, addr);
15875 + if (check_heap_stack_gap(vma, addr, len))
15876 + return addr;
15877 + }
15878 + }
15879 +
15880 + /* check if free_area_cache is useful for us */
15881 + if (len <= mm->cached_hole_size) {
15882 + mm->cached_hole_size = 0;
15883 + mm->free_area_cache = mm->mmap_base;
15884 + }
15885 +
15886 + /* either no address requested or can't fit in requested address hole */
15887 + addr = mm->free_area_cache;
15888 +
15889 + /* make sure it can fit in the remaining address space */
15890 + if (addr > len) {
15891 + vma = find_vma(mm, addr-len);
15892 + if (check_heap_stack_gap(vma, addr - len, len))
15893 + /* remember the address as a hint for next time */
15894 + return (mm->free_area_cache = addr-len);
15895 + }
15896 +
15897 + if (mm->mmap_base < len)
15898 + goto bottomup;
15899 +
15900 + addr = mm->mmap_base-len;
15901 +
15902 + do {
15903 + /*
15904 + * Lookup failure means no vma is above this address,
15905 + * else if new region fits below vma->vm_start,
15906 + * return with success:
15907 + */
15908 + vma = find_vma(mm, addr);
15909 + if (check_heap_stack_gap(vma, addr, len))
15910 + /* remember the address as a hint for next time */
15911 + return (mm->free_area_cache = addr);
15912 +
15913 + /* remember the largest hole we saw so far */
15914 + if (addr + mm->cached_hole_size < vma->vm_start)
15915 + mm->cached_hole_size = vma->vm_start - addr;
15916 +
15917 + /* try just below the current vma->vm_start */
15918 + addr = skip_heap_stack_gap(vma, len);
15919 + } while (!IS_ERR_VALUE(addr));
15920 +
15921 +bottomup:
15922 + /*
15923 + * A failed mmap() very likely causes application failure,
15924 + * so fall back to the bottom-up function here. This scenario
15925 + * can happen with large stack limits and large mmap()
15926 + * allocations.
15927 + */
15928 +
15929 +#ifdef CONFIG_PAX_SEGMEXEC
15930 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
15931 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
15932 + else
15933 +#endif
15934 +
15935 + mm->mmap_base = TASK_UNMAPPED_BASE;
15936 +
15937 +#ifdef CONFIG_PAX_RANDMMAP
15938 + if (mm->pax_flags & MF_PAX_RANDMMAP)
15939 + mm->mmap_base += mm->delta_mmap;
15940 +#endif
15941 +
15942 + mm->free_area_cache = mm->mmap_base;
15943 + mm->cached_hole_size = ~0UL;
15944 + addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
15945 + /*
15946 + * Restore the topdown base:
15947 + */
15948 + mm->mmap_base = base;
15949 + mm->free_area_cache = base;
15950 + mm->cached_hole_size = ~0UL;
15951 +
15952 + return addr;
15953 }
15954 diff -urNp linux-3.0.8/arch/x86/kernel/sys_x86_64.c linux-3.0.8/arch/x86/kernel/sys_x86_64.c
15955 --- linux-3.0.8/arch/x86/kernel/sys_x86_64.c 2011-07-21 22:17:23.000000000 -0400
15956 +++ linux-3.0.8/arch/x86/kernel/sys_x86_64.c 2011-08-23 21:47:55.000000000 -0400
15957 @@ -32,8 +32,8 @@ out:
15958 return error;
15959 }
15960
15961 -static void find_start_end(unsigned long flags, unsigned long *begin,
15962 - unsigned long *end)
15963 +static void find_start_end(struct mm_struct *mm, unsigned long flags,
15964 + unsigned long *begin, unsigned long *end)
15965 {
15966 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
15967 unsigned long new_begin;
15968 @@ -52,7 +52,7 @@ static void find_start_end(unsigned long
15969 *begin = new_begin;
15970 }
15971 } else {
15972 - *begin = TASK_UNMAPPED_BASE;
15973 + *begin = mm->mmap_base;
15974 *end = TASK_SIZE;
15975 }
15976 }
15977 @@ -69,16 +69,19 @@ arch_get_unmapped_area(struct file *filp
15978 if (flags & MAP_FIXED)
15979 return addr;
15980
15981 - find_start_end(flags, &begin, &end);
15982 + find_start_end(mm, flags, &begin, &end);
15983
15984 if (len > end)
15985 return -ENOMEM;
15986
15987 +#ifdef CONFIG_PAX_RANDMMAP
15988 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
15989 +#endif
15990 +
15991 if (addr) {
15992 addr = PAGE_ALIGN(addr);
15993 vma = find_vma(mm, addr);
15994 - if (end - len >= addr &&
15995 - (!vma || addr + len <= vma->vm_start))
15996 + if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
15997 return addr;
15998 }
15999 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
16000 @@ -106,7 +109,7 @@ full_search:
16001 }
16002 return -ENOMEM;
16003 }
16004 - if (!vma || addr + len <= vma->vm_start) {
16005 + if (check_heap_stack_gap(vma, addr, len)) {
16006 /*
16007 * Remember the place where we stopped the search:
16008 */
16009 @@ -128,7 +131,7 @@ arch_get_unmapped_area_topdown(struct fi
16010 {
16011 struct vm_area_struct *vma;
16012 struct mm_struct *mm = current->mm;
16013 - unsigned long addr = addr0;
16014 + unsigned long base = mm->mmap_base, addr = addr0;
16015
16016 /* requested length too big for entire address space */
16017 if (len > TASK_SIZE)
16018 @@ -141,13 +144,18 @@ arch_get_unmapped_area_topdown(struct fi
16019 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
16020 goto bottomup;
16021
16022 +#ifdef CONFIG_PAX_RANDMMAP
16023 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
16024 +#endif
16025 +
16026 /* requesting a specific address */
16027 if (addr) {
16028 addr = PAGE_ALIGN(addr);
16029 - vma = find_vma(mm, addr);
16030 - if (TASK_SIZE - len >= addr &&
16031 - (!vma || addr + len <= vma->vm_start))
16032 - return addr;
16033 + if (TASK_SIZE - len >= addr) {
16034 + vma = find_vma(mm, addr);
16035 + if (check_heap_stack_gap(vma, addr, len))
16036 + return addr;
16037 + }
16038 }
16039
16040 /* check if free_area_cache is useful for us */
16041 @@ -162,7 +170,7 @@ arch_get_unmapped_area_topdown(struct fi
16042 /* make sure it can fit in the remaining address space */
16043 if (addr > len) {
16044 vma = find_vma(mm, addr-len);
16045 - if (!vma || addr <= vma->vm_start)
16046 + if (check_heap_stack_gap(vma, addr - len, len))
16047 /* remember the address as a hint for next time */
16048 return mm->free_area_cache = addr-len;
16049 }
16050 @@ -179,7 +187,7 @@ arch_get_unmapped_area_topdown(struct fi
16051 * return with success:
16052 */
16053 vma = find_vma(mm, addr);
16054 - if (!vma || addr+len <= vma->vm_start)
16055 + if (check_heap_stack_gap(vma, addr, len))
16056 /* remember the address as a hint for next time */
16057 return mm->free_area_cache = addr;
16058
16059 @@ -188,8 +196,8 @@ arch_get_unmapped_area_topdown(struct fi
16060 mm->cached_hole_size = vma->vm_start - addr;
16061
16062 /* try just below the current vma->vm_start */
16063 - addr = vma->vm_start-len;
16064 - } while (len < vma->vm_start);
16065 + addr = skip_heap_stack_gap(vma, len);
16066 + } while (!IS_ERR_VALUE(addr));
16067
16068 bottomup:
16069 /*
16070 @@ -198,13 +206,21 @@ bottomup:
16071 * can happen with large stack limits and large mmap()
16072 * allocations.
16073 */
16074 + mm->mmap_base = TASK_UNMAPPED_BASE;
16075 +
16076 +#ifdef CONFIG_PAX_RANDMMAP
16077 + if (mm->pax_flags & MF_PAX_RANDMMAP)
16078 + mm->mmap_base += mm->delta_mmap;
16079 +#endif
16080 +
16081 + mm->free_area_cache = mm->mmap_base;
16082 mm->cached_hole_size = ~0UL;
16083 - mm->free_area_cache = TASK_UNMAPPED_BASE;
16084 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
16085 /*
16086 * Restore the topdown base:
16087 */
16088 - mm->free_area_cache = mm->mmap_base;
16089 + mm->mmap_base = base;
16090 + mm->free_area_cache = base;
16091 mm->cached_hole_size = ~0UL;
16092
16093 return addr;
16094 diff -urNp linux-3.0.8/arch/x86/kernel/tboot.c linux-3.0.8/arch/x86/kernel/tboot.c
16095 --- linux-3.0.8/arch/x86/kernel/tboot.c 2011-07-21 22:17:23.000000000 -0400
16096 +++ linux-3.0.8/arch/x86/kernel/tboot.c 2011-08-23 21:47:55.000000000 -0400
16097 @@ -217,7 +217,7 @@ static int tboot_setup_sleep(void)
16098
16099 void tboot_shutdown(u32 shutdown_type)
16100 {
16101 - void (*shutdown)(void);
16102 + void (* __noreturn shutdown)(void);
16103
16104 if (!tboot_enabled())
16105 return;
16106 @@ -239,7 +239,7 @@ void tboot_shutdown(u32 shutdown_type)
16107
16108 switch_to_tboot_pt();
16109
16110 - shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
16111 + shutdown = (void *)tboot->shutdown_entry;
16112 shutdown();
16113
16114 /* should not reach here */
16115 @@ -296,7 +296,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1
16116 tboot_shutdown(acpi_shutdown_map[sleep_state]);
16117 }
16118
16119 -static atomic_t ap_wfs_count;
16120 +static atomic_unchecked_t ap_wfs_count;
16121
16122 static int tboot_wait_for_aps(int num_aps)
16123 {
16124 @@ -320,9 +320,9 @@ static int __cpuinit tboot_cpu_callback(
16125 {
16126 switch (action) {
16127 case CPU_DYING:
16128 - atomic_inc(&ap_wfs_count);
16129 + atomic_inc_unchecked(&ap_wfs_count);
16130 if (num_online_cpus() == 1)
16131 - if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
16132 + if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
16133 return NOTIFY_BAD;
16134 break;
16135 }
16136 @@ -341,7 +341,7 @@ static __init int tboot_late_init(void)
16137
16138 tboot_create_trampoline();
16139
16140 - atomic_set(&ap_wfs_count, 0);
16141 + atomic_set_unchecked(&ap_wfs_count, 0);
16142 register_hotcpu_notifier(&tboot_cpu_notifier);
16143 return 0;
16144 }
16145 diff -urNp linux-3.0.8/arch/x86/kernel/time.c linux-3.0.8/arch/x86/kernel/time.c
16146 --- linux-3.0.8/arch/x86/kernel/time.c 2011-07-21 22:17:23.000000000 -0400
16147 +++ linux-3.0.8/arch/x86/kernel/time.c 2011-08-23 21:47:55.000000000 -0400
16148 @@ -30,9 +30,9 @@ unsigned long profile_pc(struct pt_regs
16149 {
16150 unsigned long pc = instruction_pointer(regs);
16151
16152 - if (!user_mode_vm(regs) && in_lock_functions(pc)) {
16153 + if (!user_mode(regs) && in_lock_functions(pc)) {
16154 #ifdef CONFIG_FRAME_POINTER
16155 - return *(unsigned long *)(regs->bp + sizeof(long));
16156 + return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
16157 #else
16158 unsigned long *sp =
16159 (unsigned long *)kernel_stack_pointer(regs);
16160 @@ -41,11 +41,17 @@ unsigned long profile_pc(struct pt_regs
16161 * or above a saved flags. Eflags has bits 22-31 zero,
16162 * kernel addresses don't.
16163 */
16164 +
16165 +#ifdef CONFIG_PAX_KERNEXEC
16166 + return ktla_ktva(sp[0]);
16167 +#else
16168 if (sp[0] >> 22)
16169 return sp[0];
16170 if (sp[1] >> 22)
16171 return sp[1];
16172 #endif
16173 +
16174 +#endif
16175 }
16176 return pc;
16177 }
16178 diff -urNp linux-3.0.8/arch/x86/kernel/tls.c linux-3.0.8/arch/x86/kernel/tls.c
16179 --- linux-3.0.8/arch/x86/kernel/tls.c 2011-07-21 22:17:23.000000000 -0400
16180 +++ linux-3.0.8/arch/x86/kernel/tls.c 2011-08-23 21:47:55.000000000 -0400
16181 @@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struc
16182 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
16183 return -EINVAL;
16184
16185 +#ifdef CONFIG_PAX_SEGMEXEC
16186 + if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
16187 + return -EINVAL;
16188 +#endif
16189 +
16190 set_tls_desc(p, idx, &info, 1);
16191
16192 return 0;
16193 diff -urNp linux-3.0.8/arch/x86/kernel/trampoline_32.S linux-3.0.8/arch/x86/kernel/trampoline_32.S
16194 --- linux-3.0.8/arch/x86/kernel/trampoline_32.S 2011-07-21 22:17:23.000000000 -0400
16195 +++ linux-3.0.8/arch/x86/kernel/trampoline_32.S 2011-08-23 21:47:55.000000000 -0400
16196 @@ -32,6 +32,12 @@
16197 #include <asm/segment.h>
16198 #include <asm/page_types.h>
16199
16200 +#ifdef CONFIG_PAX_KERNEXEC
16201 +#define ta(X) (X)
16202 +#else
16203 +#define ta(X) ((X) - __PAGE_OFFSET)
16204 +#endif
16205 +
16206 #ifdef CONFIG_SMP
16207
16208 .section ".x86_trampoline","a"
16209 @@ -62,7 +68,7 @@ r_base = .
16210 inc %ax # protected mode (PE) bit
16211 lmsw %ax # into protected mode
16212 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
16213 - ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
16214 + ljmpl $__BOOT_CS, $ta(startup_32_smp)
16215
16216 # These need to be in the same 64K segment as the above;
16217 # hence we don't use the boot_gdt_descr defined in head.S
16218 diff -urNp linux-3.0.8/arch/x86/kernel/trampoline_64.S linux-3.0.8/arch/x86/kernel/trampoline_64.S
16219 --- linux-3.0.8/arch/x86/kernel/trampoline_64.S 2011-07-21 22:17:23.000000000 -0400
16220 +++ linux-3.0.8/arch/x86/kernel/trampoline_64.S 2011-08-23 21:47:55.000000000 -0400
16221 @@ -90,7 +90,7 @@ startup_32:
16222 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
16223 movl %eax, %ds
16224
16225 - movl $X86_CR4_PAE, %eax
16226 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
16227 movl %eax, %cr4 # Enable PAE mode
16228
16229 # Setup trampoline 4 level pagetables
16230 @@ -138,7 +138,7 @@ tidt:
16231 # so the kernel can live anywhere
16232 .balign 4
16233 tgdt:
16234 - .short tgdt_end - tgdt # gdt limit
16235 + .short tgdt_end - tgdt - 1 # gdt limit
16236 .long tgdt - r_base
16237 .short 0
16238 .quad 0x00cf9b000000ffff # __KERNEL32_CS
16239 diff -urNp linux-3.0.8/arch/x86/kernel/traps.c linux-3.0.8/arch/x86/kernel/traps.c
16240 --- linux-3.0.8/arch/x86/kernel/traps.c 2011-07-21 22:17:23.000000000 -0400
16241 +++ linux-3.0.8/arch/x86/kernel/traps.c 2011-08-23 21:47:55.000000000 -0400
16242 @@ -70,12 +70,6 @@ asmlinkage int system_call(void);
16243
16244 /* Do we ignore FPU interrupts ? */
16245 char ignore_fpu_irq;
16246 -
16247 -/*
16248 - * The IDT has to be page-aligned to simplify the Pentium
16249 - * F0 0F bug workaround.
16250 - */
16251 -gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
16252 #endif
16253
16254 DECLARE_BITMAP(used_vectors, NR_VECTORS);
16255 @@ -117,13 +111,13 @@ static inline void preempt_conditional_c
16256 }
16257
16258 static void __kprobes
16259 -do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
16260 +do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
16261 long error_code, siginfo_t *info)
16262 {
16263 struct task_struct *tsk = current;
16264
16265 #ifdef CONFIG_X86_32
16266 - if (regs->flags & X86_VM_MASK) {
16267 + if (v8086_mode(regs)) {
16268 /*
16269 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
16270 * On nmi (interrupt 2), do_trap should not be called.
16271 @@ -134,7 +128,7 @@ do_trap(int trapnr, int signr, char *str
16272 }
16273 #endif
16274
16275 - if (!user_mode(regs))
16276 + if (!user_mode_novm(regs))
16277 goto kernel_trap;
16278
16279 #ifdef CONFIG_X86_32
16280 @@ -157,7 +151,7 @@ trap_signal:
16281 printk_ratelimit()) {
16282 printk(KERN_INFO
16283 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
16284 - tsk->comm, tsk->pid, str,
16285 + tsk->comm, task_pid_nr(tsk), str,
16286 regs->ip, regs->sp, error_code);
16287 print_vma_addr(" in ", regs->ip);
16288 printk("\n");
16289 @@ -174,8 +168,20 @@ kernel_trap:
16290 if (!fixup_exception(regs)) {
16291 tsk->thread.error_code = error_code;
16292 tsk->thread.trap_no = trapnr;
16293 +
16294 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16295 + if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
16296 + str = "PAX: suspicious stack segment fault";
16297 +#endif
16298 +
16299 die(str, regs, error_code);
16300 }
16301 +
16302 +#ifdef CONFIG_PAX_REFCOUNT
16303 + if (trapnr == 4)
16304 + pax_report_refcount_overflow(regs);
16305 +#endif
16306 +
16307 return;
16308
16309 #ifdef CONFIG_X86_32
16310 @@ -264,14 +270,30 @@ do_general_protection(struct pt_regs *re
16311 conditional_sti(regs);
16312
16313 #ifdef CONFIG_X86_32
16314 - if (regs->flags & X86_VM_MASK)
16315 + if (v8086_mode(regs))
16316 goto gp_in_vm86;
16317 #endif
16318
16319 tsk = current;
16320 - if (!user_mode(regs))
16321 + if (!user_mode_novm(regs))
16322 goto gp_in_kernel;
16323
16324 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
16325 + if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
16326 + struct mm_struct *mm = tsk->mm;
16327 + unsigned long limit;
16328 +
16329 + down_write(&mm->mmap_sem);
16330 + limit = mm->context.user_cs_limit;
16331 + if (limit < TASK_SIZE) {
16332 + track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
16333 + up_write(&mm->mmap_sem);
16334 + return;
16335 + }
16336 + up_write(&mm->mmap_sem);
16337 + }
16338 +#endif
16339 +
16340 tsk->thread.error_code = error_code;
16341 tsk->thread.trap_no = 13;
16342
16343 @@ -304,6 +326,13 @@ gp_in_kernel:
16344 if (notify_die(DIE_GPF, "general protection fault", regs,
16345 error_code, 13, SIGSEGV) == NOTIFY_STOP)
16346 return;
16347 +
16348 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16349 + if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
16350 + die("PAX: suspicious general protection fault", regs, error_code);
16351 + else
16352 +#endif
16353 +
16354 die("general protection fault", regs, error_code);
16355 }
16356
16357 @@ -433,6 +462,17 @@ static notrace __kprobes void default_do
16358 dotraplinkage notrace __kprobes void
16359 do_nmi(struct pt_regs *regs, long error_code)
16360 {
16361 +
16362 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16363 + if (!user_mode(regs)) {
16364 + unsigned long cs = regs->cs & 0xFFFF;
16365 + unsigned long ip = ktva_ktla(regs->ip);
16366 +
16367 + if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
16368 + regs->ip = ip;
16369 + }
16370 +#endif
16371 +
16372 nmi_enter();
16373
16374 inc_irq_stat(__nmi_count);
16375 @@ -569,7 +609,7 @@ dotraplinkage void __kprobes do_debug(st
16376 /* It's safe to allow irq's after DR6 has been saved */
16377 preempt_conditional_sti(regs);
16378
16379 - if (regs->flags & X86_VM_MASK) {
16380 + if (v8086_mode(regs)) {
16381 handle_vm86_trap((struct kernel_vm86_regs *) regs,
16382 error_code, 1);
16383 preempt_conditional_cli(regs);
16384 @@ -583,7 +623,7 @@ dotraplinkage void __kprobes do_debug(st
16385 * We already checked v86 mode above, so we can check for kernel mode
16386 * by just checking the CPL of CS.
16387 */
16388 - if ((dr6 & DR_STEP) && !user_mode(regs)) {
16389 + if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
16390 tsk->thread.debugreg6 &= ~DR_STEP;
16391 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
16392 regs->flags &= ~X86_EFLAGS_TF;
16393 @@ -612,7 +652,7 @@ void math_error(struct pt_regs *regs, in
16394 return;
16395 conditional_sti(regs);
16396
16397 - if (!user_mode_vm(regs))
16398 + if (!user_mode(regs))
16399 {
16400 if (!fixup_exception(regs)) {
16401 task->thread.error_code = error_code;
16402 @@ -723,7 +763,7 @@ asmlinkage void __attribute__((weak)) sm
16403 void __math_state_restore(void)
16404 {
16405 struct thread_info *thread = current_thread_info();
16406 - struct task_struct *tsk = thread->task;
16407 + struct task_struct *tsk = current;
16408
16409 /*
16410 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
16411 @@ -750,8 +790,7 @@ void __math_state_restore(void)
16412 */
16413 asmlinkage void math_state_restore(void)
16414 {
16415 - struct thread_info *thread = current_thread_info();
16416 - struct task_struct *tsk = thread->task;
16417 + struct task_struct *tsk = current;
16418
16419 if (!tsk_used_math(tsk)) {
16420 local_irq_enable();
16421 diff -urNp linux-3.0.8/arch/x86/kernel/verify_cpu.S linux-3.0.8/arch/x86/kernel/verify_cpu.S
16422 --- linux-3.0.8/arch/x86/kernel/verify_cpu.S 2011-07-21 22:17:23.000000000 -0400
16423 +++ linux-3.0.8/arch/x86/kernel/verify_cpu.S 2011-08-23 21:48:14.000000000 -0400
16424 @@ -20,6 +20,7 @@
16425 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
16426 * arch/x86/kernel/trampoline_64.S: secondary processor verification
16427 * arch/x86/kernel/head_32.S: processor startup
16428 + * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
16429 *
16430 * verify_cpu, returns the status of longmode and SSE in register %eax.
16431 * 0: Success 1: Failure
16432 diff -urNp linux-3.0.8/arch/x86/kernel/vm86_32.c linux-3.0.8/arch/x86/kernel/vm86_32.c
16433 --- linux-3.0.8/arch/x86/kernel/vm86_32.c 2011-07-21 22:17:23.000000000 -0400
16434 +++ linux-3.0.8/arch/x86/kernel/vm86_32.c 2011-08-23 21:48:14.000000000 -0400
16435 @@ -41,6 +41,7 @@
16436 #include <linux/ptrace.h>
16437 #include <linux/audit.h>
16438 #include <linux/stddef.h>
16439 +#include <linux/grsecurity.h>
16440
16441 #include <asm/uaccess.h>
16442 #include <asm/io.h>
16443 @@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct ke
16444 do_exit(SIGSEGV);
16445 }
16446
16447 - tss = &per_cpu(init_tss, get_cpu());
16448 + tss = init_tss + get_cpu();
16449 current->thread.sp0 = current->thread.saved_sp0;
16450 current->thread.sysenter_cs = __KERNEL_CS;
16451 load_sp0(tss, &current->thread);
16452 @@ -208,6 +209,13 @@ int sys_vm86old(struct vm86_struct __use
16453 struct task_struct *tsk;
16454 int tmp, ret = -EPERM;
16455
16456 +#ifdef CONFIG_GRKERNSEC_VM86
16457 + if (!capable(CAP_SYS_RAWIO)) {
16458 + gr_handle_vm86();
16459 + goto out;
16460 + }
16461 +#endif
16462 +
16463 tsk = current;
16464 if (tsk->thread.saved_sp0)
16465 goto out;
16466 @@ -238,6 +246,14 @@ int sys_vm86(unsigned long cmd, unsigned
16467 int tmp, ret;
16468 struct vm86plus_struct __user *v86;
16469
16470 +#ifdef CONFIG_GRKERNSEC_VM86
16471 + if (!capable(CAP_SYS_RAWIO)) {
16472 + gr_handle_vm86();
16473 + ret = -EPERM;
16474 + goto out;
16475 + }
16476 +#endif
16477 +
16478 tsk = current;
16479 switch (cmd) {
16480 case VM86_REQUEST_IRQ:
16481 @@ -324,7 +340,7 @@ static void do_sys_vm86(struct kernel_vm
16482 tsk->thread.saved_fs = info->regs32->fs;
16483 tsk->thread.saved_gs = get_user_gs(info->regs32);
16484
16485 - tss = &per_cpu(init_tss, get_cpu());
16486 + tss = init_tss + get_cpu();
16487 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
16488 if (cpu_has_sep)
16489 tsk->thread.sysenter_cs = 0;
16490 @@ -529,7 +545,7 @@ static void do_int(struct kernel_vm86_re
16491 goto cannot_handle;
16492 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
16493 goto cannot_handle;
16494 - intr_ptr = (unsigned long __user *) (i << 2);
16495 + intr_ptr = (__force unsigned long __user *) (i << 2);
16496 if (get_user(segoffs, intr_ptr))
16497 goto cannot_handle;
16498 if ((segoffs >> 16) == BIOSSEG)
16499 diff -urNp linux-3.0.8/arch/x86/kernel/vmlinux.lds.S linux-3.0.8/arch/x86/kernel/vmlinux.lds.S
16500 --- linux-3.0.8/arch/x86/kernel/vmlinux.lds.S 2011-07-21 22:17:23.000000000 -0400
16501 +++ linux-3.0.8/arch/x86/kernel/vmlinux.lds.S 2011-08-23 21:47:55.000000000 -0400
16502 @@ -26,6 +26,13 @@
16503 #include <asm/page_types.h>
16504 #include <asm/cache.h>
16505 #include <asm/boot.h>
16506 +#include <asm/segment.h>
16507 +
16508 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16509 +#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
16510 +#else
16511 +#define __KERNEL_TEXT_OFFSET 0
16512 +#endif
16513
16514 #undef i386 /* in case the preprocessor is a 32bit one */
16515
16516 @@ -69,31 +76,46 @@ jiffies_64 = jiffies;
16517
16518 PHDRS {
16519 text PT_LOAD FLAGS(5); /* R_E */
16520 +#ifdef CONFIG_X86_32
16521 + module PT_LOAD FLAGS(5); /* R_E */
16522 +#endif
16523 +#ifdef CONFIG_XEN
16524 + rodata PT_LOAD FLAGS(5); /* R_E */
16525 +#else
16526 + rodata PT_LOAD FLAGS(4); /* R__ */
16527 +#endif
16528 data PT_LOAD FLAGS(6); /* RW_ */
16529 #ifdef CONFIG_X86_64
16530 user PT_LOAD FLAGS(5); /* R_E */
16531 +#endif
16532 + init.begin PT_LOAD FLAGS(6); /* RW_ */
16533 #ifdef CONFIG_SMP
16534 percpu PT_LOAD FLAGS(6); /* RW_ */
16535 #endif
16536 + text.init PT_LOAD FLAGS(5); /* R_E */
16537 + text.exit PT_LOAD FLAGS(5); /* R_E */
16538 init PT_LOAD FLAGS(7); /* RWE */
16539 -#endif
16540 note PT_NOTE FLAGS(0); /* ___ */
16541 }
16542
16543 SECTIONS
16544 {
16545 #ifdef CONFIG_X86_32
16546 - . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
16547 - phys_startup_32 = startup_32 - LOAD_OFFSET;
16548 + . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
16549 #else
16550 - . = __START_KERNEL;
16551 - phys_startup_64 = startup_64 - LOAD_OFFSET;
16552 + . = __START_KERNEL;
16553 #endif
16554
16555 /* Text and read-only data */
16556 - .text : AT(ADDR(.text) - LOAD_OFFSET) {
16557 - _text = .;
16558 + .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
16559 /* bootstrapping code */
16560 +#ifdef CONFIG_X86_32
16561 + phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
16562 +#else
16563 + phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
16564 +#endif
16565 + __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
16566 + _text = .;
16567 HEAD_TEXT
16568 #ifdef CONFIG_X86_32
16569 . = ALIGN(PAGE_SIZE);
16570 @@ -109,13 +131,47 @@ SECTIONS
16571 IRQENTRY_TEXT
16572 *(.fixup)
16573 *(.gnu.warning)
16574 - /* End of text section */
16575 - _etext = .;
16576 } :text = 0x9090
16577
16578 - NOTES :text :note
16579 + . += __KERNEL_TEXT_OFFSET;
16580 +
16581 +#ifdef CONFIG_X86_32
16582 + . = ALIGN(PAGE_SIZE);
16583 + .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
16584 +
16585 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
16586 + MODULES_EXEC_VADDR = .;
16587 + BYTE(0)
16588 + . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
16589 + . = ALIGN(HPAGE_SIZE);
16590 + MODULES_EXEC_END = . - 1;
16591 +#endif
16592 +
16593 + } :module
16594 +#endif
16595 +
16596 + .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
16597 + /* End of text section */
16598 + _etext = . - __KERNEL_TEXT_OFFSET;
16599 + }
16600 +
16601 +#ifdef CONFIG_X86_32
16602 + . = ALIGN(PAGE_SIZE);
16603 + .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
16604 + *(.idt)
16605 + . = ALIGN(PAGE_SIZE);
16606 + *(.empty_zero_page)
16607 + *(.initial_pg_fixmap)
16608 + *(.initial_pg_pmd)
16609 + *(.initial_page_table)
16610 + *(.swapper_pg_dir)
16611 + } :rodata
16612 +#endif
16613 +
16614 + . = ALIGN(PAGE_SIZE);
16615 + NOTES :rodata :note
16616
16617 - EXCEPTION_TABLE(16) :text = 0x9090
16618 + EXCEPTION_TABLE(16) :rodata
16619
16620 #if defined(CONFIG_DEBUG_RODATA)
16621 /* .text should occupy whole number of pages */
16622 @@ -127,16 +183,20 @@ SECTIONS
16623
16624 /* Data */
16625 .data : AT(ADDR(.data) - LOAD_OFFSET) {
16626 +
16627 +#ifdef CONFIG_PAX_KERNEXEC
16628 + . = ALIGN(HPAGE_SIZE);
16629 +#else
16630 + . = ALIGN(PAGE_SIZE);
16631 +#endif
16632 +
16633 /* Start of data section */
16634 _sdata = .;
16635
16636 /* init_task */
16637 INIT_TASK_DATA(THREAD_SIZE)
16638
16639 -#ifdef CONFIG_X86_32
16640 - /* 32 bit has nosave before _edata */
16641 NOSAVE_DATA
16642 -#endif
16643
16644 PAGE_ALIGNED_DATA(PAGE_SIZE)
16645
16646 @@ -208,12 +268,19 @@ SECTIONS
16647 #endif /* CONFIG_X86_64 */
16648
16649 /* Init code and data - will be freed after init */
16650 - . = ALIGN(PAGE_SIZE);
16651 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
16652 + BYTE(0)
16653 +
16654 +#ifdef CONFIG_PAX_KERNEXEC
16655 + . = ALIGN(HPAGE_SIZE);
16656 +#else
16657 + . = ALIGN(PAGE_SIZE);
16658 +#endif
16659 +
16660 __init_begin = .; /* paired with __init_end */
16661 - }
16662 + } :init.begin
16663
16664 -#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
16665 +#ifdef CONFIG_SMP
16666 /*
16667 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
16668 * output PHDR, so the next output section - .init.text - should
16669 @@ -222,12 +289,27 @@ SECTIONS
16670 PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
16671 #endif
16672
16673 - INIT_TEXT_SECTION(PAGE_SIZE)
16674 -#ifdef CONFIG_X86_64
16675 - :init
16676 -#endif
16677 + . = ALIGN(PAGE_SIZE);
16678 + init_begin = .;
16679 + .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
16680 + VMLINUX_SYMBOL(_sinittext) = .;
16681 + INIT_TEXT
16682 + VMLINUX_SYMBOL(_einittext) = .;
16683 + . = ALIGN(PAGE_SIZE);
16684 + } :text.init
16685
16686 - INIT_DATA_SECTION(16)
16687 + /*
16688 + * .exit.text is discarded at runtime, not link time, to deal with
16689 + * references from .altinstructions and .eh_frame
16690 + */
16691 + .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
16692 + EXIT_TEXT
16693 + . = ALIGN(16);
16694 + } :text.exit
16695 + . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
16696 +
16697 + . = ALIGN(PAGE_SIZE);
16698 + INIT_DATA_SECTION(16) :init
16699
16700 /*
16701 * Code and data for a variety of lowlevel trampolines, to be
16702 @@ -301,19 +383,12 @@ SECTIONS
16703 }
16704
16705 . = ALIGN(8);
16706 - /*
16707 - * .exit.text is discard at runtime, not link time, to deal with
16708 - * references from .altinstructions and .eh_frame
16709 - */
16710 - .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
16711 - EXIT_TEXT
16712 - }
16713
16714 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
16715 EXIT_DATA
16716 }
16717
16718 -#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
16719 +#ifndef CONFIG_SMP
16720 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
16721 #endif
16722
16723 @@ -332,16 +407,10 @@ SECTIONS
16724 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
16725 __smp_locks = .;
16726 *(.smp_locks)
16727 - . = ALIGN(PAGE_SIZE);
16728 __smp_locks_end = .;
16729 + . = ALIGN(PAGE_SIZE);
16730 }
16731
16732 -#ifdef CONFIG_X86_64
16733 - .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
16734 - NOSAVE_DATA
16735 - }
16736 -#endif
16737 -
16738 /* BSS */
16739 . = ALIGN(PAGE_SIZE);
16740 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
16741 @@ -357,6 +426,7 @@ SECTIONS
16742 __brk_base = .;
16743 . += 64 * 1024; /* 64k alignment slop space */
16744 *(.brk_reservation) /* areas brk users have reserved */
16745 + . = ALIGN(HPAGE_SIZE);
16746 __brk_limit = .;
16747 }
16748
16749 @@ -383,13 +453,12 @@ SECTIONS
16750 * for the boot processor.
16751 */
16752 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
16753 -INIT_PER_CPU(gdt_page);
16754 INIT_PER_CPU(irq_stack_union);
16755
16756 /*
16757 * Build-time check on the image size:
16758 */
16759 -. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
16760 +. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
16761 "kernel image bigger than KERNEL_IMAGE_SIZE");
16762
16763 #ifdef CONFIG_SMP
16764 diff -urNp linux-3.0.8/arch/x86/kernel/vsyscall_64.c linux-3.0.8/arch/x86/kernel/vsyscall_64.c
16765 --- linux-3.0.8/arch/x86/kernel/vsyscall_64.c 2011-07-21 22:17:23.000000000 -0400
16766 +++ linux-3.0.8/arch/x86/kernel/vsyscall_64.c 2011-08-23 21:47:55.000000000 -0400
16767 @@ -53,7 +53,7 @@ DEFINE_VVAR(int, vgetcpu_mode);
16768 DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data) =
16769 {
16770 .lock = __SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock),
16771 - .sysctl_enabled = 1,
16772 + .sysctl_enabled = 0,
16773 };
16774
16775 void update_vsyscall_tz(void)
16776 @@ -231,7 +231,7 @@ static long __vsyscall(3) venosys_1(void
16777 static ctl_table kernel_table2[] = {
16778 { .procname = "vsyscall64",
16779 .data = &vsyscall_gtod_data.sysctl_enabled, .maxlen = sizeof(int),
16780 - .mode = 0644,
16781 + .mode = 0444,
16782 .proc_handler = proc_dointvec },
16783 {}
16784 };
16785 diff -urNp linux-3.0.8/arch/x86/kernel/x8664_ksyms_64.c linux-3.0.8/arch/x86/kernel/x8664_ksyms_64.c
16786 --- linux-3.0.8/arch/x86/kernel/x8664_ksyms_64.c 2011-07-21 22:17:23.000000000 -0400
16787 +++ linux-3.0.8/arch/x86/kernel/x8664_ksyms_64.c 2011-08-23 21:47:55.000000000 -0400
16788 @@ -29,8 +29,6 @@ EXPORT_SYMBOL(__put_user_8);
16789 EXPORT_SYMBOL(copy_user_generic_string);
16790 EXPORT_SYMBOL(copy_user_generic_unrolled);
16791 EXPORT_SYMBOL(__copy_user_nocache);
16792 -EXPORT_SYMBOL(_copy_from_user);
16793 -EXPORT_SYMBOL(_copy_to_user);
16794
16795 EXPORT_SYMBOL(copy_page);
16796 EXPORT_SYMBOL(clear_page);
16797 diff -urNp linux-3.0.8/arch/x86/kernel/xsave.c linux-3.0.8/arch/x86/kernel/xsave.c
16798 --- linux-3.0.8/arch/x86/kernel/xsave.c 2011-07-21 22:17:23.000000000 -0400
16799 +++ linux-3.0.8/arch/x86/kernel/xsave.c 2011-10-06 04:17:55.000000000 -0400
16800 @@ -130,7 +130,7 @@ int check_for_xstate(struct i387_fxsave_
16801 fx_sw_user->xstate_size > fx_sw_user->extended_size)
16802 return -EINVAL;
16803
16804 - err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
16805 + err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
16806 fx_sw_user->extended_size -
16807 FP_XSTATE_MAGIC2_SIZE));
16808 if (err)
16809 @@ -267,7 +267,7 @@ fx_only:
16810 * the other extended state.
16811 */
16812 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
16813 - return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
16814 + return fxrstor_checking((struct i387_fxsave_struct __force_kernel *)buf);
16815 }
16816
16817 /*
16818 @@ -299,7 +299,7 @@ int restore_i387_xstate(void __user *buf
16819 if (use_xsave())
16820 err = restore_user_xstate(buf);
16821 else
16822 - err = fxrstor_checking((__force struct i387_fxsave_struct *)
16823 + err = fxrstor_checking((struct i387_fxsave_struct __force_kernel *)
16824 buf);
16825 if (unlikely(err)) {
16826 /*
16827 diff -urNp linux-3.0.8/arch/x86/kvm/emulate.c linux-3.0.8/arch/x86/kvm/emulate.c
16828 --- linux-3.0.8/arch/x86/kvm/emulate.c 2011-07-21 22:17:23.000000000 -0400
16829 +++ linux-3.0.8/arch/x86/kvm/emulate.c 2011-08-23 21:47:55.000000000 -0400
16830 @@ -96,7 +96,7 @@
16831 #define Src2ImmByte (2<<29)
16832 #define Src2One (3<<29)
16833 #define Src2Imm (4<<29)
16834 -#define Src2Mask (7<<29)
16835 +#define Src2Mask (7U<<29)
16836
16837 #define X2(x...) x, x
16838 #define X3(x...) X2(x), x
16839 @@ -207,6 +207,7 @@ struct gprefix {
16840
16841 #define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix, _dsttype) \
16842 do { \
16843 + unsigned long _tmp; \
16844 __asm__ __volatile__ ( \
16845 _PRE_EFLAGS("0", "4", "2") \
16846 _op _suffix " %"_x"3,%1; " \
16847 @@ -220,8 +221,6 @@ struct gprefix {
16848 /* Raw emulation: instruction has two explicit operands. */
16849 #define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
16850 do { \
16851 - unsigned long _tmp; \
16852 - \
16853 switch ((_dst).bytes) { \
16854 case 2: \
16855 ____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w",u16);\
16856 @@ -237,7 +236,6 @@ struct gprefix {
16857
16858 #define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
16859 do { \
16860 - unsigned long _tmp; \
16861 switch ((_dst).bytes) { \
16862 case 1: \
16863 ____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b",u8); \
16864 diff -urNp linux-3.0.8/arch/x86/kvm/lapic.c linux-3.0.8/arch/x86/kvm/lapic.c
16865 --- linux-3.0.8/arch/x86/kvm/lapic.c 2011-07-21 22:17:23.000000000 -0400
16866 +++ linux-3.0.8/arch/x86/kvm/lapic.c 2011-08-23 21:47:55.000000000 -0400
16867 @@ -53,7 +53,7 @@
16868 #define APIC_BUS_CYCLE_NS 1
16869
16870 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
16871 -#define apic_debug(fmt, arg...)
16872 +#define apic_debug(fmt, arg...) do {} while (0)
16873
16874 #define APIC_LVT_NUM 6
16875 /* 14 is the version for Xeon and Pentium 8.4.8*/
16876 diff -urNp linux-3.0.8/arch/x86/kvm/mmu.c linux-3.0.8/arch/x86/kvm/mmu.c
16877 --- linux-3.0.8/arch/x86/kvm/mmu.c 2011-07-21 22:17:23.000000000 -0400
16878 +++ linux-3.0.8/arch/x86/kvm/mmu.c 2011-08-23 21:47:55.000000000 -0400
16879 @@ -3238,7 +3238,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *
16880
16881 pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
16882
16883 - invlpg_counter = atomic_read(&vcpu->kvm->arch.invlpg_counter);
16884 + invlpg_counter = atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter);
16885
16886 /*
16887 * Assume that the pte write on a page table of the same type
16888 @@ -3270,7 +3270,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *
16889 }
16890
16891 spin_lock(&vcpu->kvm->mmu_lock);
16892 - if (atomic_read(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
16893 + if (atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
16894 gentry = 0;
16895 kvm_mmu_free_some_pages(vcpu);
16896 ++vcpu->kvm->stat.mmu_pte_write;
16897 diff -urNp linux-3.0.8/arch/x86/kvm/paging_tmpl.h linux-3.0.8/arch/x86/kvm/paging_tmpl.h
16898 --- linux-3.0.8/arch/x86/kvm/paging_tmpl.h 2011-07-21 22:17:23.000000000 -0400
16899 +++ linux-3.0.8/arch/x86/kvm/paging_tmpl.h 2011-10-06 04:17:55.000000000 -0400
16900 @@ -182,7 +182,7 @@ walk:
16901 break;
16902 }
16903
16904 - ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
16905 + ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
16906 if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte)))) {
16907 present = false;
16908 break;
16909 @@ -583,6 +583,8 @@ static int FNAME(page_fault)(struct kvm_
16910 unsigned long mmu_seq;
16911 bool map_writable;
16912
16913 + pax_track_stack();
16914 +
16915 pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
16916
16917 r = mmu_topup_memory_caches(vcpu);
16918 @@ -703,7 +705,7 @@ static void FNAME(invlpg)(struct kvm_vcp
16919 if (need_flush)
16920 kvm_flush_remote_tlbs(vcpu->kvm);
16921
16922 - atomic_inc(&vcpu->kvm->arch.invlpg_counter);
16923 + atomic_inc_unchecked(&vcpu->kvm->arch.invlpg_counter);
16924
16925 spin_unlock(&vcpu->kvm->mmu_lock);
16926
16927 diff -urNp linux-3.0.8/arch/x86/kvm/svm.c linux-3.0.8/arch/x86/kvm/svm.c
16928 --- linux-3.0.8/arch/x86/kvm/svm.c 2011-07-21 22:17:23.000000000 -0400
16929 +++ linux-3.0.8/arch/x86/kvm/svm.c 2011-08-23 21:47:55.000000000 -0400
16930 @@ -3377,7 +3377,11 @@ static void reload_tss(struct kvm_vcpu *
16931 int cpu = raw_smp_processor_id();
16932
16933 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
16934 +
16935 + pax_open_kernel();
16936 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
16937 + pax_close_kernel();
16938 +
16939 load_TR_desc();
16940 }
16941
16942 @@ -3755,6 +3759,10 @@ static void svm_vcpu_run(struct kvm_vcpu
16943 #endif
16944 #endif
16945
16946 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
16947 + __set_fs(current_thread_info()->addr_limit);
16948 +#endif
16949 +
16950 reload_tss(vcpu);
16951
16952 local_irq_disable();
16953 diff -urNp linux-3.0.8/arch/x86/kvm/vmx.c linux-3.0.8/arch/x86/kvm/vmx.c
16954 --- linux-3.0.8/arch/x86/kvm/vmx.c 2011-07-21 22:17:23.000000000 -0400
16955 +++ linux-3.0.8/arch/x86/kvm/vmx.c 2011-08-23 21:47:55.000000000 -0400
16956 @@ -797,7 +797,11 @@ static void reload_tss(void)
16957 struct desc_struct *descs;
16958
16959 descs = (void *)gdt->address;
16960 +
16961 + pax_open_kernel();
16962 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
16963 + pax_close_kernel();
16964 +
16965 load_TR_desc();
16966 }
16967
16968 @@ -1747,8 +1751,11 @@ static __init int hardware_setup(void)
16969 if (!cpu_has_vmx_flexpriority())
16970 flexpriority_enabled = 0;
16971
16972 - if (!cpu_has_vmx_tpr_shadow())
16973 - kvm_x86_ops->update_cr8_intercept = NULL;
16974 + if (!cpu_has_vmx_tpr_shadow()) {
16975 + pax_open_kernel();
16976 + *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
16977 + pax_close_kernel();
16978 + }
16979
16980 if (enable_ept && !cpu_has_vmx_ept_2m_page())
16981 kvm_disable_largepages();
16982 @@ -2814,7 +2821,7 @@ static int vmx_vcpu_setup(struct vcpu_vm
16983 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
16984
16985 asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
16986 - vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
16987 + vmcs_writel(HOST_RIP, ktla_ktva(kvm_vmx_return)); /* 22.2.5 */
16988 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
16989 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
16990 vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host));
16991 @@ -4211,6 +4218,12 @@ static void __noclone vmx_vcpu_run(struc
16992 "jmp .Lkvm_vmx_return \n\t"
16993 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
16994 ".Lkvm_vmx_return: "
16995 +
16996 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16997 + "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
16998 + ".Lkvm_vmx_return2: "
16999 +#endif
17000 +
17001 /* Save guest registers, load host registers, keep flags */
17002 "mov %0, %c[wordsize](%%"R"sp) \n\t"
17003 "pop %0 \n\t"
17004 @@ -4259,6 +4272,11 @@ static void __noclone vmx_vcpu_run(struc
17005 #endif
17006 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
17007 [wordsize]"i"(sizeof(ulong))
17008 +
17009 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17010 + ,[cs]"i"(__KERNEL_CS)
17011 +#endif
17012 +
17013 : "cc", "memory"
17014 , R"ax", R"bx", R"di", R"si"
17015 #ifdef CONFIG_X86_64
17016 @@ -4276,7 +4294,16 @@ static void __noclone vmx_vcpu_run(struc
17017
17018 vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
17019
17020 - asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
17021 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
17022 +
17023 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17024 + loadsegment(fs, __KERNEL_PERCPU);
17025 +#endif
17026 +
17027 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
17028 + __set_fs(current_thread_info()->addr_limit);
17029 +#endif
17030 +
17031 vmx->launched = 1;
17032
17033 vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
17034 diff -urNp linux-3.0.8/arch/x86/kvm/x86.c linux-3.0.8/arch/x86/kvm/x86.c
17035 --- linux-3.0.8/arch/x86/kvm/x86.c 2011-07-21 22:17:23.000000000 -0400
17036 +++ linux-3.0.8/arch/x86/kvm/x86.c 2011-10-06 04:17:55.000000000 -0400
17037 @@ -1313,8 +1313,8 @@ static int xen_hvm_config(struct kvm_vcp
17038 {
17039 struct kvm *kvm = vcpu->kvm;
17040 int lm = is_long_mode(vcpu);
17041 - u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
17042 - : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
17043 + u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
17044 + : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
17045 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
17046 : kvm->arch.xen_hvm_config.blob_size_32;
17047 u32 page_num = data & ~PAGE_MASK;
17048 @@ -2057,6 +2057,8 @@ long kvm_arch_dev_ioctl(struct file *fil
17049 if (n < msr_list.nmsrs)
17050 goto out;
17051 r = -EFAULT;
17052 + if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
17053 + goto out;
17054 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
17055 num_msrs_to_save * sizeof(u32)))
17056 goto out;
17057 @@ -2229,15 +2231,20 @@ static int kvm_vcpu_ioctl_set_cpuid2(str
17058 struct kvm_cpuid2 *cpuid,
17059 struct kvm_cpuid_entry2 __user *entries)
17060 {
17061 - int r;
17062 + int r, i;
17063
17064 r = -E2BIG;
17065 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
17066 goto out;
17067 r = -EFAULT;
17068 - if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
17069 - cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
17070 + if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
17071 goto out;
17072 + for (i = 0; i < cpuid->nent; ++i) {
17073 + struct kvm_cpuid_entry2 cpuid_entry;
17074 + if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
17075 + goto out;
17076 + vcpu->arch.cpuid_entries[i] = cpuid_entry;
17077 + }
17078 vcpu->arch.cpuid_nent = cpuid->nent;
17079 kvm_apic_set_version(vcpu);
17080 kvm_x86_ops->cpuid_update(vcpu);
17081 @@ -2252,15 +2259,19 @@ static int kvm_vcpu_ioctl_get_cpuid2(str
17082 struct kvm_cpuid2 *cpuid,
17083 struct kvm_cpuid_entry2 __user *entries)
17084 {
17085 - int r;
17086 + int r, i;
17087
17088 r = -E2BIG;
17089 if (cpuid->nent < vcpu->arch.cpuid_nent)
17090 goto out;
17091 r = -EFAULT;
17092 - if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
17093 - vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
17094 + if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
17095 goto out;
17096 + for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
17097 + struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
17098 + if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
17099 + goto out;
17100 + }
17101 return 0;
17102
17103 out:
17104 @@ -2579,7 +2590,7 @@ static int kvm_vcpu_ioctl_set_lapic(stru
17105 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
17106 struct kvm_interrupt *irq)
17107 {
17108 - if (irq->irq < 0 || irq->irq >= 256)
17109 + if (irq->irq >= 256)
17110 return -EINVAL;
17111 if (irqchip_in_kernel(vcpu->kvm))
17112 return -ENXIO;
17113 @@ -4878,7 +4889,7 @@ void kvm_after_handle_nmi(struct kvm_vcp
17114 }
17115 EXPORT_SYMBOL_GPL(kvm_after_handle_nmi);
17116
17117 -int kvm_arch_init(void *opaque)
17118 +int kvm_arch_init(const void *opaque)
17119 {
17120 int r;
17121 struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
17122 diff -urNp linux-3.0.8/arch/x86/lguest/boot.c linux-3.0.8/arch/x86/lguest/boot.c
17123 --- linux-3.0.8/arch/x86/lguest/boot.c 2011-07-21 22:17:23.000000000 -0400
17124 +++ linux-3.0.8/arch/x86/lguest/boot.c 2011-08-23 21:47:55.000000000 -0400
17125 @@ -1176,9 +1176,10 @@ static __init int early_put_chars(u32 vt
17126 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
17127 * Launcher to reboot us.
17128 */
17129 -static void lguest_restart(char *reason)
17130 +static __noreturn void lguest_restart(char *reason)
17131 {
17132 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
17133 + BUG();
17134 }
17135
17136 /*G:050
17137 diff -urNp linux-3.0.8/arch/x86/lib/atomic64_32.c linux-3.0.8/arch/x86/lib/atomic64_32.c
17138 --- linux-3.0.8/arch/x86/lib/atomic64_32.c 2011-07-21 22:17:23.000000000 -0400
17139 +++ linux-3.0.8/arch/x86/lib/atomic64_32.c 2011-08-23 21:47:55.000000000 -0400
17140 @@ -8,18 +8,30 @@
17141
17142 long long atomic64_read_cx8(long long, const atomic64_t *v);
17143 EXPORT_SYMBOL(atomic64_read_cx8);
17144 +long long atomic64_read_unchecked_cx8(long long, const atomic64_unchecked_t *v);
17145 +EXPORT_SYMBOL(atomic64_read_unchecked_cx8);
17146 long long atomic64_set_cx8(long long, const atomic64_t *v);
17147 EXPORT_SYMBOL(atomic64_set_cx8);
17148 +long long atomic64_set_unchecked_cx8(long long, const atomic64_unchecked_t *v);
17149 +EXPORT_SYMBOL(atomic64_set_unchecked_cx8);
17150 long long atomic64_xchg_cx8(long long, unsigned high);
17151 EXPORT_SYMBOL(atomic64_xchg_cx8);
17152 long long atomic64_add_return_cx8(long long a, atomic64_t *v);
17153 EXPORT_SYMBOL(atomic64_add_return_cx8);
17154 +long long atomic64_add_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
17155 +EXPORT_SYMBOL(atomic64_add_return_unchecked_cx8);
17156 long long atomic64_sub_return_cx8(long long a, atomic64_t *v);
17157 EXPORT_SYMBOL(atomic64_sub_return_cx8);
17158 +long long atomic64_sub_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
17159 +EXPORT_SYMBOL(atomic64_sub_return_unchecked_cx8);
17160 long long atomic64_inc_return_cx8(long long a, atomic64_t *v);
17161 EXPORT_SYMBOL(atomic64_inc_return_cx8);
17162 +long long atomic64_inc_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
17163 +EXPORT_SYMBOL(atomic64_inc_return_unchecked_cx8);
17164 long long atomic64_dec_return_cx8(long long a, atomic64_t *v);
17165 EXPORT_SYMBOL(atomic64_dec_return_cx8);
17166 +long long atomic64_dec_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
17167 +EXPORT_SYMBOL(atomic64_dec_return_unchecked_cx8);
17168 long long atomic64_dec_if_positive_cx8(atomic64_t *v);
17169 EXPORT_SYMBOL(atomic64_dec_if_positive_cx8);
17170 int atomic64_inc_not_zero_cx8(atomic64_t *v);
17171 @@ -30,26 +42,46 @@ EXPORT_SYMBOL(atomic64_add_unless_cx8);
17172 #ifndef CONFIG_X86_CMPXCHG64
17173 long long atomic64_read_386(long long, const atomic64_t *v);
17174 EXPORT_SYMBOL(atomic64_read_386);
17175 +long long atomic64_read_unchecked_386(long long, const atomic64_unchecked_t *v);
17176 +EXPORT_SYMBOL(atomic64_read_unchecked_386);
17177 long long atomic64_set_386(long long, const atomic64_t *v);
17178 EXPORT_SYMBOL(atomic64_set_386);
17179 +long long atomic64_set_unchecked_386(long long, const atomic64_unchecked_t *v);
17180 +EXPORT_SYMBOL(atomic64_set_unchecked_386);
17181 long long atomic64_xchg_386(long long, unsigned high);
17182 EXPORT_SYMBOL(atomic64_xchg_386);
17183 long long atomic64_add_return_386(long long a, atomic64_t *v);
17184 EXPORT_SYMBOL(atomic64_add_return_386);
17185 +long long atomic64_add_return_unchecked_386(long long a, atomic64_unchecked_t *v);
17186 +EXPORT_SYMBOL(atomic64_add_return_unchecked_386);
17187 long long atomic64_sub_return_386(long long a, atomic64_t *v);
17188 EXPORT_SYMBOL(atomic64_sub_return_386);
17189 +long long atomic64_sub_return_unchecked_386(long long a, atomic64_unchecked_t *v);
17190 +EXPORT_SYMBOL(atomic64_sub_return_unchecked_386);
17191 long long atomic64_inc_return_386(long long a, atomic64_t *v);
17192 EXPORT_SYMBOL(atomic64_inc_return_386);
17193 +long long atomic64_inc_return_unchecked_386(long long a, atomic64_unchecked_t *v);
17194 +EXPORT_SYMBOL(atomic64_inc_return_unchecked_386);
17195 long long atomic64_dec_return_386(long long a, atomic64_t *v);
17196 EXPORT_SYMBOL(atomic64_dec_return_386);
17197 +long long atomic64_dec_return_unchecked_386(long long a, atomic64_unchecked_t *v);
17198 +EXPORT_SYMBOL(atomic64_dec_return_unchecked_386);
17199 long long atomic64_add_386(long long a, atomic64_t *v);
17200 EXPORT_SYMBOL(atomic64_add_386);
17201 +long long atomic64_add_unchecked_386(long long a, atomic64_unchecked_t *v);
17202 +EXPORT_SYMBOL(atomic64_add_unchecked_386);
17203 long long atomic64_sub_386(long long a, atomic64_t *v);
17204 EXPORT_SYMBOL(atomic64_sub_386);
17205 +long long atomic64_sub_unchecked_386(long long a, atomic64_unchecked_t *v);
17206 +EXPORT_SYMBOL(atomic64_sub_unchecked_386);
17207 long long atomic64_inc_386(long long a, atomic64_t *v);
17208 EXPORT_SYMBOL(atomic64_inc_386);
17209 +long long atomic64_inc_unchecked_386(long long a, atomic64_unchecked_t *v);
17210 +EXPORT_SYMBOL(atomic64_inc_unchecked_386);
17211 long long atomic64_dec_386(long long a, atomic64_t *v);
17212 EXPORT_SYMBOL(atomic64_dec_386);
17213 +long long atomic64_dec_unchecked_386(long long a, atomic64_unchecked_t *v);
17214 +EXPORT_SYMBOL(atomic64_dec_unchecked_386);
17215 long long atomic64_dec_if_positive_386(atomic64_t *v);
17216 EXPORT_SYMBOL(atomic64_dec_if_positive_386);
17217 int atomic64_inc_not_zero_386(atomic64_t *v);
17218 diff -urNp linux-3.0.8/arch/x86/lib/atomic64_386_32.S linux-3.0.8/arch/x86/lib/atomic64_386_32.S
17219 --- linux-3.0.8/arch/x86/lib/atomic64_386_32.S 2011-07-21 22:17:23.000000000 -0400
17220 +++ linux-3.0.8/arch/x86/lib/atomic64_386_32.S 2011-08-23 21:47:55.000000000 -0400
17221 @@ -48,6 +48,10 @@ BEGIN(read)
17222 movl (v), %eax
17223 movl 4(v), %edx
17224 RET_ENDP
17225 +BEGIN(read_unchecked)
17226 + movl (v), %eax
17227 + movl 4(v), %edx
17228 +RET_ENDP
17229 #undef v
17230
17231 #define v %esi
17232 @@ -55,6 +59,10 @@ BEGIN(set)
17233 movl %ebx, (v)
17234 movl %ecx, 4(v)
17235 RET_ENDP
17236 +BEGIN(set_unchecked)
17237 + movl %ebx, (v)
17238 + movl %ecx, 4(v)
17239 +RET_ENDP
17240 #undef v
17241
17242 #define v %esi
17243 @@ -70,6 +78,20 @@ RET_ENDP
17244 BEGIN(add)
17245 addl %eax, (v)
17246 adcl %edx, 4(v)
17247 +
17248 +#ifdef CONFIG_PAX_REFCOUNT
17249 + jno 0f
17250 + subl %eax, (v)
17251 + sbbl %edx, 4(v)
17252 + int $4
17253 +0:
17254 + _ASM_EXTABLE(0b, 0b)
17255 +#endif
17256 +
17257 +RET_ENDP
17258 +BEGIN(add_unchecked)
17259 + addl %eax, (v)
17260 + adcl %edx, 4(v)
17261 RET_ENDP
17262 #undef v
17263
17264 @@ -77,6 +99,24 @@ RET_ENDP
17265 BEGIN(add_return)
17266 addl (v), %eax
17267 adcl 4(v), %edx
17268 +
17269 +#ifdef CONFIG_PAX_REFCOUNT
17270 + into
17271 +1234:
17272 + _ASM_EXTABLE(1234b, 2f)
17273 +#endif
17274 +
17275 + movl %eax, (v)
17276 + movl %edx, 4(v)
17277 +
17278 +#ifdef CONFIG_PAX_REFCOUNT
17279 +2:
17280 +#endif
17281 +
17282 +RET_ENDP
17283 +BEGIN(add_return_unchecked)
17284 + addl (v), %eax
17285 + adcl 4(v), %edx
17286 movl %eax, (v)
17287 movl %edx, 4(v)
17288 RET_ENDP
17289 @@ -86,6 +126,20 @@ RET_ENDP
17290 BEGIN(sub)
17291 subl %eax, (v)
17292 sbbl %edx, 4(v)
17293 +
17294 +#ifdef CONFIG_PAX_REFCOUNT
17295 + jno 0f
17296 + addl %eax, (v)
17297 + adcl %edx, 4(v)
17298 + int $4
17299 +0:
17300 + _ASM_EXTABLE(0b, 0b)
17301 +#endif
17302 +
17303 +RET_ENDP
17304 +BEGIN(sub_unchecked)
17305 + subl %eax, (v)
17306 + sbbl %edx, 4(v)
17307 RET_ENDP
17308 #undef v
17309
17310 @@ -96,6 +150,27 @@ BEGIN(sub_return)
17311 sbbl $0, %edx
17312 addl (v), %eax
17313 adcl 4(v), %edx
17314 +
17315 +#ifdef CONFIG_PAX_REFCOUNT
17316 + into
17317 +1234:
17318 + _ASM_EXTABLE(1234b, 2f)
17319 +#endif
17320 +
17321 + movl %eax, (v)
17322 + movl %edx, 4(v)
17323 +
17324 +#ifdef CONFIG_PAX_REFCOUNT
17325 +2:
17326 +#endif
17327 +
17328 +RET_ENDP
17329 +BEGIN(sub_return_unchecked)
17330 + negl %edx
17331 + negl %eax
17332 + sbbl $0, %edx
17333 + addl (v), %eax
17334 + adcl 4(v), %edx
17335 movl %eax, (v)
17336 movl %edx, 4(v)
17337 RET_ENDP
17338 @@ -105,6 +180,20 @@ RET_ENDP
17339 BEGIN(inc)
17340 addl $1, (v)
17341 adcl $0, 4(v)
17342 +
17343 +#ifdef CONFIG_PAX_REFCOUNT
17344 + jno 0f
17345 + subl $1, (v)
17346 + sbbl $0, 4(v)
17347 + int $4
17348 +0:
17349 + _ASM_EXTABLE(0b, 0b)
17350 +#endif
17351 +
17352 +RET_ENDP
17353 +BEGIN(inc_unchecked)
17354 + addl $1, (v)
17355 + adcl $0, 4(v)
17356 RET_ENDP
17357 #undef v
17358
17359 @@ -114,6 +203,26 @@ BEGIN(inc_return)
17360 movl 4(v), %edx
17361 addl $1, %eax
17362 adcl $0, %edx
17363 +
17364 +#ifdef CONFIG_PAX_REFCOUNT
17365 + into
17366 +1234:
17367 + _ASM_EXTABLE(1234b, 2f)
17368 +#endif
17369 +
17370 + movl %eax, (v)
17371 + movl %edx, 4(v)
17372 +
17373 +#ifdef CONFIG_PAX_REFCOUNT
17374 +2:
17375 +#endif
17376 +
17377 +RET_ENDP
17378 +BEGIN(inc_return_unchecked)
17379 + movl (v), %eax
17380 + movl 4(v), %edx
17381 + addl $1, %eax
17382 + adcl $0, %edx
17383 movl %eax, (v)
17384 movl %edx, 4(v)
17385 RET_ENDP
17386 @@ -123,6 +232,20 @@ RET_ENDP
17387 BEGIN(dec)
17388 subl $1, (v)
17389 sbbl $0, 4(v)
17390 +
17391 +#ifdef CONFIG_PAX_REFCOUNT
17392 + jno 0f
17393 + addl $1, (v)
17394 + adcl $0, 4(v)
17395 + int $4
17396 +0:
17397 + _ASM_EXTABLE(0b, 0b)
17398 +#endif
17399 +
17400 +RET_ENDP
17401 +BEGIN(dec_unchecked)
17402 + subl $1, (v)
17403 + sbbl $0, 4(v)
17404 RET_ENDP
17405 #undef v
17406
17407 @@ -132,6 +255,26 @@ BEGIN(dec_return)
17408 movl 4(v), %edx
17409 subl $1, %eax
17410 sbbl $0, %edx
17411 +
17412 +#ifdef CONFIG_PAX_REFCOUNT
17413 + into
17414 +1234:
17415 + _ASM_EXTABLE(1234b, 2f)
17416 +#endif
17417 +
17418 + movl %eax, (v)
17419 + movl %edx, 4(v)
17420 +
17421 +#ifdef CONFIG_PAX_REFCOUNT
17422 +2:
17423 +#endif
17424 +
17425 +RET_ENDP
17426 +BEGIN(dec_return_unchecked)
17427 + movl (v), %eax
17428 + movl 4(v), %edx
17429 + subl $1, %eax
17430 + sbbl $0, %edx
17431 movl %eax, (v)
17432 movl %edx, 4(v)
17433 RET_ENDP
17434 @@ -143,6 +286,13 @@ BEGIN(add_unless)
17435 adcl %edx, %edi
17436 addl (v), %eax
17437 adcl 4(v), %edx
17438 +
17439 +#ifdef CONFIG_PAX_REFCOUNT
17440 + into
17441 +1234:
17442 + _ASM_EXTABLE(1234b, 2f)
17443 +#endif
17444 +
17445 cmpl %eax, %esi
17446 je 3f
17447 1:
17448 @@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
17449 1:
17450 addl $1, %eax
17451 adcl $0, %edx
17452 +
17453 +#ifdef CONFIG_PAX_REFCOUNT
17454 + into
17455 +1234:
17456 + _ASM_EXTABLE(1234b, 2f)
17457 +#endif
17458 +
17459 movl %eax, (v)
17460 movl %edx, 4(v)
17461 movl $1, %eax
17462 @@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
17463 movl 4(v), %edx
17464 subl $1, %eax
17465 sbbl $0, %edx
17466 +
17467 +#ifdef CONFIG_PAX_REFCOUNT
17468 + into
17469 +1234:
17470 + _ASM_EXTABLE(1234b, 1f)
17471 +#endif
17472 +
17473 js 1f
17474 movl %eax, (v)
17475 movl %edx, 4(v)
17476 diff -urNp linux-3.0.8/arch/x86/lib/atomic64_cx8_32.S linux-3.0.8/arch/x86/lib/atomic64_cx8_32.S
17477 --- linux-3.0.8/arch/x86/lib/atomic64_cx8_32.S 2011-07-21 22:17:23.000000000 -0400
17478 +++ linux-3.0.8/arch/x86/lib/atomic64_cx8_32.S 2011-10-06 04:17:55.000000000 -0400
17479 @@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
17480 CFI_STARTPROC
17481
17482 read64 %ecx
17483 + pax_force_retaddr
17484 ret
17485 CFI_ENDPROC
17486 ENDPROC(atomic64_read_cx8)
17487
17488 +ENTRY(atomic64_read_unchecked_cx8)
17489 + CFI_STARTPROC
17490 +
17491 + read64 %ecx
17492 + pax_force_retaddr
17493 + ret
17494 + CFI_ENDPROC
17495 +ENDPROC(atomic64_read_unchecked_cx8)
17496 +
17497 ENTRY(atomic64_set_cx8)
17498 CFI_STARTPROC
17499
17500 @@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8)
17501 cmpxchg8b (%esi)
17502 jne 1b
17503
17504 + pax_force_retaddr
17505 ret
17506 CFI_ENDPROC
17507 ENDPROC(atomic64_set_cx8)
17508
17509 +ENTRY(atomic64_set_unchecked_cx8)
17510 + CFI_STARTPROC
17511 +
17512 +1:
17513 +/* we don't need LOCK_PREFIX since aligned 64-bit writes
17514 + * are atomic on 586 and newer */
17515 + cmpxchg8b (%esi)
17516 + jne 1b
17517 +
17518 + pax_force_retaddr
17519 + ret
17520 + CFI_ENDPROC
17521 +ENDPROC(atomic64_set_unchecked_cx8)
17522 +
17523 ENTRY(atomic64_xchg_cx8)
17524 CFI_STARTPROC
17525
17526 @@ -62,12 +87,13 @@ ENTRY(atomic64_xchg_cx8)
17527 cmpxchg8b (%esi)
17528 jne 1b
17529
17530 + pax_force_retaddr
17531 ret
17532 CFI_ENDPROC
17533 ENDPROC(atomic64_xchg_cx8)
17534
17535 -.macro addsub_return func ins insc
17536 -ENTRY(atomic64_\func\()_return_cx8)
17537 +.macro addsub_return func ins insc unchecked=""
17538 +ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
17539 CFI_STARTPROC
17540 SAVE ebp
17541 SAVE ebx
17542 @@ -84,27 +110,44 @@ ENTRY(atomic64_\func\()_return_cx8)
17543 movl %edx, %ecx
17544 \ins\()l %esi, %ebx
17545 \insc\()l %edi, %ecx
17546 +
17547 +.ifb \unchecked
17548 +#ifdef CONFIG_PAX_REFCOUNT
17549 + into
17550 +2:
17551 + _ASM_EXTABLE(2b, 3f)
17552 +#endif
17553 +.endif
17554 +
17555 LOCK_PREFIX
17556 cmpxchg8b (%ebp)
17557 jne 1b
17558 -
17559 -10:
17560 movl %ebx, %eax
17561 movl %ecx, %edx
17562 +
17563 +.ifb \unchecked
17564 +#ifdef CONFIG_PAX_REFCOUNT
17565 +3:
17566 +#endif
17567 +.endif
17568 +
17569 RESTORE edi
17570 RESTORE esi
17571 RESTORE ebx
17572 RESTORE ebp
17573 + pax_force_retaddr
17574 ret
17575 CFI_ENDPROC
17576 -ENDPROC(atomic64_\func\()_return_cx8)
17577 +ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
17578 .endm
17579
17580 addsub_return add add adc
17581 addsub_return sub sub sbb
17582 +addsub_return add add adc _unchecked
17583 +addsub_return sub sub sbb _unchecked
17584
17585 -.macro incdec_return func ins insc
17586 -ENTRY(atomic64_\func\()_return_cx8)
17587 +.macro incdec_return func ins insc unchecked
17588 +ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
17589 CFI_STARTPROC
17590 SAVE ebx
17591
17592 @@ -114,21 +157,39 @@ ENTRY(atomic64_\func\()_return_cx8)
17593 movl %edx, %ecx
17594 \ins\()l $1, %ebx
17595 \insc\()l $0, %ecx
17596 +
17597 +.ifb \unchecked
17598 +#ifdef CONFIG_PAX_REFCOUNT
17599 + into
17600 +2:
17601 + _ASM_EXTABLE(2b, 3f)
17602 +#endif
17603 +.endif
17604 +
17605 LOCK_PREFIX
17606 cmpxchg8b (%esi)
17607 jne 1b
17608
17609 -10:
17610 movl %ebx, %eax
17611 movl %ecx, %edx
17612 +
17613 +.ifb \unchecked
17614 +#ifdef CONFIG_PAX_REFCOUNT
17615 +3:
17616 +#endif
17617 +.endif
17618 +
17619 RESTORE ebx
17620 + pax_force_retaddr
17621 ret
17622 CFI_ENDPROC
17623 -ENDPROC(atomic64_\func\()_return_cx8)
17624 +ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
17625 .endm
17626
17627 incdec_return inc add adc
17628 incdec_return dec sub sbb
17629 +incdec_return inc add adc _unchecked
17630 +incdec_return dec sub sbb _unchecked
17631
17632 ENTRY(atomic64_dec_if_positive_cx8)
17633 CFI_STARTPROC
17634 @@ -140,6 +201,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
17635 movl %edx, %ecx
17636 subl $1, %ebx
17637 sbb $0, %ecx
17638 +
17639 +#ifdef CONFIG_PAX_REFCOUNT
17640 + into
17641 +1234:
17642 + _ASM_EXTABLE(1234b, 2f)
17643 +#endif
17644 +
17645 js 2f
17646 LOCK_PREFIX
17647 cmpxchg8b (%esi)
17648 @@ -149,6 +217,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
17649 movl %ebx, %eax
17650 movl %ecx, %edx
17651 RESTORE ebx
17652 + pax_force_retaddr
17653 ret
17654 CFI_ENDPROC
17655 ENDPROC(atomic64_dec_if_positive_cx8)
17656 @@ -174,6 +243,13 @@ ENTRY(atomic64_add_unless_cx8)
17657 movl %edx, %ecx
17658 addl %esi, %ebx
17659 adcl %edi, %ecx
17660 +
17661 +#ifdef CONFIG_PAX_REFCOUNT
17662 + into
17663 +1234:
17664 + _ASM_EXTABLE(1234b, 3f)
17665 +#endif
17666 +
17667 LOCK_PREFIX
17668 cmpxchg8b (%ebp)
17669 jne 1b
17670 @@ -184,6 +260,7 @@ ENTRY(atomic64_add_unless_cx8)
17671 CFI_ADJUST_CFA_OFFSET -8
17672 RESTORE ebx
17673 RESTORE ebp
17674 + pax_force_retaddr
17675 ret
17676 4:
17677 cmpl %edx, 4(%esp)
17678 @@ -206,6 +283,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
17679 movl %edx, %ecx
17680 addl $1, %ebx
17681 adcl $0, %ecx
17682 +
17683 +#ifdef CONFIG_PAX_REFCOUNT
17684 + into
17685 +1234:
17686 + _ASM_EXTABLE(1234b, 3f)
17687 +#endif
17688 +
17689 LOCK_PREFIX
17690 cmpxchg8b (%esi)
17691 jne 1b
17692 @@ -213,6 +297,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
17693 movl $1, %eax
17694 3:
17695 RESTORE ebx
17696 + pax_force_retaddr
17697 ret
17698 4:
17699 testl %edx, %edx
17700 diff -urNp linux-3.0.8/arch/x86/lib/checksum_32.S linux-3.0.8/arch/x86/lib/checksum_32.S
17701 --- linux-3.0.8/arch/x86/lib/checksum_32.S 2011-07-21 22:17:23.000000000 -0400
17702 +++ linux-3.0.8/arch/x86/lib/checksum_32.S 2011-08-23 21:47:55.000000000 -0400
17703 @@ -28,7 +28,8 @@
17704 #include <linux/linkage.h>
17705 #include <asm/dwarf2.h>
17706 #include <asm/errno.h>
17707 -
17708 +#include <asm/segment.h>
17709 +
17710 /*
17711 * computes a partial checksum, e.g. for TCP/UDP fragments
17712 */
17713 @@ -296,9 +297,24 @@ unsigned int csum_partial_copy_generic (
17714
17715 #define ARGBASE 16
17716 #define FP 12
17717 -
17718 -ENTRY(csum_partial_copy_generic)
17719 +
17720 +ENTRY(csum_partial_copy_generic_to_user)
17721 CFI_STARTPROC
17722 +
17723 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17724 + pushl_cfi %gs
17725 + popl_cfi %es
17726 + jmp csum_partial_copy_generic
17727 +#endif
17728 +
17729 +ENTRY(csum_partial_copy_generic_from_user)
17730 +
17731 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17732 + pushl_cfi %gs
17733 + popl_cfi %ds
17734 +#endif
17735 +
17736 +ENTRY(csum_partial_copy_generic)
17737 subl $4,%esp
17738 CFI_ADJUST_CFA_OFFSET 4
17739 pushl_cfi %edi
17740 @@ -320,7 +336,7 @@ ENTRY(csum_partial_copy_generic)
17741 jmp 4f
17742 SRC(1: movw (%esi), %bx )
17743 addl $2, %esi
17744 -DST( movw %bx, (%edi) )
17745 +DST( movw %bx, %es:(%edi) )
17746 addl $2, %edi
17747 addw %bx, %ax
17748 adcl $0, %eax
17749 @@ -332,30 +348,30 @@ DST( movw %bx, (%edi) )
17750 SRC(1: movl (%esi), %ebx )
17751 SRC( movl 4(%esi), %edx )
17752 adcl %ebx, %eax
17753 -DST( movl %ebx, (%edi) )
17754 +DST( movl %ebx, %es:(%edi) )
17755 adcl %edx, %eax
17756 -DST( movl %edx, 4(%edi) )
17757 +DST( movl %edx, %es:4(%edi) )
17758
17759 SRC( movl 8(%esi), %ebx )
17760 SRC( movl 12(%esi), %edx )
17761 adcl %ebx, %eax
17762 -DST( movl %ebx, 8(%edi) )
17763 +DST( movl %ebx, %es:8(%edi) )
17764 adcl %edx, %eax
17765 -DST( movl %edx, 12(%edi) )
17766 +DST( movl %edx, %es:12(%edi) )
17767
17768 SRC( movl 16(%esi), %ebx )
17769 SRC( movl 20(%esi), %edx )
17770 adcl %ebx, %eax
17771 -DST( movl %ebx, 16(%edi) )
17772 +DST( movl %ebx, %es:16(%edi) )
17773 adcl %edx, %eax
17774 -DST( movl %edx, 20(%edi) )
17775 +DST( movl %edx, %es:20(%edi) )
17776
17777 SRC( movl 24(%esi), %ebx )
17778 SRC( movl 28(%esi), %edx )
17779 adcl %ebx, %eax
17780 -DST( movl %ebx, 24(%edi) )
17781 +DST( movl %ebx, %es:24(%edi) )
17782 adcl %edx, %eax
17783 -DST( movl %edx, 28(%edi) )
17784 +DST( movl %edx, %es:28(%edi) )
17785
17786 lea 32(%esi), %esi
17787 lea 32(%edi), %edi
17788 @@ -369,7 +385,7 @@ DST( movl %edx, 28(%edi) )
17789 shrl $2, %edx # This clears CF
17790 SRC(3: movl (%esi), %ebx )
17791 adcl %ebx, %eax
17792 -DST( movl %ebx, (%edi) )
17793 +DST( movl %ebx, %es:(%edi) )
17794 lea 4(%esi), %esi
17795 lea 4(%edi), %edi
17796 dec %edx
17797 @@ -381,12 +397,12 @@ DST( movl %ebx, (%edi) )
17798 jb 5f
17799 SRC( movw (%esi), %cx )
17800 leal 2(%esi), %esi
17801 -DST( movw %cx, (%edi) )
17802 +DST( movw %cx, %es:(%edi) )
17803 leal 2(%edi), %edi
17804 je 6f
17805 shll $16,%ecx
17806 SRC(5: movb (%esi), %cl )
17807 -DST( movb %cl, (%edi) )
17808 +DST( movb %cl, %es:(%edi) )
17809 6: addl %ecx, %eax
17810 adcl $0, %eax
17811 7:
17812 @@ -397,7 +413,7 @@ DST( movb %cl, (%edi) )
17813
17814 6001:
17815 movl ARGBASE+20(%esp), %ebx # src_err_ptr
17816 - movl $-EFAULT, (%ebx)
17817 + movl $-EFAULT, %ss:(%ebx)
17818
17819 # zero the complete destination - computing the rest
17820 # is too much work
17821 @@ -410,11 +426,15 @@ DST( movb %cl, (%edi) )
17822
17823 6002:
17824 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
17825 - movl $-EFAULT,(%ebx)
17826 + movl $-EFAULT,%ss:(%ebx)
17827 jmp 5000b
17828
17829 .previous
17830
17831 + pushl_cfi %ss
17832 + popl_cfi %ds
17833 + pushl_cfi %ss
17834 + popl_cfi %es
17835 popl_cfi %ebx
17836 CFI_RESTORE ebx
17837 popl_cfi %esi
17838 @@ -424,26 +444,43 @@ DST( movb %cl, (%edi) )
17839 popl_cfi %ecx # equivalent to addl $4,%esp
17840 ret
17841 CFI_ENDPROC
17842 -ENDPROC(csum_partial_copy_generic)
17843 +ENDPROC(csum_partial_copy_generic_to_user)
17844
17845 #else
17846
17847 /* Version for PentiumII/PPro */
17848
17849 #define ROUND1(x) \
17850 + nop; nop; nop; \
17851 SRC(movl x(%esi), %ebx ) ; \
17852 addl %ebx, %eax ; \
17853 - DST(movl %ebx, x(%edi) ) ;
17854 + DST(movl %ebx, %es:x(%edi)) ;
17855
17856 #define ROUND(x) \
17857 + nop; nop; nop; \
17858 SRC(movl x(%esi), %ebx ) ; \
17859 adcl %ebx, %eax ; \
17860 - DST(movl %ebx, x(%edi) ) ;
17861 + DST(movl %ebx, %es:x(%edi)) ;
17862
17863 #define ARGBASE 12
17864 -
17865 -ENTRY(csum_partial_copy_generic)
17866 +
17867 +ENTRY(csum_partial_copy_generic_to_user)
17868 CFI_STARTPROC
17869 +
17870 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17871 + pushl_cfi %gs
17872 + popl_cfi %es
17873 + jmp csum_partial_copy_generic
17874 +#endif
17875 +
17876 +ENTRY(csum_partial_copy_generic_from_user)
17877 +
17878 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17879 + pushl_cfi %gs
17880 + popl_cfi %ds
17881 +#endif
17882 +
17883 +ENTRY(csum_partial_copy_generic)
17884 pushl_cfi %ebx
17885 CFI_REL_OFFSET ebx, 0
17886 pushl_cfi %edi
17887 @@ -464,7 +501,7 @@ ENTRY(csum_partial_copy_generic)
17888 subl %ebx, %edi
17889 lea -1(%esi),%edx
17890 andl $-32,%edx
17891 - lea 3f(%ebx,%ebx), %ebx
17892 + lea 3f(%ebx,%ebx,2), %ebx
17893 testl %esi, %esi
17894 jmp *%ebx
17895 1: addl $64,%esi
17896 @@ -485,19 +522,19 @@ ENTRY(csum_partial_copy_generic)
17897 jb 5f
17898 SRC( movw (%esi), %dx )
17899 leal 2(%esi), %esi
17900 -DST( movw %dx, (%edi) )
17901 +DST( movw %dx, %es:(%edi) )
17902 leal 2(%edi), %edi
17903 je 6f
17904 shll $16,%edx
17905 5:
17906 SRC( movb (%esi), %dl )
17907 -DST( movb %dl, (%edi) )
17908 +DST( movb %dl, %es:(%edi) )
17909 6: addl %edx, %eax
17910 adcl $0, %eax
17911 7:
17912 .section .fixup, "ax"
17913 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
17914 - movl $-EFAULT, (%ebx)
17915 + movl $-EFAULT, %ss:(%ebx)
17916 # zero the complete destination (computing the rest is too much work)
17917 movl ARGBASE+8(%esp),%edi # dst
17918 movl ARGBASE+12(%esp),%ecx # len
17919 @@ -505,10 +542,17 @@ DST( movb %dl, (%edi) )
17920 rep; stosb
17921 jmp 7b
17922 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
17923 - movl $-EFAULT, (%ebx)
17924 + movl $-EFAULT, %ss:(%ebx)
17925 jmp 7b
17926 .previous
17927
17928 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17929 + pushl_cfi %ss
17930 + popl_cfi %ds
17931 + pushl_cfi %ss
17932 + popl_cfi %es
17933 +#endif
17934 +
17935 popl_cfi %esi
17936 CFI_RESTORE esi
17937 popl_cfi %edi
17938 @@ -517,7 +561,7 @@ DST( movb %dl, (%edi) )
17939 CFI_RESTORE ebx
17940 ret
17941 CFI_ENDPROC
17942 -ENDPROC(csum_partial_copy_generic)
17943 +ENDPROC(csum_partial_copy_generic_to_user)
17944
17945 #undef ROUND
17946 #undef ROUND1
17947 diff -urNp linux-3.0.8/arch/x86/lib/clear_page_64.S linux-3.0.8/arch/x86/lib/clear_page_64.S
17948 --- linux-3.0.8/arch/x86/lib/clear_page_64.S 2011-07-21 22:17:23.000000000 -0400
17949 +++ linux-3.0.8/arch/x86/lib/clear_page_64.S 2011-10-06 04:17:55.000000000 -0400
17950 @@ -11,6 +11,7 @@ ENTRY(clear_page_c)
17951 movl $4096/8,%ecx
17952 xorl %eax,%eax
17953 rep stosq
17954 + pax_force_retaddr
17955 ret
17956 CFI_ENDPROC
17957 ENDPROC(clear_page_c)
17958 @@ -20,6 +21,7 @@ ENTRY(clear_page_c_e)
17959 movl $4096,%ecx
17960 xorl %eax,%eax
17961 rep stosb
17962 + pax_force_retaddr
17963 ret
17964 CFI_ENDPROC
17965 ENDPROC(clear_page_c_e)
17966 @@ -43,6 +45,7 @@ ENTRY(clear_page)
17967 leaq 64(%rdi),%rdi
17968 jnz .Lloop
17969 nop
17970 + pax_force_retaddr
17971 ret
17972 CFI_ENDPROC
17973 .Lclear_page_end:
17974 @@ -58,7 +61,7 @@ ENDPROC(clear_page)
17975
17976 #include <asm/cpufeature.h>
17977
17978 - .section .altinstr_replacement,"ax"
17979 + .section .altinstr_replacement,"a"
17980 1: .byte 0xeb /* jmp <disp8> */
17981 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
17982 2: .byte 0xeb /* jmp <disp8> */
17983 diff -urNp linux-3.0.8/arch/x86/lib/cmpxchg16b_emu.S linux-3.0.8/arch/x86/lib/cmpxchg16b_emu.S
17984 --- linux-3.0.8/arch/x86/lib/cmpxchg16b_emu.S 2011-07-21 22:17:23.000000000 -0400
17985 +++ linux-3.0.8/arch/x86/lib/cmpxchg16b_emu.S 2011-10-07 19:07:28.000000000 -0400
17986 @@ -53,11 +53,13 @@ this_cpu_cmpxchg16b_emu:
17987
17988 popf
17989 mov $1, %al
17990 + pax_force_retaddr
17991 ret
17992
17993 not_same:
17994 popf
17995 xor %al,%al
17996 + pax_force_retaddr
17997 ret
17998
17999 CFI_ENDPROC
18000 diff -urNp linux-3.0.8/arch/x86/lib/copy_page_64.S linux-3.0.8/arch/x86/lib/copy_page_64.S
18001 --- linux-3.0.8/arch/x86/lib/copy_page_64.S 2011-07-21 22:17:23.000000000 -0400
18002 +++ linux-3.0.8/arch/x86/lib/copy_page_64.S 2011-10-06 04:17:55.000000000 -0400
18003 @@ -2,12 +2,14 @@
18004
18005 #include <linux/linkage.h>
18006 #include <asm/dwarf2.h>
18007 +#include <asm/alternative-asm.h>
18008
18009 ALIGN
18010 copy_page_c:
18011 CFI_STARTPROC
18012 movl $4096/8,%ecx
18013 rep movsq
18014 + pax_force_retaddr
18015 ret
18016 CFI_ENDPROC
18017 ENDPROC(copy_page_c)
18018 @@ -94,6 +96,7 @@ ENTRY(copy_page)
18019 CFI_RESTORE r13
18020 addq $3*8,%rsp
18021 CFI_ADJUST_CFA_OFFSET -3*8
18022 + pax_force_retaddr
18023 ret
18024 .Lcopy_page_end:
18025 CFI_ENDPROC
18026 @@ -104,7 +107,7 @@ ENDPROC(copy_page)
18027
18028 #include <asm/cpufeature.h>
18029
18030 - .section .altinstr_replacement,"ax"
18031 + .section .altinstr_replacement,"a"
18032 1: .byte 0xeb /* jmp <disp8> */
18033 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
18034 2:
18035 diff -urNp linux-3.0.8/arch/x86/lib/copy_user_64.S linux-3.0.8/arch/x86/lib/copy_user_64.S
18036 --- linux-3.0.8/arch/x86/lib/copy_user_64.S 2011-07-21 22:17:23.000000000 -0400
18037 +++ linux-3.0.8/arch/x86/lib/copy_user_64.S 2011-10-06 04:17:55.000000000 -0400
18038 @@ -16,6 +16,7 @@
18039 #include <asm/thread_info.h>
18040 #include <asm/cpufeature.h>
18041 #include <asm/alternative-asm.h>
18042 +#include <asm/pgtable.h>
18043
18044 /*
18045 * By placing feature2 after feature1 in altinstructions section, we logically
18046 @@ -29,7 +30,7 @@
18047 .byte 0xe9 /* 32bit jump */
18048 .long \orig-1f /* by default jump to orig */
18049 1:
18050 - .section .altinstr_replacement,"ax"
18051 + .section .altinstr_replacement,"a"
18052 2: .byte 0xe9 /* near jump with 32bit immediate */
18053 .long \alt1-1b /* offset */ /* or alternatively to alt1 */
18054 3: .byte 0xe9 /* near jump with 32bit immediate */
18055 @@ -71,47 +72,20 @@
18056 #endif
18057 .endm
18058
18059 -/* Standard copy_to_user with segment limit checking */
18060 -ENTRY(_copy_to_user)
18061 - CFI_STARTPROC
18062 - GET_THREAD_INFO(%rax)
18063 - movq %rdi,%rcx
18064 - addq %rdx,%rcx
18065 - jc bad_to_user
18066 - cmpq TI_addr_limit(%rax),%rcx
18067 - ja bad_to_user
18068 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
18069 - copy_user_generic_unrolled,copy_user_generic_string, \
18070 - copy_user_enhanced_fast_string
18071 - CFI_ENDPROC
18072 -ENDPROC(_copy_to_user)
18073 -
18074 -/* Standard copy_from_user with segment limit checking */
18075 -ENTRY(_copy_from_user)
18076 - CFI_STARTPROC
18077 - GET_THREAD_INFO(%rax)
18078 - movq %rsi,%rcx
18079 - addq %rdx,%rcx
18080 - jc bad_from_user
18081 - cmpq TI_addr_limit(%rax),%rcx
18082 - ja bad_from_user
18083 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
18084 - copy_user_generic_unrolled,copy_user_generic_string, \
18085 - copy_user_enhanced_fast_string
18086 - CFI_ENDPROC
18087 -ENDPROC(_copy_from_user)
18088 -
18089 .section .fixup,"ax"
18090 /* must zero dest */
18091 ENTRY(bad_from_user)
18092 bad_from_user:
18093 CFI_STARTPROC
18094 + testl %edx,%edx
18095 + js bad_to_user
18096 movl %edx,%ecx
18097 xorl %eax,%eax
18098 rep
18099 stosb
18100 bad_to_user:
18101 movl %edx,%eax
18102 + pax_force_retaddr
18103 ret
18104 CFI_ENDPROC
18105 ENDPROC(bad_from_user)
18106 @@ -179,6 +153,7 @@ ENTRY(copy_user_generic_unrolled)
18107 decl %ecx
18108 jnz 21b
18109 23: xor %eax,%eax
18110 + pax_force_retaddr
18111 ret
18112
18113 .section .fixup,"ax"
18114 @@ -251,6 +226,7 @@ ENTRY(copy_user_generic_string)
18115 3: rep
18116 movsb
18117 4: xorl %eax,%eax
18118 + pax_force_retaddr
18119 ret
18120
18121 .section .fixup,"ax"
18122 @@ -287,6 +263,7 @@ ENTRY(copy_user_enhanced_fast_string)
18123 1: rep
18124 movsb
18125 2: xorl %eax,%eax
18126 + pax_force_retaddr
18127 ret
18128
18129 .section .fixup,"ax"
18130 diff -urNp linux-3.0.8/arch/x86/lib/copy_user_nocache_64.S linux-3.0.8/arch/x86/lib/copy_user_nocache_64.S
18131 --- linux-3.0.8/arch/x86/lib/copy_user_nocache_64.S 2011-07-21 22:17:23.000000000 -0400
18132 +++ linux-3.0.8/arch/x86/lib/copy_user_nocache_64.S 2011-10-06 04:17:55.000000000 -0400
18133 @@ -8,12 +8,14 @@
18134
18135 #include <linux/linkage.h>
18136 #include <asm/dwarf2.h>
18137 +#include <asm/alternative-asm.h>
18138
18139 #define FIX_ALIGNMENT 1
18140
18141 #include <asm/current.h>
18142 #include <asm/asm-offsets.h>
18143 #include <asm/thread_info.h>
18144 +#include <asm/pgtable.h>
18145
18146 .macro ALIGN_DESTINATION
18147 #ifdef FIX_ALIGNMENT
18148 @@ -50,6 +52,15 @@
18149 */
18150 ENTRY(__copy_user_nocache)
18151 CFI_STARTPROC
18152 +
18153 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18154 + mov $PAX_USER_SHADOW_BASE,%rcx
18155 + cmp %rcx,%rsi
18156 + jae 1f
18157 + add %rcx,%rsi
18158 +1:
18159 +#endif
18160 +
18161 cmpl $8,%edx
18162 jb 20f /* less then 8 bytes, go to byte copy loop */
18163 ALIGN_DESTINATION
18164 @@ -98,6 +109,7 @@ ENTRY(__copy_user_nocache)
18165 jnz 21b
18166 23: xorl %eax,%eax
18167 sfence
18168 + pax_force_retaddr
18169 ret
18170
18171 .section .fixup,"ax"
18172 diff -urNp linux-3.0.8/arch/x86/lib/csum-copy_64.S linux-3.0.8/arch/x86/lib/csum-copy_64.S
18173 --- linux-3.0.8/arch/x86/lib/csum-copy_64.S 2011-07-21 22:17:23.000000000 -0400
18174 +++ linux-3.0.8/arch/x86/lib/csum-copy_64.S 2011-10-06 04:17:55.000000000 -0400
18175 @@ -8,6 +8,7 @@
18176 #include <linux/linkage.h>
18177 #include <asm/dwarf2.h>
18178 #include <asm/errno.h>
18179 +#include <asm/alternative-asm.h>
18180
18181 /*
18182 * Checksum copy with exception handling.
18183 @@ -228,6 +229,7 @@ ENTRY(csum_partial_copy_generic)
18184 CFI_RESTORE rbp
18185 addq $7*8, %rsp
18186 CFI_ADJUST_CFA_OFFSET -7*8
18187 + pax_force_retaddr
18188 ret
18189 CFI_RESTORE_STATE
18190
18191 diff -urNp linux-3.0.8/arch/x86/lib/csum-wrappers_64.c linux-3.0.8/arch/x86/lib/csum-wrappers_64.c
18192 --- linux-3.0.8/arch/x86/lib/csum-wrappers_64.c 2011-07-21 22:17:23.000000000 -0400
18193 +++ linux-3.0.8/arch/x86/lib/csum-wrappers_64.c 2011-10-06 04:17:55.000000000 -0400
18194 @@ -52,7 +52,13 @@ csum_partial_copy_from_user(const void _
18195 len -= 2;
18196 }
18197 }
18198 - isum = csum_partial_copy_generic((__force const void *)src,
18199 +
18200 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18201 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
18202 + src += PAX_USER_SHADOW_BASE;
18203 +#endif
18204 +
18205 + isum = csum_partial_copy_generic((const void __force_kernel *)src,
18206 dst, len, isum, errp, NULL);
18207 if (unlikely(*errp))
18208 goto out_err;
18209 @@ -105,7 +111,13 @@ csum_partial_copy_to_user(const void *sr
18210 }
18211
18212 *errp = 0;
18213 - return csum_partial_copy_generic(src, (void __force *)dst,
18214 +
18215 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18216 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
18217 + dst += PAX_USER_SHADOW_BASE;
18218 +#endif
18219 +
18220 + return csum_partial_copy_generic(src, (void __force_kernel *)dst,
18221 len, isum, NULL, errp);
18222 }
18223 EXPORT_SYMBOL(csum_partial_copy_to_user);
18224 diff -urNp linux-3.0.8/arch/x86/lib/getuser.S linux-3.0.8/arch/x86/lib/getuser.S
18225 --- linux-3.0.8/arch/x86/lib/getuser.S 2011-07-21 22:17:23.000000000 -0400
18226 +++ linux-3.0.8/arch/x86/lib/getuser.S 2011-10-07 19:07:23.000000000 -0400
18227 @@ -33,15 +33,38 @@
18228 #include <asm/asm-offsets.h>
18229 #include <asm/thread_info.h>
18230 #include <asm/asm.h>
18231 +#include <asm/segment.h>
18232 +#include <asm/pgtable.h>
18233 +#include <asm/alternative-asm.h>
18234 +
18235 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
18236 +#define __copyuser_seg gs;
18237 +#else
18238 +#define __copyuser_seg
18239 +#endif
18240
18241 .text
18242 ENTRY(__get_user_1)
18243 CFI_STARTPROC
18244 +
18245 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
18246 GET_THREAD_INFO(%_ASM_DX)
18247 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
18248 jae bad_get_user
18249 -1: movzb (%_ASM_AX),%edx
18250 +
18251 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18252 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
18253 + cmp %_ASM_DX,%_ASM_AX
18254 + jae 1234f
18255 + add %_ASM_DX,%_ASM_AX
18256 +1234:
18257 +#endif
18258 +
18259 +#endif
18260 +
18261 +1: __copyuser_seg movzb (%_ASM_AX),%edx
18262 xor %eax,%eax
18263 + pax_force_retaddr
18264 ret
18265 CFI_ENDPROC
18266 ENDPROC(__get_user_1)
18267 @@ -49,12 +72,26 @@ ENDPROC(__get_user_1)
18268 ENTRY(__get_user_2)
18269 CFI_STARTPROC
18270 add $1,%_ASM_AX
18271 +
18272 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
18273 jc bad_get_user
18274 GET_THREAD_INFO(%_ASM_DX)
18275 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
18276 jae bad_get_user
18277 -2: movzwl -1(%_ASM_AX),%edx
18278 +
18279 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18280 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
18281 + cmp %_ASM_DX,%_ASM_AX
18282 + jae 1234f
18283 + add %_ASM_DX,%_ASM_AX
18284 +1234:
18285 +#endif
18286 +
18287 +#endif
18288 +
18289 +2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
18290 xor %eax,%eax
18291 + pax_force_retaddr
18292 ret
18293 CFI_ENDPROC
18294 ENDPROC(__get_user_2)
18295 @@ -62,12 +99,26 @@ ENDPROC(__get_user_2)
18296 ENTRY(__get_user_4)
18297 CFI_STARTPROC
18298 add $3,%_ASM_AX
18299 +
18300 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
18301 jc bad_get_user
18302 GET_THREAD_INFO(%_ASM_DX)
18303 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
18304 jae bad_get_user
18305 -3: mov -3(%_ASM_AX),%edx
18306 +
18307 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18308 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
18309 + cmp %_ASM_DX,%_ASM_AX
18310 + jae 1234f
18311 + add %_ASM_DX,%_ASM_AX
18312 +1234:
18313 +#endif
18314 +
18315 +#endif
18316 +
18317 +3: __copyuser_seg mov -3(%_ASM_AX),%edx
18318 xor %eax,%eax
18319 + pax_force_retaddr
18320 ret
18321 CFI_ENDPROC
18322 ENDPROC(__get_user_4)
18323 @@ -80,8 +131,18 @@ ENTRY(__get_user_8)
18324 GET_THREAD_INFO(%_ASM_DX)
18325 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
18326 jae bad_get_user
18327 +
18328 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18329 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
18330 + cmp %_ASM_DX,%_ASM_AX
18331 + jae 1234f
18332 + add %_ASM_DX,%_ASM_AX
18333 +1234:
18334 +#endif
18335 +
18336 4: movq -7(%_ASM_AX),%_ASM_DX
18337 xor %eax,%eax
18338 + pax_force_retaddr
18339 ret
18340 CFI_ENDPROC
18341 ENDPROC(__get_user_8)
18342 @@ -91,6 +152,7 @@ bad_get_user:
18343 CFI_STARTPROC
18344 xor %edx,%edx
18345 mov $(-EFAULT),%_ASM_AX
18346 + pax_force_retaddr
18347 ret
18348 CFI_ENDPROC
18349 END(bad_get_user)
18350 diff -urNp linux-3.0.8/arch/x86/lib/insn.c linux-3.0.8/arch/x86/lib/insn.c
18351 --- linux-3.0.8/arch/x86/lib/insn.c 2011-07-21 22:17:23.000000000 -0400
18352 +++ linux-3.0.8/arch/x86/lib/insn.c 2011-08-23 21:47:55.000000000 -0400
18353 @@ -21,6 +21,11 @@
18354 #include <linux/string.h>
18355 #include <asm/inat.h>
18356 #include <asm/insn.h>
18357 +#ifdef __KERNEL__
18358 +#include <asm/pgtable_types.h>
18359 +#else
18360 +#define ktla_ktva(addr) addr
18361 +#endif
18362
18363 #define get_next(t, insn) \
18364 ({t r; r = *(t*)insn->next_byte; insn->next_byte += sizeof(t); r; })
18365 @@ -40,8 +45,8 @@
18366 void insn_init(struct insn *insn, const void *kaddr, int x86_64)
18367 {
18368 memset(insn, 0, sizeof(*insn));
18369 - insn->kaddr = kaddr;
18370 - insn->next_byte = kaddr;
18371 + insn->kaddr = ktla_ktva(kaddr);
18372 + insn->next_byte = ktla_ktva(kaddr);
18373 insn->x86_64 = x86_64 ? 1 : 0;
18374 insn->opnd_bytes = 4;
18375 if (x86_64)
18376 diff -urNp linux-3.0.8/arch/x86/lib/iomap_copy_64.S linux-3.0.8/arch/x86/lib/iomap_copy_64.S
18377 --- linux-3.0.8/arch/x86/lib/iomap_copy_64.S 2011-07-21 22:17:23.000000000 -0400
18378 +++ linux-3.0.8/arch/x86/lib/iomap_copy_64.S 2011-10-06 04:17:55.000000000 -0400
18379 @@ -17,6 +17,7 @@
18380
18381 #include <linux/linkage.h>
18382 #include <asm/dwarf2.h>
18383 +#include <asm/alternative-asm.h>
18384
18385 /*
18386 * override generic version in lib/iomap_copy.c
18387 @@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
18388 CFI_STARTPROC
18389 movl %edx,%ecx
18390 rep movsd
18391 + pax_force_retaddr
18392 ret
18393 CFI_ENDPROC
18394 ENDPROC(__iowrite32_copy)
18395 diff -urNp linux-3.0.8/arch/x86/lib/memcpy_64.S linux-3.0.8/arch/x86/lib/memcpy_64.S
18396 --- linux-3.0.8/arch/x86/lib/memcpy_64.S 2011-07-21 22:17:23.000000000 -0400
18397 +++ linux-3.0.8/arch/x86/lib/memcpy_64.S 2011-10-06 04:17:55.000000000 -0400
18398 @@ -34,6 +34,7 @@
18399 rep movsq
18400 movl %edx, %ecx
18401 rep movsb
18402 + pax_force_retaddr
18403 ret
18404 .Lmemcpy_e:
18405 .previous
18406 @@ -51,6 +52,7 @@
18407
18408 movl %edx, %ecx
18409 rep movsb
18410 + pax_force_retaddr
18411 ret
18412 .Lmemcpy_e_e:
18413 .previous
18414 @@ -141,6 +143,7 @@ ENTRY(memcpy)
18415 movq %r9, 1*8(%rdi)
18416 movq %r10, -2*8(%rdi, %rdx)
18417 movq %r11, -1*8(%rdi, %rdx)
18418 + pax_force_retaddr
18419 retq
18420 .p2align 4
18421 .Lless_16bytes:
18422 @@ -153,6 +156,7 @@ ENTRY(memcpy)
18423 movq -1*8(%rsi, %rdx), %r9
18424 movq %r8, 0*8(%rdi)
18425 movq %r9, -1*8(%rdi, %rdx)
18426 + pax_force_retaddr
18427 retq
18428 .p2align 4
18429 .Lless_8bytes:
18430 @@ -166,6 +170,7 @@ ENTRY(memcpy)
18431 movl -4(%rsi, %rdx), %r8d
18432 movl %ecx, (%rdi)
18433 movl %r8d, -4(%rdi, %rdx)
18434 + pax_force_retaddr
18435 retq
18436 .p2align 4
18437 .Lless_3bytes:
18438 @@ -183,6 +188,7 @@ ENTRY(memcpy)
18439 jnz .Lloop_1
18440
18441 .Lend:
18442 + pax_force_retaddr
18443 retq
18444 CFI_ENDPROC
18445 ENDPROC(memcpy)
18446 diff -urNp linux-3.0.8/arch/x86/lib/memmove_64.S linux-3.0.8/arch/x86/lib/memmove_64.S
18447 --- linux-3.0.8/arch/x86/lib/memmove_64.S 2011-07-21 22:17:23.000000000 -0400
18448 +++ linux-3.0.8/arch/x86/lib/memmove_64.S 2011-10-06 04:17:55.000000000 -0400
18449 @@ -9,6 +9,7 @@
18450 #include <linux/linkage.h>
18451 #include <asm/dwarf2.h>
18452 #include <asm/cpufeature.h>
18453 +#include <asm/alternative-asm.h>
18454
18455 #undef memmove
18456
18457 @@ -201,6 +202,7 @@ ENTRY(memmove)
18458 movb (%rsi), %r11b
18459 movb %r11b, (%rdi)
18460 13:
18461 + pax_force_retaddr
18462 retq
18463 CFI_ENDPROC
18464
18465 @@ -209,6 +211,7 @@ ENTRY(memmove)
18466 /* Forward moving data. */
18467 movq %rdx, %rcx
18468 rep movsb
18469 + pax_force_retaddr
18470 retq
18471 .Lmemmove_end_forward_efs:
18472 .previous
18473 diff -urNp linux-3.0.8/arch/x86/lib/memset_64.S linux-3.0.8/arch/x86/lib/memset_64.S
18474 --- linux-3.0.8/arch/x86/lib/memset_64.S 2011-07-21 22:17:23.000000000 -0400
18475 +++ linux-3.0.8/arch/x86/lib/memset_64.S 2011-10-06 04:17:55.000000000 -0400
18476 @@ -31,6 +31,7 @@
18477 movl %r8d,%ecx
18478 rep stosb
18479 movq %r9,%rax
18480 + pax_force_retaddr
18481 ret
18482 .Lmemset_e:
18483 .previous
18484 @@ -53,6 +54,7 @@
18485 movl %edx,%ecx
18486 rep stosb
18487 movq %r9,%rax
18488 + pax_force_retaddr
18489 ret
18490 .Lmemset_e_e:
18491 .previous
18492 @@ -121,6 +123,7 @@ ENTRY(__memset)
18493
18494 .Lende:
18495 movq %r10,%rax
18496 + pax_force_retaddr
18497 ret
18498
18499 CFI_RESTORE_STATE
18500 diff -urNp linux-3.0.8/arch/x86/lib/mmx_32.c linux-3.0.8/arch/x86/lib/mmx_32.c
18501 --- linux-3.0.8/arch/x86/lib/mmx_32.c 2011-07-21 22:17:23.000000000 -0400
18502 +++ linux-3.0.8/arch/x86/lib/mmx_32.c 2011-08-23 21:47:55.000000000 -0400
18503 @@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *
18504 {
18505 void *p;
18506 int i;
18507 + unsigned long cr0;
18508
18509 if (unlikely(in_interrupt()))
18510 return __memcpy(to, from, len);
18511 @@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *
18512 kernel_fpu_begin();
18513
18514 __asm__ __volatile__ (
18515 - "1: prefetch (%0)\n" /* This set is 28 bytes */
18516 - " prefetch 64(%0)\n"
18517 - " prefetch 128(%0)\n"
18518 - " prefetch 192(%0)\n"
18519 - " prefetch 256(%0)\n"
18520 + "1: prefetch (%1)\n" /* This set is 28 bytes */
18521 + " prefetch 64(%1)\n"
18522 + " prefetch 128(%1)\n"
18523 + " prefetch 192(%1)\n"
18524 + " prefetch 256(%1)\n"
18525 "2: \n"
18526 ".section .fixup, \"ax\"\n"
18527 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
18528 + "3: \n"
18529 +
18530 +#ifdef CONFIG_PAX_KERNEXEC
18531 + " movl %%cr0, %0\n"
18532 + " movl %0, %%eax\n"
18533 + " andl $0xFFFEFFFF, %%eax\n"
18534 + " movl %%eax, %%cr0\n"
18535 +#endif
18536 +
18537 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
18538 +
18539 +#ifdef CONFIG_PAX_KERNEXEC
18540 + " movl %0, %%cr0\n"
18541 +#endif
18542 +
18543 " jmp 2b\n"
18544 ".previous\n"
18545 _ASM_EXTABLE(1b, 3b)
18546 - : : "r" (from));
18547 + : "=&r" (cr0) : "r" (from) : "ax");
18548
18549 for ( ; i > 5; i--) {
18550 __asm__ __volatile__ (
18551 - "1: prefetch 320(%0)\n"
18552 - "2: movq (%0), %%mm0\n"
18553 - " movq 8(%0), %%mm1\n"
18554 - " movq 16(%0), %%mm2\n"
18555 - " movq 24(%0), %%mm3\n"
18556 - " movq %%mm0, (%1)\n"
18557 - " movq %%mm1, 8(%1)\n"
18558 - " movq %%mm2, 16(%1)\n"
18559 - " movq %%mm3, 24(%1)\n"
18560 - " movq 32(%0), %%mm0\n"
18561 - " movq 40(%0), %%mm1\n"
18562 - " movq 48(%0), %%mm2\n"
18563 - " movq 56(%0), %%mm3\n"
18564 - " movq %%mm0, 32(%1)\n"
18565 - " movq %%mm1, 40(%1)\n"
18566 - " movq %%mm2, 48(%1)\n"
18567 - " movq %%mm3, 56(%1)\n"
18568 + "1: prefetch 320(%1)\n"
18569 + "2: movq (%1), %%mm0\n"
18570 + " movq 8(%1), %%mm1\n"
18571 + " movq 16(%1), %%mm2\n"
18572 + " movq 24(%1), %%mm3\n"
18573 + " movq %%mm0, (%2)\n"
18574 + " movq %%mm1, 8(%2)\n"
18575 + " movq %%mm2, 16(%2)\n"
18576 + " movq %%mm3, 24(%2)\n"
18577 + " movq 32(%1), %%mm0\n"
18578 + " movq 40(%1), %%mm1\n"
18579 + " movq 48(%1), %%mm2\n"
18580 + " movq 56(%1), %%mm3\n"
18581 + " movq %%mm0, 32(%2)\n"
18582 + " movq %%mm1, 40(%2)\n"
18583 + " movq %%mm2, 48(%2)\n"
18584 + " movq %%mm3, 56(%2)\n"
18585 ".section .fixup, \"ax\"\n"
18586 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
18587 + "3:\n"
18588 +
18589 +#ifdef CONFIG_PAX_KERNEXEC
18590 + " movl %%cr0, %0\n"
18591 + " movl %0, %%eax\n"
18592 + " andl $0xFFFEFFFF, %%eax\n"
18593 + " movl %%eax, %%cr0\n"
18594 +#endif
18595 +
18596 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
18597 +
18598 +#ifdef CONFIG_PAX_KERNEXEC
18599 + " movl %0, %%cr0\n"
18600 +#endif
18601 +
18602 " jmp 2b\n"
18603 ".previous\n"
18604 _ASM_EXTABLE(1b, 3b)
18605 - : : "r" (from), "r" (to) : "memory");
18606 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
18607
18608 from += 64;
18609 to += 64;
18610 @@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
18611 static void fast_copy_page(void *to, void *from)
18612 {
18613 int i;
18614 + unsigned long cr0;
18615
18616 kernel_fpu_begin();
18617
18618 @@ -166,42 +196,70 @@ static void fast_copy_page(void *to, voi
18619 * but that is for later. -AV
18620 */
18621 __asm__ __volatile__(
18622 - "1: prefetch (%0)\n"
18623 - " prefetch 64(%0)\n"
18624 - " prefetch 128(%0)\n"
18625 - " prefetch 192(%0)\n"
18626 - " prefetch 256(%0)\n"
18627 + "1: prefetch (%1)\n"
18628 + " prefetch 64(%1)\n"
18629 + " prefetch 128(%1)\n"
18630 + " prefetch 192(%1)\n"
18631 + " prefetch 256(%1)\n"
18632 "2: \n"
18633 ".section .fixup, \"ax\"\n"
18634 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
18635 + "3: \n"
18636 +
18637 +#ifdef CONFIG_PAX_KERNEXEC
18638 + " movl %%cr0, %0\n"
18639 + " movl %0, %%eax\n"
18640 + " andl $0xFFFEFFFF, %%eax\n"
18641 + " movl %%eax, %%cr0\n"
18642 +#endif
18643 +
18644 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
18645 +
18646 +#ifdef CONFIG_PAX_KERNEXEC
18647 + " movl %0, %%cr0\n"
18648 +#endif
18649 +
18650 " jmp 2b\n"
18651 ".previous\n"
18652 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
18653 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
18654
18655 for (i = 0; i < (4096-320)/64; i++) {
18656 __asm__ __volatile__ (
18657 - "1: prefetch 320(%0)\n"
18658 - "2: movq (%0), %%mm0\n"
18659 - " movntq %%mm0, (%1)\n"
18660 - " movq 8(%0), %%mm1\n"
18661 - " movntq %%mm1, 8(%1)\n"
18662 - " movq 16(%0), %%mm2\n"
18663 - " movntq %%mm2, 16(%1)\n"
18664 - " movq 24(%0), %%mm3\n"
18665 - " movntq %%mm3, 24(%1)\n"
18666 - " movq 32(%0), %%mm4\n"
18667 - " movntq %%mm4, 32(%1)\n"
18668 - " movq 40(%0), %%mm5\n"
18669 - " movntq %%mm5, 40(%1)\n"
18670 - " movq 48(%0), %%mm6\n"
18671 - " movntq %%mm6, 48(%1)\n"
18672 - " movq 56(%0), %%mm7\n"
18673 - " movntq %%mm7, 56(%1)\n"
18674 + "1: prefetch 320(%1)\n"
18675 + "2: movq (%1), %%mm0\n"
18676 + " movntq %%mm0, (%2)\n"
18677 + " movq 8(%1), %%mm1\n"
18678 + " movntq %%mm1, 8(%2)\n"
18679 + " movq 16(%1), %%mm2\n"
18680 + " movntq %%mm2, 16(%2)\n"
18681 + " movq 24(%1), %%mm3\n"
18682 + " movntq %%mm3, 24(%2)\n"
18683 + " movq 32(%1), %%mm4\n"
18684 + " movntq %%mm4, 32(%2)\n"
18685 + " movq 40(%1), %%mm5\n"
18686 + " movntq %%mm5, 40(%2)\n"
18687 + " movq 48(%1), %%mm6\n"
18688 + " movntq %%mm6, 48(%2)\n"
18689 + " movq 56(%1), %%mm7\n"
18690 + " movntq %%mm7, 56(%2)\n"
18691 ".section .fixup, \"ax\"\n"
18692 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
18693 + "3:\n"
18694 +
18695 +#ifdef CONFIG_PAX_KERNEXEC
18696 + " movl %%cr0, %0\n"
18697 + " movl %0, %%eax\n"
18698 + " andl $0xFFFEFFFF, %%eax\n"
18699 + " movl %%eax, %%cr0\n"
18700 +#endif
18701 +
18702 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
18703 +
18704 +#ifdef CONFIG_PAX_KERNEXEC
18705 + " movl %0, %%cr0\n"
18706 +#endif
18707 +
18708 " jmp 2b\n"
18709 ".previous\n"
18710 - _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
18711 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
18712
18713 from += 64;
18714 to += 64;
18715 @@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
18716 static void fast_copy_page(void *to, void *from)
18717 {
18718 int i;
18719 + unsigned long cr0;
18720
18721 kernel_fpu_begin();
18722
18723 __asm__ __volatile__ (
18724 - "1: prefetch (%0)\n"
18725 - " prefetch 64(%0)\n"
18726 - " prefetch 128(%0)\n"
18727 - " prefetch 192(%0)\n"
18728 - " prefetch 256(%0)\n"
18729 + "1: prefetch (%1)\n"
18730 + " prefetch 64(%1)\n"
18731 + " prefetch 128(%1)\n"
18732 + " prefetch 192(%1)\n"
18733 + " prefetch 256(%1)\n"
18734 "2: \n"
18735 ".section .fixup, \"ax\"\n"
18736 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
18737 + "3: \n"
18738 +
18739 +#ifdef CONFIG_PAX_KERNEXEC
18740 + " movl %%cr0, %0\n"
18741 + " movl %0, %%eax\n"
18742 + " andl $0xFFFEFFFF, %%eax\n"
18743 + " movl %%eax, %%cr0\n"
18744 +#endif
18745 +
18746 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
18747 +
18748 +#ifdef CONFIG_PAX_KERNEXEC
18749 + " movl %0, %%cr0\n"
18750 +#endif
18751 +
18752 " jmp 2b\n"
18753 ".previous\n"
18754 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
18755 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
18756
18757 for (i = 0; i < 4096/64; i++) {
18758 __asm__ __volatile__ (
18759 - "1: prefetch 320(%0)\n"
18760 - "2: movq (%0), %%mm0\n"
18761 - " movq 8(%0), %%mm1\n"
18762 - " movq 16(%0), %%mm2\n"
18763 - " movq 24(%0), %%mm3\n"
18764 - " movq %%mm0, (%1)\n"
18765 - " movq %%mm1, 8(%1)\n"
18766 - " movq %%mm2, 16(%1)\n"
18767 - " movq %%mm3, 24(%1)\n"
18768 - " movq 32(%0), %%mm0\n"
18769 - " movq 40(%0), %%mm1\n"
18770 - " movq 48(%0), %%mm2\n"
18771 - " movq 56(%0), %%mm3\n"
18772 - " movq %%mm0, 32(%1)\n"
18773 - " movq %%mm1, 40(%1)\n"
18774 - " movq %%mm2, 48(%1)\n"
18775 - " movq %%mm3, 56(%1)\n"
18776 + "1: prefetch 320(%1)\n"
18777 + "2: movq (%1), %%mm0\n"
18778 + " movq 8(%1), %%mm1\n"
18779 + " movq 16(%1), %%mm2\n"
18780 + " movq 24(%1), %%mm3\n"
18781 + " movq %%mm0, (%2)\n"
18782 + " movq %%mm1, 8(%2)\n"
18783 + " movq %%mm2, 16(%2)\n"
18784 + " movq %%mm3, 24(%2)\n"
18785 + " movq 32(%1), %%mm0\n"
18786 + " movq 40(%1), %%mm1\n"
18787 + " movq 48(%1), %%mm2\n"
18788 + " movq 56(%1), %%mm3\n"
18789 + " movq %%mm0, 32(%2)\n"
18790 + " movq %%mm1, 40(%2)\n"
18791 + " movq %%mm2, 48(%2)\n"
18792 + " movq %%mm3, 56(%2)\n"
18793 ".section .fixup, \"ax\"\n"
18794 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
18795 + "3:\n"
18796 +
18797 +#ifdef CONFIG_PAX_KERNEXEC
18798 + " movl %%cr0, %0\n"
18799 + " movl %0, %%eax\n"
18800 + " andl $0xFFFEFFFF, %%eax\n"
18801 + " movl %%eax, %%cr0\n"
18802 +#endif
18803 +
18804 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
18805 +
18806 +#ifdef CONFIG_PAX_KERNEXEC
18807 + " movl %0, %%cr0\n"
18808 +#endif
18809 +
18810 " jmp 2b\n"
18811 ".previous\n"
18812 _ASM_EXTABLE(1b, 3b)
18813 - : : "r" (from), "r" (to) : "memory");
18814 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
18815
18816 from += 64;
18817 to += 64;
18818 diff -urNp linux-3.0.8/arch/x86/lib/msr-reg.S linux-3.0.8/arch/x86/lib/msr-reg.S
18819 --- linux-3.0.8/arch/x86/lib/msr-reg.S 2011-07-21 22:17:23.000000000 -0400
18820 +++ linux-3.0.8/arch/x86/lib/msr-reg.S 2011-10-07 19:07:28.000000000 -0400
18821 @@ -3,6 +3,7 @@
18822 #include <asm/dwarf2.h>
18823 #include <asm/asm.h>
18824 #include <asm/msr.h>
18825 +#include <asm/alternative-asm.h>
18826
18827 #ifdef CONFIG_X86_64
18828 /*
18829 @@ -37,6 +38,7 @@ ENTRY(native_\op\()_safe_regs)
18830 movl %edi, 28(%r10)
18831 popq_cfi %rbp
18832 popq_cfi %rbx
18833 + pax_force_retaddr
18834 ret
18835 3:
18836 CFI_RESTORE_STATE
18837 diff -urNp linux-3.0.8/arch/x86/lib/putuser.S linux-3.0.8/arch/x86/lib/putuser.S
18838 --- linux-3.0.8/arch/x86/lib/putuser.S 2011-07-21 22:17:23.000000000 -0400
18839 +++ linux-3.0.8/arch/x86/lib/putuser.S 2011-10-07 19:07:23.000000000 -0400
18840 @@ -15,7 +15,9 @@
18841 #include <asm/thread_info.h>
18842 #include <asm/errno.h>
18843 #include <asm/asm.h>
18844 -
18845 +#include <asm/segment.h>
18846 +#include <asm/pgtable.h>
18847 +#include <asm/alternative-asm.h>
18848
18849 /*
18850 * __put_user_X
18851 @@ -29,52 +31,119 @@
18852 * as they get called from within inline assembly.
18853 */
18854
18855 -#define ENTER CFI_STARTPROC ; \
18856 - GET_THREAD_INFO(%_ASM_BX)
18857 -#define EXIT ret ; \
18858 +#define ENTER CFI_STARTPROC
18859 +#define EXIT pax_force_retaddr; ret ; \
18860 CFI_ENDPROC
18861
18862 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18863 +#define _DEST %_ASM_CX,%_ASM_BX
18864 +#else
18865 +#define _DEST %_ASM_CX
18866 +#endif
18867 +
18868 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
18869 +#define __copyuser_seg gs;
18870 +#else
18871 +#define __copyuser_seg
18872 +#endif
18873 +
18874 .text
18875 ENTRY(__put_user_1)
18876 ENTER
18877 +
18878 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
18879 + GET_THREAD_INFO(%_ASM_BX)
18880 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
18881 jae bad_put_user
18882 -1: movb %al,(%_ASM_CX)
18883 +
18884 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18885 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
18886 + cmp %_ASM_BX,%_ASM_CX
18887 + jb 1234f
18888 + xor %ebx,%ebx
18889 +1234:
18890 +#endif
18891 +
18892 +#endif
18893 +
18894 +1: __copyuser_seg movb %al,(_DEST)
18895 xor %eax,%eax
18896 EXIT
18897 ENDPROC(__put_user_1)
18898
18899 ENTRY(__put_user_2)
18900 ENTER
18901 +
18902 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
18903 + GET_THREAD_INFO(%_ASM_BX)
18904 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
18905 sub $1,%_ASM_BX
18906 cmp %_ASM_BX,%_ASM_CX
18907 jae bad_put_user
18908 -2: movw %ax,(%_ASM_CX)
18909 +
18910 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18911 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
18912 + cmp %_ASM_BX,%_ASM_CX
18913 + jb 1234f
18914 + xor %ebx,%ebx
18915 +1234:
18916 +#endif
18917 +
18918 +#endif
18919 +
18920 +2: __copyuser_seg movw %ax,(_DEST)
18921 xor %eax,%eax
18922 EXIT
18923 ENDPROC(__put_user_2)
18924
18925 ENTRY(__put_user_4)
18926 ENTER
18927 +
18928 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
18929 + GET_THREAD_INFO(%_ASM_BX)
18930 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
18931 sub $3,%_ASM_BX
18932 cmp %_ASM_BX,%_ASM_CX
18933 jae bad_put_user
18934 -3: movl %eax,(%_ASM_CX)
18935 +
18936 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18937 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
18938 + cmp %_ASM_BX,%_ASM_CX
18939 + jb 1234f
18940 + xor %ebx,%ebx
18941 +1234:
18942 +#endif
18943 +
18944 +#endif
18945 +
18946 +3: __copyuser_seg movl %eax,(_DEST)
18947 xor %eax,%eax
18948 EXIT
18949 ENDPROC(__put_user_4)
18950
18951 ENTRY(__put_user_8)
18952 ENTER
18953 +
18954 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
18955 + GET_THREAD_INFO(%_ASM_BX)
18956 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
18957 sub $7,%_ASM_BX
18958 cmp %_ASM_BX,%_ASM_CX
18959 jae bad_put_user
18960 -4: mov %_ASM_AX,(%_ASM_CX)
18961 +
18962 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18963 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
18964 + cmp %_ASM_BX,%_ASM_CX
18965 + jb 1234f
18966 + xor %ebx,%ebx
18967 +1234:
18968 +#endif
18969 +
18970 +#endif
18971 +
18972 +4: __copyuser_seg mov %_ASM_AX,(_DEST)
18973 #ifdef CONFIG_X86_32
18974 -5: movl %edx,4(%_ASM_CX)
18975 +5: __copyuser_seg movl %edx,4(_DEST)
18976 #endif
18977 xor %eax,%eax
18978 EXIT
18979 diff -urNp linux-3.0.8/arch/x86/lib/rwlock_64.S linux-3.0.8/arch/x86/lib/rwlock_64.S
18980 --- linux-3.0.8/arch/x86/lib/rwlock_64.S 2011-07-21 22:17:23.000000000 -0400
18981 +++ linux-3.0.8/arch/x86/lib/rwlock_64.S 2011-10-06 04:17:55.000000000 -0400
18982 @@ -17,6 +17,7 @@ ENTRY(__write_lock_failed)
18983 LOCK_PREFIX
18984 subl $RW_LOCK_BIAS,(%rdi)
18985 jnz __write_lock_failed
18986 + pax_force_retaddr
18987 ret
18988 CFI_ENDPROC
18989 END(__write_lock_failed)
18990 @@ -33,6 +34,7 @@ ENTRY(__read_lock_failed)
18991 LOCK_PREFIX
18992 decl (%rdi)
18993 js __read_lock_failed
18994 + pax_force_retaddr
18995 ret
18996 CFI_ENDPROC
18997 END(__read_lock_failed)
18998 diff -urNp linux-3.0.8/arch/x86/lib/rwsem_64.S linux-3.0.8/arch/x86/lib/rwsem_64.S
18999 --- linux-3.0.8/arch/x86/lib/rwsem_64.S 2011-07-21 22:17:23.000000000 -0400
19000 +++ linux-3.0.8/arch/x86/lib/rwsem_64.S 2011-10-07 10:46:47.000000000 -0400
19001 @@ -51,6 +51,7 @@ ENTRY(call_rwsem_down_read_failed)
19002 popq_cfi %rdx
19003 CFI_RESTORE rdx
19004 restore_common_regs
19005 + pax_force_retaddr
19006 ret
19007 CFI_ENDPROC
19008 ENDPROC(call_rwsem_down_read_failed)
19009 @@ -61,6 +62,7 @@ ENTRY(call_rwsem_down_write_failed)
19010 movq %rax,%rdi
19011 call rwsem_down_write_failed
19012 restore_common_regs
19013 + pax_force_retaddr
19014 ret
19015 CFI_ENDPROC
19016 ENDPROC(call_rwsem_down_write_failed)
19017 @@ -73,7 +75,8 @@ ENTRY(call_rwsem_wake)
19018 movq %rax,%rdi
19019 call rwsem_wake
19020 restore_common_regs
19021 -1: ret
19022 +1: pax_force_retaddr
19023 + ret
19024 CFI_ENDPROC
19025 ENDPROC(call_rwsem_wake)
19026
19027 @@ -88,6 +91,7 @@ ENTRY(call_rwsem_downgrade_wake)
19028 popq_cfi %rdx
19029 CFI_RESTORE rdx
19030 restore_common_regs
19031 + pax_force_retaddr
19032 ret
19033 CFI_ENDPROC
19034 ENDPROC(call_rwsem_downgrade_wake)
19035 diff -urNp linux-3.0.8/arch/x86/lib/thunk_64.S linux-3.0.8/arch/x86/lib/thunk_64.S
19036 --- linux-3.0.8/arch/x86/lib/thunk_64.S 2011-07-21 22:17:23.000000000 -0400
19037 +++ linux-3.0.8/arch/x86/lib/thunk_64.S 2011-10-06 04:17:55.000000000 -0400
19038 @@ -10,7 +10,8 @@
19039 #include <asm/dwarf2.h>
19040 #include <asm/calling.h>
19041 #include <asm/rwlock.h>
19042 -
19043 + #include <asm/alternative-asm.h>
19044 +
19045 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
19046 .macro thunk name,func
19047 .globl \name
19048 @@ -50,5 +51,6 @@
19049 SAVE_ARGS
19050 restore:
19051 RESTORE_ARGS
19052 - ret
19053 + pax_force_retaddr
19054 + ret
19055 CFI_ENDPROC
19056 diff -urNp linux-3.0.8/arch/x86/lib/usercopy_32.c linux-3.0.8/arch/x86/lib/usercopy_32.c
19057 --- linux-3.0.8/arch/x86/lib/usercopy_32.c 2011-07-21 22:17:23.000000000 -0400
19058 +++ linux-3.0.8/arch/x86/lib/usercopy_32.c 2011-08-23 21:47:55.000000000 -0400
19059 @@ -43,7 +43,7 @@ do { \
19060 __asm__ __volatile__( \
19061 " testl %1,%1\n" \
19062 " jz 2f\n" \
19063 - "0: lodsb\n" \
19064 + "0: "__copyuser_seg"lodsb\n" \
19065 " stosb\n" \
19066 " testb %%al,%%al\n" \
19067 " jz 1f\n" \
19068 @@ -128,10 +128,12 @@ do { \
19069 int __d0; \
19070 might_fault(); \
19071 __asm__ __volatile__( \
19072 + __COPYUSER_SET_ES \
19073 "0: rep; stosl\n" \
19074 " movl %2,%0\n" \
19075 "1: rep; stosb\n" \
19076 "2:\n" \
19077 + __COPYUSER_RESTORE_ES \
19078 ".section .fixup,\"ax\"\n" \
19079 "3: lea 0(%2,%0,4),%0\n" \
19080 " jmp 2b\n" \
19081 @@ -200,6 +202,7 @@ long strnlen_user(const char __user *s,
19082 might_fault();
19083
19084 __asm__ __volatile__(
19085 + __COPYUSER_SET_ES
19086 " testl %0, %0\n"
19087 " jz 3f\n"
19088 " andl %0,%%ecx\n"
19089 @@ -208,6 +211,7 @@ long strnlen_user(const char __user *s,
19090 " subl %%ecx,%0\n"
19091 " addl %0,%%eax\n"
19092 "1:\n"
19093 + __COPYUSER_RESTORE_ES
19094 ".section .fixup,\"ax\"\n"
19095 "2: xorl %%eax,%%eax\n"
19096 " jmp 1b\n"
19097 @@ -227,7 +231,7 @@ EXPORT_SYMBOL(strnlen_user);
19098
19099 #ifdef CONFIG_X86_INTEL_USERCOPY
19100 static unsigned long
19101 -__copy_user_intel(void __user *to, const void *from, unsigned long size)
19102 +__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
19103 {
19104 int d0, d1;
19105 __asm__ __volatile__(
19106 @@ -239,36 +243,36 @@ __copy_user_intel(void __user *to, const
19107 " .align 2,0x90\n"
19108 "3: movl 0(%4), %%eax\n"
19109 "4: movl 4(%4), %%edx\n"
19110 - "5: movl %%eax, 0(%3)\n"
19111 - "6: movl %%edx, 4(%3)\n"
19112 + "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
19113 + "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
19114 "7: movl 8(%4), %%eax\n"
19115 "8: movl 12(%4),%%edx\n"
19116 - "9: movl %%eax, 8(%3)\n"
19117 - "10: movl %%edx, 12(%3)\n"
19118 + "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
19119 + "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
19120 "11: movl 16(%4), %%eax\n"
19121 "12: movl 20(%4), %%edx\n"
19122 - "13: movl %%eax, 16(%3)\n"
19123 - "14: movl %%edx, 20(%3)\n"
19124 + "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
19125 + "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
19126 "15: movl 24(%4), %%eax\n"
19127 "16: movl 28(%4), %%edx\n"
19128 - "17: movl %%eax, 24(%3)\n"
19129 - "18: movl %%edx, 28(%3)\n"
19130 + "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
19131 + "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
19132 "19: movl 32(%4), %%eax\n"
19133 "20: movl 36(%4), %%edx\n"
19134 - "21: movl %%eax, 32(%3)\n"
19135 - "22: movl %%edx, 36(%3)\n"
19136 + "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
19137 + "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
19138 "23: movl 40(%4), %%eax\n"
19139 "24: movl 44(%4), %%edx\n"
19140 - "25: movl %%eax, 40(%3)\n"
19141 - "26: movl %%edx, 44(%3)\n"
19142 + "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
19143 + "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
19144 "27: movl 48(%4), %%eax\n"
19145 "28: movl 52(%4), %%edx\n"
19146 - "29: movl %%eax, 48(%3)\n"
19147 - "30: movl %%edx, 52(%3)\n"
19148 + "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
19149 + "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
19150 "31: movl 56(%4), %%eax\n"
19151 "32: movl 60(%4), %%edx\n"
19152 - "33: movl %%eax, 56(%3)\n"
19153 - "34: movl %%edx, 60(%3)\n"
19154 + "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
19155 + "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
19156 " addl $-64, %0\n"
19157 " addl $64, %4\n"
19158 " addl $64, %3\n"
19159 @@ -278,10 +282,119 @@ __copy_user_intel(void __user *to, const
19160 " shrl $2, %0\n"
19161 " andl $3, %%eax\n"
19162 " cld\n"
19163 + __COPYUSER_SET_ES
19164 "99: rep; movsl\n"
19165 "36: movl %%eax, %0\n"
19166 "37: rep; movsb\n"
19167 "100:\n"
19168 + __COPYUSER_RESTORE_ES
19169 + ".section .fixup,\"ax\"\n"
19170 + "101: lea 0(%%eax,%0,4),%0\n"
19171 + " jmp 100b\n"
19172 + ".previous\n"
19173 + ".section __ex_table,\"a\"\n"
19174 + " .align 4\n"
19175 + " .long 1b,100b\n"
19176 + " .long 2b,100b\n"
19177 + " .long 3b,100b\n"
19178 + " .long 4b,100b\n"
19179 + " .long 5b,100b\n"
19180 + " .long 6b,100b\n"
19181 + " .long 7b,100b\n"
19182 + " .long 8b,100b\n"
19183 + " .long 9b,100b\n"
19184 + " .long 10b,100b\n"
19185 + " .long 11b,100b\n"
19186 + " .long 12b,100b\n"
19187 + " .long 13b,100b\n"
19188 + " .long 14b,100b\n"
19189 + " .long 15b,100b\n"
19190 + " .long 16b,100b\n"
19191 + " .long 17b,100b\n"
19192 + " .long 18b,100b\n"
19193 + " .long 19b,100b\n"
19194 + " .long 20b,100b\n"
19195 + " .long 21b,100b\n"
19196 + " .long 22b,100b\n"
19197 + " .long 23b,100b\n"
19198 + " .long 24b,100b\n"
19199 + " .long 25b,100b\n"
19200 + " .long 26b,100b\n"
19201 + " .long 27b,100b\n"
19202 + " .long 28b,100b\n"
19203 + " .long 29b,100b\n"
19204 + " .long 30b,100b\n"
19205 + " .long 31b,100b\n"
19206 + " .long 32b,100b\n"
19207 + " .long 33b,100b\n"
19208 + " .long 34b,100b\n"
19209 + " .long 35b,100b\n"
19210 + " .long 36b,100b\n"
19211 + " .long 37b,100b\n"
19212 + " .long 99b,101b\n"
19213 + ".previous"
19214 + : "=&c"(size), "=&D" (d0), "=&S" (d1)
19215 + : "1"(to), "2"(from), "0"(size)
19216 + : "eax", "edx", "memory");
19217 + return size;
19218 +}
19219 +
19220 +static unsigned long
19221 +__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
19222 +{
19223 + int d0, d1;
19224 + __asm__ __volatile__(
19225 + " .align 2,0x90\n"
19226 + "1: "__copyuser_seg" movl 32(%4), %%eax\n"
19227 + " cmpl $67, %0\n"
19228 + " jbe 3f\n"
19229 + "2: "__copyuser_seg" movl 64(%4), %%eax\n"
19230 + " .align 2,0x90\n"
19231 + "3: "__copyuser_seg" movl 0(%4), %%eax\n"
19232 + "4: "__copyuser_seg" movl 4(%4), %%edx\n"
19233 + "5: movl %%eax, 0(%3)\n"
19234 + "6: movl %%edx, 4(%3)\n"
19235 + "7: "__copyuser_seg" movl 8(%4), %%eax\n"
19236 + "8: "__copyuser_seg" movl 12(%4),%%edx\n"
19237 + "9: movl %%eax, 8(%3)\n"
19238 + "10: movl %%edx, 12(%3)\n"
19239 + "11: "__copyuser_seg" movl 16(%4), %%eax\n"
19240 + "12: "__copyuser_seg" movl 20(%4), %%edx\n"
19241 + "13: movl %%eax, 16(%3)\n"
19242 + "14: movl %%edx, 20(%3)\n"
19243 + "15: "__copyuser_seg" movl 24(%4), %%eax\n"
19244 + "16: "__copyuser_seg" movl 28(%4), %%edx\n"
19245 + "17: movl %%eax, 24(%3)\n"
19246 + "18: movl %%edx, 28(%3)\n"
19247 + "19: "__copyuser_seg" movl 32(%4), %%eax\n"
19248 + "20: "__copyuser_seg" movl 36(%4), %%edx\n"
19249 + "21: movl %%eax, 32(%3)\n"
19250 + "22: movl %%edx, 36(%3)\n"
19251 + "23: "__copyuser_seg" movl 40(%4), %%eax\n"
19252 + "24: "__copyuser_seg" movl 44(%4), %%edx\n"
19253 + "25: movl %%eax, 40(%3)\n"
19254 + "26: movl %%edx, 44(%3)\n"
19255 + "27: "__copyuser_seg" movl 48(%4), %%eax\n"
19256 + "28: "__copyuser_seg" movl 52(%4), %%edx\n"
19257 + "29: movl %%eax, 48(%3)\n"
19258 + "30: movl %%edx, 52(%3)\n"
19259 + "31: "__copyuser_seg" movl 56(%4), %%eax\n"
19260 + "32: "__copyuser_seg" movl 60(%4), %%edx\n"
19261 + "33: movl %%eax, 56(%3)\n"
19262 + "34: movl %%edx, 60(%3)\n"
19263 + " addl $-64, %0\n"
19264 + " addl $64, %4\n"
19265 + " addl $64, %3\n"
19266 + " cmpl $63, %0\n"
19267 + " ja 1b\n"
19268 + "35: movl %0, %%eax\n"
19269 + " shrl $2, %0\n"
19270 + " andl $3, %%eax\n"
19271 + " cld\n"
19272 + "99: rep; "__copyuser_seg" movsl\n"
19273 + "36: movl %%eax, %0\n"
19274 + "37: rep; "__copyuser_seg" movsb\n"
19275 + "100:\n"
19276 ".section .fixup,\"ax\"\n"
19277 "101: lea 0(%%eax,%0,4),%0\n"
19278 " jmp 100b\n"
19279 @@ -339,41 +452,41 @@ __copy_user_zeroing_intel(void *to, cons
19280 int d0, d1;
19281 __asm__ __volatile__(
19282 " .align 2,0x90\n"
19283 - "0: movl 32(%4), %%eax\n"
19284 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
19285 " cmpl $67, %0\n"
19286 " jbe 2f\n"
19287 - "1: movl 64(%4), %%eax\n"
19288 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
19289 " .align 2,0x90\n"
19290 - "2: movl 0(%4), %%eax\n"
19291 - "21: movl 4(%4), %%edx\n"
19292 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
19293 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
19294 " movl %%eax, 0(%3)\n"
19295 " movl %%edx, 4(%3)\n"
19296 - "3: movl 8(%4), %%eax\n"
19297 - "31: movl 12(%4),%%edx\n"
19298 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
19299 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
19300 " movl %%eax, 8(%3)\n"
19301 " movl %%edx, 12(%3)\n"
19302 - "4: movl 16(%4), %%eax\n"
19303 - "41: movl 20(%4), %%edx\n"
19304 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
19305 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
19306 " movl %%eax, 16(%3)\n"
19307 " movl %%edx, 20(%3)\n"
19308 - "10: movl 24(%4), %%eax\n"
19309 - "51: movl 28(%4), %%edx\n"
19310 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
19311 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
19312 " movl %%eax, 24(%3)\n"
19313 " movl %%edx, 28(%3)\n"
19314 - "11: movl 32(%4), %%eax\n"
19315 - "61: movl 36(%4), %%edx\n"
19316 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
19317 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
19318 " movl %%eax, 32(%3)\n"
19319 " movl %%edx, 36(%3)\n"
19320 - "12: movl 40(%4), %%eax\n"
19321 - "71: movl 44(%4), %%edx\n"
19322 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
19323 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
19324 " movl %%eax, 40(%3)\n"
19325 " movl %%edx, 44(%3)\n"
19326 - "13: movl 48(%4), %%eax\n"
19327 - "81: movl 52(%4), %%edx\n"
19328 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
19329 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
19330 " movl %%eax, 48(%3)\n"
19331 " movl %%edx, 52(%3)\n"
19332 - "14: movl 56(%4), %%eax\n"
19333 - "91: movl 60(%4), %%edx\n"
19334 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
19335 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
19336 " movl %%eax, 56(%3)\n"
19337 " movl %%edx, 60(%3)\n"
19338 " addl $-64, %0\n"
19339 @@ -385,9 +498,9 @@ __copy_user_zeroing_intel(void *to, cons
19340 " shrl $2, %0\n"
19341 " andl $3, %%eax\n"
19342 " cld\n"
19343 - "6: rep; movsl\n"
19344 + "6: rep; "__copyuser_seg" movsl\n"
19345 " movl %%eax,%0\n"
19346 - "7: rep; movsb\n"
19347 + "7: rep; "__copyuser_seg" movsb\n"
19348 "8:\n"
19349 ".section .fixup,\"ax\"\n"
19350 "9: lea 0(%%eax,%0,4),%0\n"
19351 @@ -440,41 +553,41 @@ static unsigned long __copy_user_zeroing
19352
19353 __asm__ __volatile__(
19354 " .align 2,0x90\n"
19355 - "0: movl 32(%4), %%eax\n"
19356 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
19357 " cmpl $67, %0\n"
19358 " jbe 2f\n"
19359 - "1: movl 64(%4), %%eax\n"
19360 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
19361 " .align 2,0x90\n"
19362 - "2: movl 0(%4), %%eax\n"
19363 - "21: movl 4(%4), %%edx\n"
19364 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
19365 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
19366 " movnti %%eax, 0(%3)\n"
19367 " movnti %%edx, 4(%3)\n"
19368 - "3: movl 8(%4), %%eax\n"
19369 - "31: movl 12(%4),%%edx\n"
19370 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
19371 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
19372 " movnti %%eax, 8(%3)\n"
19373 " movnti %%edx, 12(%3)\n"
19374 - "4: movl 16(%4), %%eax\n"
19375 - "41: movl 20(%4), %%edx\n"
19376 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
19377 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
19378 " movnti %%eax, 16(%3)\n"
19379 " movnti %%edx, 20(%3)\n"
19380 - "10: movl 24(%4), %%eax\n"
19381 - "51: movl 28(%4), %%edx\n"
19382 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
19383 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
19384 " movnti %%eax, 24(%3)\n"
19385 " movnti %%edx, 28(%3)\n"
19386 - "11: movl 32(%4), %%eax\n"
19387 - "61: movl 36(%4), %%edx\n"
19388 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
19389 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
19390 " movnti %%eax, 32(%3)\n"
19391 " movnti %%edx, 36(%3)\n"
19392 - "12: movl 40(%4), %%eax\n"
19393 - "71: movl 44(%4), %%edx\n"
19394 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
19395 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
19396 " movnti %%eax, 40(%3)\n"
19397 " movnti %%edx, 44(%3)\n"
19398 - "13: movl 48(%4), %%eax\n"
19399 - "81: movl 52(%4), %%edx\n"
19400 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
19401 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
19402 " movnti %%eax, 48(%3)\n"
19403 " movnti %%edx, 52(%3)\n"
19404 - "14: movl 56(%4), %%eax\n"
19405 - "91: movl 60(%4), %%edx\n"
19406 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
19407 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
19408 " movnti %%eax, 56(%3)\n"
19409 " movnti %%edx, 60(%3)\n"
19410 " addl $-64, %0\n"
19411 @@ -487,9 +600,9 @@ static unsigned long __copy_user_zeroing
19412 " shrl $2, %0\n"
19413 " andl $3, %%eax\n"
19414 " cld\n"
19415 - "6: rep; movsl\n"
19416 + "6: rep; "__copyuser_seg" movsl\n"
19417 " movl %%eax,%0\n"
19418 - "7: rep; movsb\n"
19419 + "7: rep; "__copyuser_seg" movsb\n"
19420 "8:\n"
19421 ".section .fixup,\"ax\"\n"
19422 "9: lea 0(%%eax,%0,4),%0\n"
19423 @@ -537,41 +650,41 @@ static unsigned long __copy_user_intel_n
19424
19425 __asm__ __volatile__(
19426 " .align 2,0x90\n"
19427 - "0: movl 32(%4), %%eax\n"
19428 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
19429 " cmpl $67, %0\n"
19430 " jbe 2f\n"
19431 - "1: movl 64(%4), %%eax\n"
19432 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
19433 " .align 2,0x90\n"
19434 - "2: movl 0(%4), %%eax\n"
19435 - "21: movl 4(%4), %%edx\n"
19436 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
19437 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
19438 " movnti %%eax, 0(%3)\n"
19439 " movnti %%edx, 4(%3)\n"
19440 - "3: movl 8(%4), %%eax\n"
19441 - "31: movl 12(%4),%%edx\n"
19442 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
19443 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
19444 " movnti %%eax, 8(%3)\n"
19445 " movnti %%edx, 12(%3)\n"
19446 - "4: movl 16(%4), %%eax\n"
19447 - "41: movl 20(%4), %%edx\n"
19448 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
19449 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
19450 " movnti %%eax, 16(%3)\n"
19451 " movnti %%edx, 20(%3)\n"
19452 - "10: movl 24(%4), %%eax\n"
19453 - "51: movl 28(%4), %%edx\n"
19454 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
19455 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
19456 " movnti %%eax, 24(%3)\n"
19457 " movnti %%edx, 28(%3)\n"
19458 - "11: movl 32(%4), %%eax\n"
19459 - "61: movl 36(%4), %%edx\n"
19460 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
19461 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
19462 " movnti %%eax, 32(%3)\n"
19463 " movnti %%edx, 36(%3)\n"
19464 - "12: movl 40(%4), %%eax\n"
19465 - "71: movl 44(%4), %%edx\n"
19466 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
19467 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
19468 " movnti %%eax, 40(%3)\n"
19469 " movnti %%edx, 44(%3)\n"
19470 - "13: movl 48(%4), %%eax\n"
19471 - "81: movl 52(%4), %%edx\n"
19472 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
19473 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
19474 " movnti %%eax, 48(%3)\n"
19475 " movnti %%edx, 52(%3)\n"
19476 - "14: movl 56(%4), %%eax\n"
19477 - "91: movl 60(%4), %%edx\n"
19478 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
19479 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
19480 " movnti %%eax, 56(%3)\n"
19481 " movnti %%edx, 60(%3)\n"
19482 " addl $-64, %0\n"
19483 @@ -584,9 +697,9 @@ static unsigned long __copy_user_intel_n
19484 " shrl $2, %0\n"
19485 " andl $3, %%eax\n"
19486 " cld\n"
19487 - "6: rep; movsl\n"
19488 + "6: rep; "__copyuser_seg" movsl\n"
19489 " movl %%eax,%0\n"
19490 - "7: rep; movsb\n"
19491 + "7: rep; "__copyuser_seg" movsb\n"
19492 "8:\n"
19493 ".section .fixup,\"ax\"\n"
19494 "9: lea 0(%%eax,%0,4),%0\n"
19495 @@ -629,32 +742,36 @@ static unsigned long __copy_user_intel_n
19496 */
19497 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
19498 unsigned long size);
19499 -unsigned long __copy_user_intel(void __user *to, const void *from,
19500 +unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
19501 + unsigned long size);
19502 +unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
19503 unsigned long size);
19504 unsigned long __copy_user_zeroing_intel_nocache(void *to,
19505 const void __user *from, unsigned long size);
19506 #endif /* CONFIG_X86_INTEL_USERCOPY */
19507
19508 /* Generic arbitrary sized copy. */
19509 -#define __copy_user(to, from, size) \
19510 +#define __copy_user(to, from, size, prefix, set, restore) \
19511 do { \
19512 int __d0, __d1, __d2; \
19513 __asm__ __volatile__( \
19514 + set \
19515 " cmp $7,%0\n" \
19516 " jbe 1f\n" \
19517 " movl %1,%0\n" \
19518 " negl %0\n" \
19519 " andl $7,%0\n" \
19520 " subl %0,%3\n" \
19521 - "4: rep; movsb\n" \
19522 + "4: rep; "prefix"movsb\n" \
19523 " movl %3,%0\n" \
19524 " shrl $2,%0\n" \
19525 " andl $3,%3\n" \
19526 " .align 2,0x90\n" \
19527 - "0: rep; movsl\n" \
19528 + "0: rep; "prefix"movsl\n" \
19529 " movl %3,%0\n" \
19530 - "1: rep; movsb\n" \
19531 + "1: rep; "prefix"movsb\n" \
19532 "2:\n" \
19533 + restore \
19534 ".section .fixup,\"ax\"\n" \
19535 "5: addl %3,%0\n" \
19536 " jmp 2b\n" \
19537 @@ -682,14 +799,14 @@ do { \
19538 " negl %0\n" \
19539 " andl $7,%0\n" \
19540 " subl %0,%3\n" \
19541 - "4: rep; movsb\n" \
19542 + "4: rep; "__copyuser_seg"movsb\n" \
19543 " movl %3,%0\n" \
19544 " shrl $2,%0\n" \
19545 " andl $3,%3\n" \
19546 " .align 2,0x90\n" \
19547 - "0: rep; movsl\n" \
19548 + "0: rep; "__copyuser_seg"movsl\n" \
19549 " movl %3,%0\n" \
19550 - "1: rep; movsb\n" \
19551 + "1: rep; "__copyuser_seg"movsb\n" \
19552 "2:\n" \
19553 ".section .fixup,\"ax\"\n" \
19554 "5: addl %3,%0\n" \
19555 @@ -775,9 +892,9 @@ survive:
19556 }
19557 #endif
19558 if (movsl_is_ok(to, from, n))
19559 - __copy_user(to, from, n);
19560 + __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
19561 else
19562 - n = __copy_user_intel(to, from, n);
19563 + n = __generic_copy_to_user_intel(to, from, n);
19564 return n;
19565 }
19566 EXPORT_SYMBOL(__copy_to_user_ll);
19567 @@ -797,10 +914,9 @@ unsigned long __copy_from_user_ll_nozero
19568 unsigned long n)
19569 {
19570 if (movsl_is_ok(to, from, n))
19571 - __copy_user(to, from, n);
19572 + __copy_user(to, from, n, __copyuser_seg, "", "");
19573 else
19574 - n = __copy_user_intel((void __user *)to,
19575 - (const void *)from, n);
19576 + n = __generic_copy_from_user_intel(to, from, n);
19577 return n;
19578 }
19579 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
19580 @@ -827,65 +943,50 @@ unsigned long __copy_from_user_ll_nocach
19581 if (n > 64 && cpu_has_xmm2)
19582 n = __copy_user_intel_nocache(to, from, n);
19583 else
19584 - __copy_user(to, from, n);
19585 + __copy_user(to, from, n, __copyuser_seg, "", "");
19586 #else
19587 - __copy_user(to, from, n);
19588 + __copy_user(to, from, n, __copyuser_seg, "", "");
19589 #endif
19590 return n;
19591 }
19592 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
19593
19594 -/**
19595 - * copy_to_user: - Copy a block of data into user space.
19596 - * @to: Destination address, in user space.
19597 - * @from: Source address, in kernel space.
19598 - * @n: Number of bytes to copy.
19599 - *
19600 - * Context: User context only. This function may sleep.
19601 - *
19602 - * Copy data from kernel space to user space.
19603 - *
19604 - * Returns number of bytes that could not be copied.
19605 - * On success, this will be zero.
19606 - */
19607 -unsigned long
19608 -copy_to_user(void __user *to, const void *from, unsigned long n)
19609 +void copy_from_user_overflow(void)
19610 {
19611 - if (access_ok(VERIFY_WRITE, to, n))
19612 - n = __copy_to_user(to, from, n);
19613 - return n;
19614 + WARN(1, "Buffer overflow detected!\n");
19615 }
19616 -EXPORT_SYMBOL(copy_to_user);
19617 +EXPORT_SYMBOL(copy_from_user_overflow);
19618
19619 -/**
19620 - * copy_from_user: - Copy a block of data from user space.
19621 - * @to: Destination address, in kernel space.
19622 - * @from: Source address, in user space.
19623 - * @n: Number of bytes to copy.
19624 - *
19625 - * Context: User context only. This function may sleep.
19626 - *
19627 - * Copy data from user space to kernel space.
19628 - *
19629 - * Returns number of bytes that could not be copied.
19630 - * On success, this will be zero.
19631 - *
19632 - * If some data could not be copied, this function will pad the copied
19633 - * data to the requested size using zero bytes.
19634 - */
19635 -unsigned long
19636 -_copy_from_user(void *to, const void __user *from, unsigned long n)
19637 +void copy_to_user_overflow(void)
19638 {
19639 - if (access_ok(VERIFY_READ, from, n))
19640 - n = __copy_from_user(to, from, n);
19641 - else
19642 - memset(to, 0, n);
19643 - return n;
19644 + WARN(1, "Buffer overflow detected!\n");
19645 }
19646 -EXPORT_SYMBOL(_copy_from_user);
19647 +EXPORT_SYMBOL(copy_to_user_overflow);
19648
19649 -void copy_from_user_overflow(void)
19650 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19651 +void __set_fs(mm_segment_t x)
19652 {
19653 - WARN(1, "Buffer overflow detected!\n");
19654 + switch (x.seg) {
19655 + case 0:
19656 + loadsegment(gs, 0);
19657 + break;
19658 + case TASK_SIZE_MAX:
19659 + loadsegment(gs, __USER_DS);
19660 + break;
19661 + case -1UL:
19662 + loadsegment(gs, __KERNEL_DS);
19663 + break;
19664 + default:
19665 + BUG();
19666 + }
19667 + return;
19668 }
19669 -EXPORT_SYMBOL(copy_from_user_overflow);
19670 +EXPORT_SYMBOL(__set_fs);
19671 +
19672 +void set_fs(mm_segment_t x)
19673 +{
19674 + current_thread_info()->addr_limit = x;
19675 + __set_fs(x);
19676 +}
19677 +EXPORT_SYMBOL(set_fs);
19678 +#endif
19679 diff -urNp linux-3.0.8/arch/x86/lib/usercopy_64.c linux-3.0.8/arch/x86/lib/usercopy_64.c
19680 --- linux-3.0.8/arch/x86/lib/usercopy_64.c 2011-07-21 22:17:23.000000000 -0400
19681 +++ linux-3.0.8/arch/x86/lib/usercopy_64.c 2011-10-06 04:17:55.000000000 -0400
19682 @@ -42,6 +42,12 @@ long
19683 __strncpy_from_user(char *dst, const char __user *src, long count)
19684 {
19685 long res;
19686 +
19687 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19688 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
19689 + src += PAX_USER_SHADOW_BASE;
19690 +#endif
19691 +
19692 __do_strncpy_from_user(dst, src, count, res);
19693 return res;
19694 }
19695 @@ -65,6 +71,12 @@ unsigned long __clear_user(void __user *
19696 {
19697 long __d0;
19698 might_fault();
19699 +
19700 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19701 + if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
19702 + addr += PAX_USER_SHADOW_BASE;
19703 +#endif
19704 +
19705 /* no memory constraint because it doesn't change any memory gcc knows
19706 about */
19707 asm volatile(
19708 @@ -151,10 +163,18 @@ EXPORT_SYMBOL(strlen_user);
19709
19710 unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
19711 {
19712 - if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
19713 - return copy_user_generic((__force void *)to, (__force void *)from, len);
19714 - }
19715 - return len;
19716 + if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
19717 +
19718 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19719 + if ((unsigned long)to < PAX_USER_SHADOW_BASE)
19720 + to += PAX_USER_SHADOW_BASE;
19721 + if ((unsigned long)from < PAX_USER_SHADOW_BASE)
19722 + from += PAX_USER_SHADOW_BASE;
19723 +#endif
19724 +
19725 + return copy_user_generic((void __force_kernel *)to, (void __force_kernel *)from, len);
19726 + }
19727 + return len;
19728 }
19729 EXPORT_SYMBOL(copy_in_user);
19730
19731 @@ -164,7 +184,7 @@ EXPORT_SYMBOL(copy_in_user);
19732 * it is not necessary to optimize tail handling.
19733 */
19734 unsigned long
19735 -copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
19736 +copy_user_handle_tail(char __user *to, char __user *from, unsigned len, unsigned zerorest)
19737 {
19738 char c;
19739 unsigned zero_len;
19740 diff -urNp linux-3.0.8/arch/x86/Makefile linux-3.0.8/arch/x86/Makefile
19741 --- linux-3.0.8/arch/x86/Makefile 2011-07-21 22:17:23.000000000 -0400
19742 +++ linux-3.0.8/arch/x86/Makefile 2011-08-23 21:48:14.000000000 -0400
19743 @@ -44,6 +44,7 @@ ifeq ($(CONFIG_X86_32),y)
19744 else
19745 BITS := 64
19746 UTS_MACHINE := x86_64
19747 + biarch := $(call cc-option,-m64)
19748 CHECKFLAGS += -D__x86_64__ -m64
19749
19750 KBUILD_AFLAGS += -m64
19751 @@ -195,3 +196,12 @@ define archhelp
19752 echo ' FDARGS="..." arguments for the booted kernel'
19753 echo ' FDINITRD=file initrd for the booted kernel'
19754 endef
19755 +
19756 +define OLD_LD
19757 +
19758 +*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
19759 +*** Please upgrade your binutils to 2.18 or newer
19760 +endef
19761 +
19762 +archprepare:
19763 + $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
19764 diff -urNp linux-3.0.8/arch/x86/mm/extable.c linux-3.0.8/arch/x86/mm/extable.c
19765 --- linux-3.0.8/arch/x86/mm/extable.c 2011-07-21 22:17:23.000000000 -0400
19766 +++ linux-3.0.8/arch/x86/mm/extable.c 2011-08-23 21:47:55.000000000 -0400
19767 @@ -8,7 +8,7 @@ int fixup_exception(struct pt_regs *regs
19768 const struct exception_table_entry *fixup;
19769
19770 #ifdef CONFIG_PNPBIOS
19771 - if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
19772 + if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
19773 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
19774 extern u32 pnp_bios_is_utter_crap;
19775 pnp_bios_is_utter_crap = 1;
19776 diff -urNp linux-3.0.8/arch/x86/mm/fault.c linux-3.0.8/arch/x86/mm/fault.c
19777 --- linux-3.0.8/arch/x86/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
19778 +++ linux-3.0.8/arch/x86/mm/fault.c 2011-11-01 05:23:50.000000000 -0400
19779 @@ -13,10 +13,18 @@
19780 #include <linux/perf_event.h> /* perf_sw_event */
19781 #include <linux/hugetlb.h> /* hstate_index_to_shift */
19782 #include <linux/prefetch.h> /* prefetchw */
19783 +#include <linux/unistd.h>
19784 +#include <linux/compiler.h>
19785
19786 #include <asm/traps.h> /* dotraplinkage, ... */
19787 #include <asm/pgalloc.h> /* pgd_*(), ... */
19788 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
19789 +#include <asm/vsyscall.h>
19790 +#include <asm/tlbflush.h>
19791 +
19792 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19793 +#include <asm/stacktrace.h>
19794 +#endif
19795
19796 /*
19797 * Page fault error code bits:
19798 @@ -54,7 +62,7 @@ static inline int __kprobes notify_page_
19799 int ret = 0;
19800
19801 /* kprobe_running() needs smp_processor_id() */
19802 - if (kprobes_built_in() && !user_mode_vm(regs)) {
19803 + if (kprobes_built_in() && !user_mode(regs)) {
19804 preempt_disable();
19805 if (kprobe_running() && kprobe_fault_handler(regs, 14))
19806 ret = 1;
19807 @@ -115,7 +123,10 @@ check_prefetch_opcode(struct pt_regs *re
19808 return !instr_lo || (instr_lo>>1) == 1;
19809 case 0x00:
19810 /* Prefetch instruction is 0x0F0D or 0x0F18 */
19811 - if (probe_kernel_address(instr, opcode))
19812 + if (user_mode(regs)) {
19813 + if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
19814 + return 0;
19815 + } else if (probe_kernel_address(instr, opcode))
19816 return 0;
19817
19818 *prefetch = (instr_lo == 0xF) &&
19819 @@ -149,7 +160,10 @@ is_prefetch(struct pt_regs *regs, unsign
19820 while (instr < max_instr) {
19821 unsigned char opcode;
19822
19823 - if (probe_kernel_address(instr, opcode))
19824 + if (user_mode(regs)) {
19825 + if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
19826 + break;
19827 + } else if (probe_kernel_address(instr, opcode))
19828 break;
19829
19830 instr++;
19831 @@ -180,6 +194,34 @@ force_sig_info_fault(int si_signo, int s
19832 force_sig_info(si_signo, &info, tsk);
19833 }
19834
19835 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
19836 +static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
19837 +#endif
19838 +
19839 +#ifdef CONFIG_PAX_EMUTRAMP
19840 +static int pax_handle_fetch_fault(struct pt_regs *regs);
19841 +#endif
19842 +
19843 +#ifdef CONFIG_PAX_PAGEEXEC
19844 +static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
19845 +{
19846 + pgd_t *pgd;
19847 + pud_t *pud;
19848 + pmd_t *pmd;
19849 +
19850 + pgd = pgd_offset(mm, address);
19851 + if (!pgd_present(*pgd))
19852 + return NULL;
19853 + pud = pud_offset(pgd, address);
19854 + if (!pud_present(*pud))
19855 + return NULL;
19856 + pmd = pmd_offset(pud, address);
19857 + if (!pmd_present(*pmd))
19858 + return NULL;
19859 + return pmd;
19860 +}
19861 +#endif
19862 +
19863 DEFINE_SPINLOCK(pgd_lock);
19864 LIST_HEAD(pgd_list);
19865
19866 @@ -230,10 +272,22 @@ void vmalloc_sync_all(void)
19867 for (address = VMALLOC_START & PMD_MASK;
19868 address >= TASK_SIZE && address < FIXADDR_TOP;
19869 address += PMD_SIZE) {
19870 +
19871 +#ifdef CONFIG_PAX_PER_CPU_PGD
19872 + unsigned long cpu;
19873 +#else
19874 struct page *page;
19875 +#endif
19876
19877 spin_lock(&pgd_lock);
19878 +
19879 +#ifdef CONFIG_PAX_PER_CPU_PGD
19880 + for (cpu = 0; cpu < NR_CPUS; ++cpu) {
19881 + pgd_t *pgd = get_cpu_pgd(cpu);
19882 + pmd_t *ret;
19883 +#else
19884 list_for_each_entry(page, &pgd_list, lru) {
19885 + pgd_t *pgd = page_address(page);
19886 spinlock_t *pgt_lock;
19887 pmd_t *ret;
19888
19889 @@ -241,8 +295,13 @@ void vmalloc_sync_all(void)
19890 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
19891
19892 spin_lock(pgt_lock);
19893 - ret = vmalloc_sync_one(page_address(page), address);
19894 +#endif
19895 +
19896 + ret = vmalloc_sync_one(pgd, address);
19897 +
19898 +#ifndef CONFIG_PAX_PER_CPU_PGD
19899 spin_unlock(pgt_lock);
19900 +#endif
19901
19902 if (!ret)
19903 break;
19904 @@ -276,6 +335,11 @@ static noinline __kprobes int vmalloc_fa
19905 * an interrupt in the middle of a task switch..
19906 */
19907 pgd_paddr = read_cr3();
19908 +
19909 +#ifdef CONFIG_PAX_PER_CPU_PGD
19910 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
19911 +#endif
19912 +
19913 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
19914 if (!pmd_k)
19915 return -1;
19916 @@ -371,7 +435,14 @@ static noinline __kprobes int vmalloc_fa
19917 * happen within a race in page table update. In the later
19918 * case just flush:
19919 */
19920 +
19921 +#ifdef CONFIG_PAX_PER_CPU_PGD
19922 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
19923 + pgd = pgd_offset_cpu(smp_processor_id(), address);
19924 +#else
19925 pgd = pgd_offset(current->active_mm, address);
19926 +#endif
19927 +
19928 pgd_ref = pgd_offset_k(address);
19929 if (pgd_none(*pgd_ref))
19930 return -1;
19931 @@ -533,7 +604,7 @@ static int is_errata93(struct pt_regs *r
19932 static int is_errata100(struct pt_regs *regs, unsigned long address)
19933 {
19934 #ifdef CONFIG_X86_64
19935 - if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
19936 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
19937 return 1;
19938 #endif
19939 return 0;
19940 @@ -560,7 +631,7 @@ static int is_f00f_bug(struct pt_regs *r
19941 }
19942
19943 static const char nx_warning[] = KERN_CRIT
19944 -"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
19945 +"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
19946
19947 static void
19948 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
19949 @@ -569,14 +640,25 @@ show_fault_oops(struct pt_regs *regs, un
19950 if (!oops_may_print())
19951 return;
19952
19953 - if (error_code & PF_INSTR) {
19954 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
19955 unsigned int level;
19956
19957 pte_t *pte = lookup_address(address, &level);
19958
19959 if (pte && pte_present(*pte) && !pte_exec(*pte))
19960 - printk(nx_warning, current_uid());
19961 + printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
19962 + }
19963 +
19964 +#ifdef CONFIG_PAX_KERNEXEC
19965 + if (init_mm.start_code <= address && address < init_mm.end_code) {
19966 + if (current->signal->curr_ip)
19967 + printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
19968 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
19969 + else
19970 + printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
19971 + current->comm, task_pid_nr(current), current_uid(), current_euid());
19972 }
19973 +#endif
19974
19975 printk(KERN_ALERT "BUG: unable to handle kernel ");
19976 if (address < PAGE_SIZE)
19977 @@ -702,6 +784,20 @@ __bad_area_nosemaphore(struct pt_regs *r
19978 unsigned long address, int si_code)
19979 {
19980 struct task_struct *tsk = current;
19981 +#if defined(CONFIG_X86_64) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
19982 + struct mm_struct *mm = tsk->mm;
19983 +#endif
19984 +
19985 +#ifdef CONFIG_X86_64
19986 + if (mm && (error_code & PF_INSTR) && mm->context.vdso) {
19987 + if (regs->ip == VSYSCALL_ADDR(__NR_vgettimeofday) ||
19988 + regs->ip == VSYSCALL_ADDR(__NR_vtime) ||
19989 + regs->ip == VSYSCALL_ADDR(__NR_vgetcpu)) {
19990 + regs->ip += mm->context.vdso - PAGE_SIZE - VSYSCALL_START;
19991 + return;
19992 + }
19993 + }
19994 +#endif
19995
19996 /* User mode accesses just cause a SIGSEGV */
19997 if (error_code & PF_USER) {
19998 @@ -720,6 +816,21 @@ __bad_area_nosemaphore(struct pt_regs *r
19999 if (is_errata100(regs, address))
20000 return;
20001
20002 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
20003 + if (pax_is_fetch_fault(regs, error_code, address)) {
20004 +
20005 +#ifdef CONFIG_PAX_EMUTRAMP
20006 + switch (pax_handle_fetch_fault(regs)) {
20007 + case 2:
20008 + return;
20009 + }
20010 +#endif
20011 +
20012 + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
20013 + do_group_exit(SIGKILL);
20014 + }
20015 +#endif
20016 +
20017 if (unlikely(show_unhandled_signals))
20018 show_signal_msg(regs, error_code, address, tsk);
20019
20020 @@ -871,6 +982,99 @@ static int spurious_fault_check(unsigned
20021 return 1;
20022 }
20023
20024 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
20025 +static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
20026 +{
20027 + pte_t *pte;
20028 + pmd_t *pmd;
20029 + spinlock_t *ptl;
20030 + unsigned char pte_mask;
20031 +
20032 + if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
20033 + !(mm->pax_flags & MF_PAX_PAGEEXEC))
20034 + return 0;
20035 +
20036 + /* PaX: it's our fault, let's handle it if we can */
20037 +
20038 + /* PaX: take a look at read faults before acquiring any locks */
20039 + if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
20040 + /* instruction fetch attempt from a protected page in user mode */
20041 + up_read(&mm->mmap_sem);
20042 +
20043 +#ifdef CONFIG_PAX_EMUTRAMP
20044 + switch (pax_handle_fetch_fault(regs)) {
20045 + case 2:
20046 + return 1;
20047 + }
20048 +#endif
20049 +
20050 + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
20051 + do_group_exit(SIGKILL);
20052 + }
20053 +
20054 + pmd = pax_get_pmd(mm, address);
20055 + if (unlikely(!pmd))
20056 + return 0;
20057 +
20058 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
20059 + if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
20060 + pte_unmap_unlock(pte, ptl);
20061 + return 0;
20062 + }
20063 +
20064 + if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
20065 + /* write attempt to a protected page in user mode */
20066 + pte_unmap_unlock(pte, ptl);
20067 + return 0;
20068 + }
20069 +
20070 +#ifdef CONFIG_SMP
20071 + if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
20072 +#else
20073 + if (likely(address > get_limit(regs->cs)))
20074 +#endif
20075 + {
20076 + set_pte(pte, pte_mkread(*pte));
20077 + __flush_tlb_one(address);
20078 + pte_unmap_unlock(pte, ptl);
20079 + up_read(&mm->mmap_sem);
20080 + return 1;
20081 + }
20082 +
20083 + pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
20084 +
20085 + /*
20086 + * PaX: fill DTLB with user rights and retry
20087 + */
20088 + __asm__ __volatile__ (
20089 + "orb %2,(%1)\n"
20090 +#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
20091 +/*
20092 + * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
20093 + * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
20094 + * page fault when examined during a TLB load attempt. this is true not only
20095 + * for PTEs holding a non-present entry but also present entries that will
20096 + * raise a page fault (such as those set up by PaX, or the copy-on-write
20097 + * mechanism). in effect it means that we do *not* need to flush the TLBs
20098 + * for our target pages since their PTEs are simply not in the TLBs at all.
20099 +
20100 + * the best thing in omitting it is that we gain around 15-20% speed in the
20101 + * fast path of the page fault handler and can get rid of tracing since we
20102 + * can no longer flush unintended entries.
20103 + */
20104 + "invlpg (%0)\n"
20105 +#endif
20106 + __copyuser_seg"testb $0,(%0)\n"
20107 + "xorb %3,(%1)\n"
20108 + :
20109 + : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
20110 + : "memory", "cc");
20111 + pte_unmap_unlock(pte, ptl);
20112 + up_read(&mm->mmap_sem);
20113 + return 1;
20114 +}
20115 +#endif
20116 +
20117 /*
20118 * Handle a spurious fault caused by a stale TLB entry.
20119 *
20120 @@ -943,6 +1147,9 @@ int show_unhandled_signals = 1;
20121 static inline int
20122 access_error(unsigned long error_code, struct vm_area_struct *vma)
20123 {
20124 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
20125 + return 1;
20126 +
20127 if (error_code & PF_WRITE) {
20128 /* write, present and write, not present: */
20129 if (unlikely(!(vma->vm_flags & VM_WRITE)))
20130 @@ -976,19 +1183,33 @@ do_page_fault(struct pt_regs *regs, unsi
20131 {
20132 struct vm_area_struct *vma;
20133 struct task_struct *tsk;
20134 - unsigned long address;
20135 struct mm_struct *mm;
20136 int fault;
20137 int write = error_code & PF_WRITE;
20138 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
20139 (write ? FAULT_FLAG_WRITE : 0);
20140
20141 + /* Get the faulting address: */
20142 + unsigned long address = read_cr2();
20143 +
20144 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20145 + if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
20146 + if (!search_exception_tables(regs->ip)) {
20147 + bad_area_nosemaphore(regs, error_code, address);
20148 + return;
20149 + }
20150 + if (address < PAX_USER_SHADOW_BASE) {
20151 + printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
20152 + printk(KERN_ERR "PAX: faulting IP: %pA\n", (void *)regs->ip);
20153 + show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
20154 + } else
20155 + address -= PAX_USER_SHADOW_BASE;
20156 + }
20157 +#endif
20158 +
20159 tsk = current;
20160 mm = tsk->mm;
20161
20162 - /* Get the faulting address: */
20163 - address = read_cr2();
20164 -
20165 /*
20166 * Detect and handle instructions that would cause a page fault for
20167 * both a tracked kernel page and a userspace page.
20168 @@ -1048,7 +1269,7 @@ do_page_fault(struct pt_regs *regs, unsi
20169 * User-mode registers count as a user access even for any
20170 * potential system fault or CPU buglet:
20171 */
20172 - if (user_mode_vm(regs)) {
20173 + if (user_mode(regs)) {
20174 local_irq_enable();
20175 error_code |= PF_USER;
20176 } else {
20177 @@ -1103,6 +1324,11 @@ retry:
20178 might_sleep();
20179 }
20180
20181 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
20182 + if (pax_handle_pageexec_fault(regs, mm, address, error_code))
20183 + return;
20184 +#endif
20185 +
20186 vma = find_vma(mm, address);
20187 if (unlikely(!vma)) {
20188 bad_area(regs, error_code, address);
20189 @@ -1114,18 +1340,24 @@ retry:
20190 bad_area(regs, error_code, address);
20191 return;
20192 }
20193 - if (error_code & PF_USER) {
20194 - /*
20195 - * Accessing the stack below %sp is always a bug.
20196 - * The large cushion allows instructions like enter
20197 - * and pusha to work. ("enter $65535, $31" pushes
20198 - * 32 pointers and then decrements %sp by 65535.)
20199 - */
20200 - if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
20201 - bad_area(regs, error_code, address);
20202 - return;
20203 - }
20204 + /*
20205 + * Accessing the stack below %sp is always a bug.
20206 + * The large cushion allows instructions like enter
20207 + * and pusha to work. ("enter $65535, $31" pushes
20208 + * 32 pointers and then decrements %sp by 65535.)
20209 + */
20210 + if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
20211 + bad_area(regs, error_code, address);
20212 + return;
20213 }
20214 +
20215 +#ifdef CONFIG_PAX_SEGMEXEC
20216 + if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
20217 + bad_area(regs, error_code, address);
20218 + return;
20219 + }
20220 +#endif
20221 +
20222 if (unlikely(expand_stack(vma, address))) {
20223 bad_area(regs, error_code, address);
20224 return;
20225 @@ -1180,3 +1412,230 @@ good_area:
20226
20227 up_read(&mm->mmap_sem);
20228 }
20229 +
20230 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
20231 +static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
20232 +{
20233 + struct mm_struct *mm = current->mm;
20234 + unsigned long ip = regs->ip;
20235 +
20236 + if (v8086_mode(regs))
20237 + ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
20238 +
20239 +#ifdef CONFIG_PAX_PAGEEXEC
20240 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
20241 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
20242 + return true;
20243 + if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
20244 + return true;
20245 + return false;
20246 + }
20247 +#endif
20248 +
20249 +#ifdef CONFIG_PAX_SEGMEXEC
20250 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
20251 + if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
20252 + return true;
20253 + return false;
20254 + }
20255 +#endif
20256 +
20257 + return false;
20258 +}
20259 +#endif
20260 +
20261 +#ifdef CONFIG_PAX_EMUTRAMP
20262 +static int pax_handle_fetch_fault_32(struct pt_regs *regs)
20263 +{
20264 + int err;
20265 +
20266 + do { /* PaX: gcc trampoline emulation #1 */
20267 + unsigned char mov1, mov2;
20268 + unsigned short jmp;
20269 + unsigned int addr1, addr2;
20270 +
20271 +#ifdef CONFIG_X86_64
20272 + if ((regs->ip + 11) >> 32)
20273 + break;
20274 +#endif
20275 +
20276 + err = get_user(mov1, (unsigned char __user *)regs->ip);
20277 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
20278 + err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
20279 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
20280 + err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
20281 +
20282 + if (err)
20283 + break;
20284 +
20285 + if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
20286 + regs->cx = addr1;
20287 + regs->ax = addr2;
20288 + regs->ip = addr2;
20289 + return 2;
20290 + }
20291 + } while (0);
20292 +
20293 + do { /* PaX: gcc trampoline emulation #2 */
20294 + unsigned char mov, jmp;
20295 + unsigned int addr1, addr2;
20296 +
20297 +#ifdef CONFIG_X86_64
20298 + if ((regs->ip + 9) >> 32)
20299 + break;
20300 +#endif
20301 +
20302 + err = get_user(mov, (unsigned char __user *)regs->ip);
20303 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
20304 + err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
20305 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
20306 +
20307 + if (err)
20308 + break;
20309 +
20310 + if (mov == 0xB9 && jmp == 0xE9) {
20311 + regs->cx = addr1;
20312 + regs->ip = (unsigned int)(regs->ip + addr2 + 10);
20313 + return 2;
20314 + }
20315 + } while (0);
20316 +
20317 + return 1; /* PaX in action */
20318 +}
20319 +
20320 +#ifdef CONFIG_X86_64
20321 +static int pax_handle_fetch_fault_64(struct pt_regs *regs)
20322 +{
20323 + int err;
20324 +
20325 + do { /* PaX: gcc trampoline emulation #1 */
20326 + unsigned short mov1, mov2, jmp1;
20327 + unsigned char jmp2;
20328 + unsigned int addr1;
20329 + unsigned long addr2;
20330 +
20331 + err = get_user(mov1, (unsigned short __user *)regs->ip);
20332 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
20333 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
20334 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
20335 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
20336 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
20337 +
20338 + if (err)
20339 + break;
20340 +
20341 + if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
20342 + regs->r11 = addr1;
20343 + regs->r10 = addr2;
20344 + regs->ip = addr1;
20345 + return 2;
20346 + }
20347 + } while (0);
20348 +
20349 + do { /* PaX: gcc trampoline emulation #2 */
20350 + unsigned short mov1, mov2, jmp1;
20351 + unsigned char jmp2;
20352 + unsigned long addr1, addr2;
20353 +
20354 + err = get_user(mov1, (unsigned short __user *)regs->ip);
20355 + err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
20356 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
20357 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
20358 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
20359 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
20360 +
20361 + if (err)
20362 + break;
20363 +
20364 + if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
20365 + regs->r11 = addr1;
20366 + regs->r10 = addr2;
20367 + regs->ip = addr1;
20368 + return 2;
20369 + }
20370 + } while (0);
20371 +
20372 + return 1; /* PaX in action */
20373 +}
20374 +#endif
20375 +
20376 +/*
20377 + * PaX: decide what to do with offenders (regs->ip = fault address)
20378 + *
20379 + * returns 1 when task should be killed
20380 + * 2 when gcc trampoline was detected
20381 + */
20382 +static int pax_handle_fetch_fault(struct pt_regs *regs)
20383 +{
20384 + if (v8086_mode(regs))
20385 + return 1;
20386 +
20387 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
20388 + return 1;
20389 +
20390 +#ifdef CONFIG_X86_32
20391 + return pax_handle_fetch_fault_32(regs);
20392 +#else
20393 + if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
20394 + return pax_handle_fetch_fault_32(regs);
20395 + else
20396 + return pax_handle_fetch_fault_64(regs);
20397 +#endif
20398 +}
20399 +#endif
20400 +
20401 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
20402 +void pax_report_insns(void *pc, void *sp)
20403 +{
20404 + long i;
20405 +
20406 + printk(KERN_ERR "PAX: bytes at PC: ");
20407 + for (i = 0; i < 20; i++) {
20408 + unsigned char c;
20409 + if (get_user(c, (unsigned char __force_user *)pc+i))
20410 + printk(KERN_CONT "?? ");
20411 + else
20412 + printk(KERN_CONT "%02x ", c);
20413 + }
20414 + printk("\n");
20415 +
20416 + printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
20417 + for (i = -1; i < 80 / (long)sizeof(long); i++) {
20418 + unsigned long c;
20419 + if (get_user(c, (unsigned long __force_user *)sp+i))
20420 +#ifdef CONFIG_X86_32
20421 + printk(KERN_CONT "???????? ");
20422 +#else
20423 + printk(KERN_CONT "???????????????? ");
20424 +#endif
20425 + else
20426 + printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
20427 + }
20428 + printk("\n");
20429 +}
20430 +#endif
20431 +
20432 +/**
20433 + * probe_kernel_write(): safely attempt to write to a location
20434 + * @dst: address to write to
20435 + * @src: pointer to the data that shall be written
20436 + * @size: size of the data chunk
20437 + *
20438 + * Safely write to address @dst from the buffer at @src. If a kernel fault
20439 + * happens, handle that and return -EFAULT.
20440 + */
20441 +long notrace probe_kernel_write(void *dst, const void *src, size_t size)
20442 +{
20443 + long ret;
20444 + mm_segment_t old_fs = get_fs();
20445 +
20446 + set_fs(KERNEL_DS);
20447 + pagefault_disable();
20448 + pax_open_kernel();
20449 + ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
20450 + pax_close_kernel();
20451 + pagefault_enable();
20452 + set_fs(old_fs);
20453 +
20454 + return ret ? -EFAULT : 0;
20455 +}
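For context on the byte patterns matched by pax_handle_fetch_fault_32() above: 0xB9 / 0xB8 / 0xFF 0xE0 is "mov $imm32,%ecx; mov $imm32,%eax; jmp *%eax" and 0xB9 / 0xE9 is "mov $imm32,%ecx; jmp rel32", the two i386 stack-trampoline layouts gcc emits when the address of a nested function escapes. A minimal user-space sketch of such code follows; it is illustrative only and not taken from the patch. Built as a 32-bit binary with a non-executable stack, the first call through the trampoline raises the fetch fault that EMUTRAMP decodes and emulates instead of killing the task.

#include <stdio.h>

static void invoke(void (*cb)(void))
{
	cb();
}

int main(void)
{
	int captured = 42;

	void show(void)			/* GNU C nested function */
	{
		printf("captured = %d\n", captured);
	}

	/*
	 * Passing show() by address makes gcc materialize a small trampoline
	 * on the stack that loads the static chain into %ecx and jumps to
	 * show(); executing that trampoline is what faults under PAGEEXEC or
	 * SEGMEXEC and lands in the emulation code above.
	 */
	invoke(show);
	return 0;
}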
20456 diff -urNp linux-3.0.8/arch/x86/mm/gup.c linux-3.0.8/arch/x86/mm/gup.c
20457 --- linux-3.0.8/arch/x86/mm/gup.c 2011-07-21 22:17:23.000000000 -0400
20458 +++ linux-3.0.8/arch/x86/mm/gup.c 2011-08-23 21:47:55.000000000 -0400
20459 @@ -263,7 +263,7 @@ int __get_user_pages_fast(unsigned long
20460 addr = start;
20461 len = (unsigned long) nr_pages << PAGE_SHIFT;
20462 end = start + len;
20463 - if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
20464 + if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
20465 (void __user *)start, len)))
20466 return 0;
20467
20468 diff -urNp linux-3.0.8/arch/x86/mm/highmem_32.c linux-3.0.8/arch/x86/mm/highmem_32.c
20469 --- linux-3.0.8/arch/x86/mm/highmem_32.c 2011-07-21 22:17:23.000000000 -0400
20470 +++ linux-3.0.8/arch/x86/mm/highmem_32.c 2011-08-23 21:47:55.000000000 -0400
20471 @@ -44,7 +44,10 @@ void *kmap_atomic_prot(struct page *page
20472 idx = type + KM_TYPE_NR*smp_processor_id();
20473 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
20474 BUG_ON(!pte_none(*(kmap_pte-idx)));
20475 +
20476 + pax_open_kernel();
20477 set_pte(kmap_pte-idx, mk_pte(page, prot));
20478 + pax_close_kernel();
20479
20480 return (void *)vaddr;
20481 }
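The pax_open_kernel()/pax_close_kernel() pair wrapped around set_pte() here (and in several later hunks) brackets writes to mappings that KERNEXEC otherwise keeps read-only. A rough sketch of the mechanism, assuming the CR0.WP-based implementation under CONFIG_PAX_KERNEXEC (without it the helpers are effectively no-ops); the _sketch names are illustrative, and the real helpers take no argument and also cover the paravirt case:

#include <linux/preempt.h>

#define CR0_WP_BIT	(1UL << 16)	/* CR0.WP: honor page-level write protection in ring 0 */

static unsigned long pax_open_kernel_sketch(void)
{
	unsigned long cr0;

	preempt_disable();
	asm volatile("mov %%cr0, %0" : "=r" (cr0));
	/* clearing WP lets the kernel write through read-only PTEs */
	asm volatile("mov %0, %%cr0" : : "r" (cr0 & ~CR0_WP_BIT) : "memory");
	return cr0;
}

static void pax_close_kernel_sketch(unsigned long cr0)
{
	asm volatile("mov %0, %%cr0" : : "r" (cr0) : "memory");	/* restore WP */
	preempt_enable();
}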
20482 diff -urNp linux-3.0.8/arch/x86/mm/hugetlbpage.c linux-3.0.8/arch/x86/mm/hugetlbpage.c
20483 --- linux-3.0.8/arch/x86/mm/hugetlbpage.c 2011-07-21 22:17:23.000000000 -0400
20484 +++ linux-3.0.8/arch/x86/mm/hugetlbpage.c 2011-08-23 21:47:55.000000000 -0400
20485 @@ -266,13 +266,20 @@ static unsigned long hugetlb_get_unmappe
20486 struct hstate *h = hstate_file(file);
20487 struct mm_struct *mm = current->mm;
20488 struct vm_area_struct *vma;
20489 - unsigned long start_addr;
20490 + unsigned long start_addr, pax_task_size = TASK_SIZE;
20491 +
20492 +#ifdef CONFIG_PAX_SEGMEXEC
20493 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
20494 + pax_task_size = SEGMEXEC_TASK_SIZE;
20495 +#endif
20496 +
20497 + pax_task_size -= PAGE_SIZE;
20498
20499 if (len > mm->cached_hole_size) {
20500 - start_addr = mm->free_area_cache;
20501 + start_addr = mm->free_area_cache;
20502 } else {
20503 - start_addr = TASK_UNMAPPED_BASE;
20504 - mm->cached_hole_size = 0;
20505 + start_addr = mm->mmap_base;
20506 + mm->cached_hole_size = 0;
20507 }
20508
20509 full_search:
20510 @@ -280,26 +287,27 @@ full_search:
20511
20512 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
20513 /* At this point: (!vma || addr < vma->vm_end). */
20514 - if (TASK_SIZE - len < addr) {
20515 + if (pax_task_size - len < addr) {
20516 /*
20517 * Start a new search - just in case we missed
20518 * some holes.
20519 */
20520 - if (start_addr != TASK_UNMAPPED_BASE) {
20521 - start_addr = TASK_UNMAPPED_BASE;
20522 + if (start_addr != mm->mmap_base) {
20523 + start_addr = mm->mmap_base;
20524 mm->cached_hole_size = 0;
20525 goto full_search;
20526 }
20527 return -ENOMEM;
20528 }
20529 - if (!vma || addr + len <= vma->vm_start) {
20530 - mm->free_area_cache = addr + len;
20531 - return addr;
20532 - }
20533 + if (check_heap_stack_gap(vma, addr, len))
20534 + break;
20535 if (addr + mm->cached_hole_size < vma->vm_start)
20536 mm->cached_hole_size = vma->vm_start - addr;
20537 addr = ALIGN(vma->vm_end, huge_page_size(h));
20538 }
20539 +
20540 + mm->free_area_cache = addr + len;
20541 + return addr;
20542 }
20543
20544 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
20545 @@ -308,10 +316,9 @@ static unsigned long hugetlb_get_unmappe
20546 {
20547 struct hstate *h = hstate_file(file);
20548 struct mm_struct *mm = current->mm;
20549 - struct vm_area_struct *vma, *prev_vma;
20550 - unsigned long base = mm->mmap_base, addr = addr0;
20551 + struct vm_area_struct *vma;
20552 + unsigned long base = mm->mmap_base, addr;
20553 unsigned long largest_hole = mm->cached_hole_size;
20554 - int first_time = 1;
20555
20556 /* don't allow allocations above current base */
20557 if (mm->free_area_cache > base)
20558 @@ -321,64 +328,63 @@ static unsigned long hugetlb_get_unmappe
20559 largest_hole = 0;
20560 mm->free_area_cache = base;
20561 }
20562 -try_again:
20563 +
20564 /* make sure it can fit in the remaining address space */
20565 if (mm->free_area_cache < len)
20566 goto fail;
20567
20568 /* either no address requested or can't fit in requested address hole */
20569 - addr = (mm->free_area_cache - len) & huge_page_mask(h);
20570 + addr = (mm->free_area_cache - len);
20571 do {
20572 + addr &= huge_page_mask(h);
20573 + vma = find_vma(mm, addr);
20574 /*
20575 * Lookup failure means no vma is above this address,
20576 * i.e. return with success:
20577 - */
20578 - if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
20579 - return addr;
20580 -
20581 - /*
20582 * new region fits between prev_vma->vm_end and
20583 * vma->vm_start, use it:
20584 */
20585 - if (addr + len <= vma->vm_start &&
20586 - (!prev_vma || (addr >= prev_vma->vm_end))) {
20587 + if (check_heap_stack_gap(vma, addr, len)) {
20588 /* remember the address as a hint for next time */
20589 - mm->cached_hole_size = largest_hole;
20590 - return (mm->free_area_cache = addr);
20591 - } else {
20592 - /* pull free_area_cache down to the first hole */
20593 - if (mm->free_area_cache == vma->vm_end) {
20594 - mm->free_area_cache = vma->vm_start;
20595 - mm->cached_hole_size = largest_hole;
20596 - }
20597 + mm->cached_hole_size = largest_hole;
20598 + return (mm->free_area_cache = addr);
20599 + }
20600 + /* pull free_area_cache down to the first hole */
20601 + if (mm->free_area_cache == vma->vm_end) {
20602 + mm->free_area_cache = vma->vm_start;
20603 + mm->cached_hole_size = largest_hole;
20604 }
20605
20606 /* remember the largest hole we saw so far */
20607 if (addr + largest_hole < vma->vm_start)
20608 - largest_hole = vma->vm_start - addr;
20609 + largest_hole = vma->vm_start - addr;
20610
20611 /* try just below the current vma->vm_start */
20612 - addr = (vma->vm_start - len) & huge_page_mask(h);
20613 - } while (len <= vma->vm_start);
20614 + addr = skip_heap_stack_gap(vma, len);
20615 + } while (!IS_ERR_VALUE(addr));
20616
20617 fail:
20618 /*
20619 - * if hint left us with no space for the requested
20620 - * mapping then try again:
20621 - */
20622 - if (first_time) {
20623 - mm->free_area_cache = base;
20624 - largest_hole = 0;
20625 - first_time = 0;
20626 - goto try_again;
20627 - }
20628 - /*
20629 * A failed mmap() very likely causes application failure,
20630 * so fall back to the bottom-up function here. This scenario
20631 * can happen with large stack limits and large mmap()
20632 * allocations.
20633 */
20634 - mm->free_area_cache = TASK_UNMAPPED_BASE;
20635 +
20636 +#ifdef CONFIG_PAX_SEGMEXEC
20637 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
20638 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
20639 + else
20640 +#endif
20641 +
20642 + mm->mmap_base = TASK_UNMAPPED_BASE;
20643 +
20644 +#ifdef CONFIG_PAX_RANDMMAP
20645 + if (mm->pax_flags & MF_PAX_RANDMMAP)
20646 + mm->mmap_base += mm->delta_mmap;
20647 +#endif
20648 +
20649 + mm->free_area_cache = mm->mmap_base;
20650 mm->cached_hole_size = ~0UL;
20651 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
20652 len, pgoff, flags);
20653 @@ -386,6 +392,7 @@ fail:
20654 /*
20655 * Restore the topdown base:
20656 */
20657 + mm->mmap_base = base;
20658 mm->free_area_cache = base;
20659 mm->cached_hole_size = ~0UL;
20660
20661 @@ -399,10 +406,19 @@ hugetlb_get_unmapped_area(struct file *f
20662 struct hstate *h = hstate_file(file);
20663 struct mm_struct *mm = current->mm;
20664 struct vm_area_struct *vma;
20665 + unsigned long pax_task_size = TASK_SIZE;
20666
20667 if (len & ~huge_page_mask(h))
20668 return -EINVAL;
20669 - if (len > TASK_SIZE)
20670 +
20671 +#ifdef CONFIG_PAX_SEGMEXEC
20672 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
20673 + pax_task_size = SEGMEXEC_TASK_SIZE;
20674 +#endif
20675 +
20676 + pax_task_size -= PAGE_SIZE;
20677 +
20678 + if (len > pax_task_size)
20679 return -ENOMEM;
20680
20681 if (flags & MAP_FIXED) {
20682 @@ -414,8 +430,7 @@ hugetlb_get_unmapped_area(struct file *f
20683 if (addr) {
20684 addr = ALIGN(addr, huge_page_size(h));
20685 vma = find_vma(mm, addr);
20686 - if (TASK_SIZE - len >= addr &&
20687 - (!vma || addr + len <= vma->vm_start))
20688 + if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
20689 return addr;
20690 }
20691 if (mm->get_unmapped_area == arch_get_unmapped_area)
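The hunks above (like the earlier osf_sys.c one) replace the open-coded "!vma || addr + len <= vma->vm_start" test with check_heap_stack_gap(). Its definition is not shown in this section; a plausible minimal form, read from the call sites and assuming a sysctl_heap_stack_gap knob measured in bytes, would be:

#include <linux/mm.h>

extern unsigned long sysctl_heap_stack_gap;	/* grsecurity sysctl; type and units assumed for this sketch */

static bool check_heap_stack_gap_sketch(const struct vm_area_struct *vma,
					unsigned long addr, unsigned long len)
{
	if (!vma)
		return true;			/* no VMA above the candidate range */
	if (vma->vm_flags & VM_GROWSDOWN)	/* keep a guard gap below a stack */
		return addr + len + sysctl_heap_stack_gap <= vma->vm_start;
	return addr + len <= vma->vm_start;	/* plain non-overlap check */
}

The practical effect of switching every allocator to such a helper is that a returned mapping can no longer be placed flush against a downward-growing stack VMA.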
20692 diff -urNp linux-3.0.8/arch/x86/mm/init_32.c linux-3.0.8/arch/x86/mm/init_32.c
20693 --- linux-3.0.8/arch/x86/mm/init_32.c 2011-07-21 22:17:23.000000000 -0400
20694 +++ linux-3.0.8/arch/x86/mm/init_32.c 2011-08-23 21:47:55.000000000 -0400
20695 @@ -74,36 +74,6 @@ static __init void *alloc_low_page(void)
20696 }
20697
20698 /*
20699 - * Creates a middle page table and puts a pointer to it in the
20700 - * given global directory entry. This only returns the gd entry
20701 - * in non-PAE compilation mode, since the middle layer is folded.
20702 - */
20703 -static pmd_t * __init one_md_table_init(pgd_t *pgd)
20704 -{
20705 - pud_t *pud;
20706 - pmd_t *pmd_table;
20707 -
20708 -#ifdef CONFIG_X86_PAE
20709 - if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
20710 - if (after_bootmem)
20711 - pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
20712 - else
20713 - pmd_table = (pmd_t *)alloc_low_page();
20714 - paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
20715 - set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
20716 - pud = pud_offset(pgd, 0);
20717 - BUG_ON(pmd_table != pmd_offset(pud, 0));
20718 -
20719 - return pmd_table;
20720 - }
20721 -#endif
20722 - pud = pud_offset(pgd, 0);
20723 - pmd_table = pmd_offset(pud, 0);
20724 -
20725 - return pmd_table;
20726 -}
20727 -
20728 -/*
20729 * Create a page table and place a pointer to it in a middle page
20730 * directory entry:
20731 */
20732 @@ -123,13 +93,28 @@ static pte_t * __init one_page_table_ini
20733 page_table = (pte_t *)alloc_low_page();
20734
20735 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
20736 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
20737 + set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
20738 +#else
20739 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
20740 +#endif
20741 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
20742 }
20743
20744 return pte_offset_kernel(pmd, 0);
20745 }
20746
20747 +static pmd_t * __init one_md_table_init(pgd_t *pgd)
20748 +{
20749 + pud_t *pud;
20750 + pmd_t *pmd_table;
20751 +
20752 + pud = pud_offset(pgd, 0);
20753 + pmd_table = pmd_offset(pud, 0);
20754 +
20755 + return pmd_table;
20756 +}
20757 +
20758 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
20759 {
20760 int pgd_idx = pgd_index(vaddr);
20761 @@ -203,6 +188,7 @@ page_table_range_init(unsigned long star
20762 int pgd_idx, pmd_idx;
20763 unsigned long vaddr;
20764 pgd_t *pgd;
20765 + pud_t *pud;
20766 pmd_t *pmd;
20767 pte_t *pte = NULL;
20768
20769 @@ -212,8 +198,13 @@ page_table_range_init(unsigned long star
20770 pgd = pgd_base + pgd_idx;
20771
20772 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
20773 - pmd = one_md_table_init(pgd);
20774 - pmd = pmd + pmd_index(vaddr);
20775 + pud = pud_offset(pgd, vaddr);
20776 + pmd = pmd_offset(pud, vaddr);
20777 +
20778 +#ifdef CONFIG_X86_PAE
20779 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
20780 +#endif
20781 +
20782 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
20783 pmd++, pmd_idx++) {
20784 pte = page_table_kmap_check(one_page_table_init(pmd),
20785 @@ -225,11 +216,20 @@ page_table_range_init(unsigned long star
20786 }
20787 }
20788
20789 -static inline int is_kernel_text(unsigned long addr)
20790 +static inline int is_kernel_text(unsigned long start, unsigned long end)
20791 {
20792 - if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
20793 - return 1;
20794 - return 0;
20795 + if ((start > ktla_ktva((unsigned long)_etext) ||
20796 + end <= ktla_ktva((unsigned long)_stext)) &&
20797 + (start > ktla_ktva((unsigned long)_einittext) ||
20798 + end <= ktla_ktva((unsigned long)_sinittext)) &&
20799 +
20800 +#ifdef CONFIG_ACPI_SLEEP
20801 + (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
20802 +#endif
20803 +
20804 + (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
20805 + return 0;
20806 + return 1;
20807 }
20808
20809 /*
20810 @@ -246,9 +246,10 @@ kernel_physical_mapping_init(unsigned lo
20811 unsigned long last_map_addr = end;
20812 unsigned long start_pfn, end_pfn;
20813 pgd_t *pgd_base = swapper_pg_dir;
20814 - int pgd_idx, pmd_idx, pte_ofs;
20815 + unsigned int pgd_idx, pmd_idx, pte_ofs;
20816 unsigned long pfn;
20817 pgd_t *pgd;
20818 + pud_t *pud;
20819 pmd_t *pmd;
20820 pte_t *pte;
20821 unsigned pages_2m, pages_4k;
20822 @@ -281,8 +282,13 @@ repeat:
20823 pfn = start_pfn;
20824 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
20825 pgd = pgd_base + pgd_idx;
20826 - for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
20827 - pmd = one_md_table_init(pgd);
20828 + for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
20829 + pud = pud_offset(pgd, 0);
20830 + pmd = pmd_offset(pud, 0);
20831 +
20832 +#ifdef CONFIG_X86_PAE
20833 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
20834 +#endif
20835
20836 if (pfn >= end_pfn)
20837 continue;
20838 @@ -294,14 +300,13 @@ repeat:
20839 #endif
20840 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
20841 pmd++, pmd_idx++) {
20842 - unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
20843 + unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
20844
20845 /*
20846 * Map with big pages if possible, otherwise
20847 * create normal page tables:
20848 */
20849 if (use_pse) {
20850 - unsigned int addr2;
20851 pgprot_t prot = PAGE_KERNEL_LARGE;
20852 /*
20853 * first pass will use the same initial
20854 @@ -311,11 +316,7 @@ repeat:
20855 __pgprot(PTE_IDENT_ATTR |
20856 _PAGE_PSE);
20857
20858 - addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
20859 - PAGE_OFFSET + PAGE_SIZE-1;
20860 -
20861 - if (is_kernel_text(addr) ||
20862 - is_kernel_text(addr2))
20863 + if (is_kernel_text(address, address + PMD_SIZE))
20864 prot = PAGE_KERNEL_LARGE_EXEC;
20865
20866 pages_2m++;
20867 @@ -332,7 +333,7 @@ repeat:
20868 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
20869 pte += pte_ofs;
20870 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
20871 - pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
20872 + pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
20873 pgprot_t prot = PAGE_KERNEL;
20874 /*
20875 * first pass will use the same initial
20876 @@ -340,7 +341,7 @@ repeat:
20877 */
20878 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
20879
20880 - if (is_kernel_text(addr))
20881 + if (is_kernel_text(address, address + PAGE_SIZE))
20882 prot = PAGE_KERNEL_EXEC;
20883
20884 pages_4k++;
20885 @@ -472,7 +473,7 @@ void __init native_pagetable_setup_start
20886
20887 pud = pud_offset(pgd, va);
20888 pmd = pmd_offset(pud, va);
20889 - if (!pmd_present(*pmd))
20890 + if (!pmd_present(*pmd) || pmd_huge(*pmd))
20891 break;
20892
20893 pte = pte_offset_kernel(pmd, va);
20894 @@ -524,12 +525,10 @@ void __init early_ioremap_page_table_ran
20895
20896 static void __init pagetable_init(void)
20897 {
20898 - pgd_t *pgd_base = swapper_pg_dir;
20899 -
20900 - permanent_kmaps_init(pgd_base);
20901 + permanent_kmaps_init(swapper_pg_dir);
20902 }
20903
20904 -pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
20905 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
20906 EXPORT_SYMBOL_GPL(__supported_pte_mask);
20907
20908 /* user-defined highmem size */
20909 @@ -757,6 +756,12 @@ void __init mem_init(void)
20910
20911 pci_iommu_alloc();
20912
20913 +#ifdef CONFIG_PAX_PER_CPU_PGD
20914 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
20915 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
20916 + KERNEL_PGD_PTRS);
20917 +#endif
20918 +
20919 #ifdef CONFIG_FLATMEM
20920 BUG_ON(!mem_map);
20921 #endif
20922 @@ -774,7 +779,7 @@ void __init mem_init(void)
20923 set_highmem_pages_init();
20924
20925 codesize = (unsigned long) &_etext - (unsigned long) &_text;
20926 - datasize = (unsigned long) &_edata - (unsigned long) &_etext;
20927 + datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
20928 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
20929
20930 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
20931 @@ -815,10 +820,10 @@ void __init mem_init(void)
20932 ((unsigned long)&__init_end -
20933 (unsigned long)&__init_begin) >> 10,
20934
20935 - (unsigned long)&_etext, (unsigned long)&_edata,
20936 - ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
20937 + (unsigned long)&_sdata, (unsigned long)&_edata,
20938 + ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
20939
20940 - (unsigned long)&_text, (unsigned long)&_etext,
20941 + ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
20942 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
20943
20944 /*
20945 @@ -896,6 +901,7 @@ void set_kernel_text_rw(void)
20946 if (!kernel_set_to_readonly)
20947 return;
20948
20949 + start = ktla_ktva(start);
20950 pr_debug("Set kernel text: %lx - %lx for read write\n",
20951 start, start+size);
20952
20953 @@ -910,6 +916,7 @@ void set_kernel_text_ro(void)
20954 if (!kernel_set_to_readonly)
20955 return;
20956
20957 + start = ktla_ktva(start);
20958 pr_debug("Set kernel text: %lx - %lx for read only\n",
20959 start, start+size);
20960
20961 @@ -938,6 +945,7 @@ void mark_rodata_ro(void)
20962 unsigned long start = PFN_ALIGN(_text);
20963 unsigned long size = PFN_ALIGN(_etext) - start;
20964
20965 + start = ktla_ktva(start);
20966 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
20967 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
20968 size >> 10);
20969 diff -urNp linux-3.0.8/arch/x86/mm/init_64.c linux-3.0.8/arch/x86/mm/init_64.c
20970 --- linux-3.0.8/arch/x86/mm/init_64.c 2011-07-21 22:17:23.000000000 -0400
20971 +++ linux-3.0.8/arch/x86/mm/init_64.c 2011-10-06 04:17:55.000000000 -0400
20972 @@ -75,7 +75,7 @@ early_param("gbpages", parse_direct_gbpa
20973 * around without checking the pgd every time.
20974 */
20975
20976 -pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
20977 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
20978 EXPORT_SYMBOL_GPL(__supported_pte_mask);
20979
20980 int force_personality32;
20981 @@ -108,12 +108,22 @@ void sync_global_pgds(unsigned long star
20982
20983 for (address = start; address <= end; address += PGDIR_SIZE) {
20984 const pgd_t *pgd_ref = pgd_offset_k(address);
20985 +
20986 +#ifdef CONFIG_PAX_PER_CPU_PGD
20987 + unsigned long cpu;
20988 +#else
20989 struct page *page;
20990 +#endif
20991
20992 if (pgd_none(*pgd_ref))
20993 continue;
20994
20995 spin_lock(&pgd_lock);
20996 +
20997 +#ifdef CONFIG_PAX_PER_CPU_PGD
20998 + for (cpu = 0; cpu < NR_CPUS; ++cpu) {
20999 + pgd_t *pgd = pgd_offset_cpu(cpu, address);
21000 +#else
21001 list_for_each_entry(page, &pgd_list, lru) {
21002 pgd_t *pgd;
21003 spinlock_t *pgt_lock;
21004 @@ -122,6 +132,7 @@ void sync_global_pgds(unsigned long star
21005 /* the pgt_lock only for Xen */
21006 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
21007 spin_lock(pgt_lock);
21008 +#endif
21009
21010 if (pgd_none(*pgd))
21011 set_pgd(pgd, *pgd_ref);
21012 @@ -129,7 +140,10 @@ void sync_global_pgds(unsigned long star
21013 BUG_ON(pgd_page_vaddr(*pgd)
21014 != pgd_page_vaddr(*pgd_ref));
21015
21016 +#ifndef CONFIG_PAX_PER_CPU_PGD
21017 spin_unlock(pgt_lock);
21018 +#endif
21019 +
21020 }
21021 spin_unlock(&pgd_lock);
21022 }
21023 @@ -203,7 +217,9 @@ void set_pte_vaddr_pud(pud_t *pud_page,
21024 pmd = fill_pmd(pud, vaddr);
21025 pte = fill_pte(pmd, vaddr);
21026
21027 + pax_open_kernel();
21028 set_pte(pte, new_pte);
21029 + pax_close_kernel();
21030
21031 /*
21032 * It's enough to flush this one mapping.
21033 @@ -262,14 +278,12 @@ static void __init __init_extra_mapping(
21034 pgd = pgd_offset_k((unsigned long)__va(phys));
21035 if (pgd_none(*pgd)) {
21036 pud = (pud_t *) spp_getpage();
21037 - set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
21038 - _PAGE_USER));
21039 + set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
21040 }
21041 pud = pud_offset(pgd, (unsigned long)__va(phys));
21042 if (pud_none(*pud)) {
21043 pmd = (pmd_t *) spp_getpage();
21044 - set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
21045 - _PAGE_USER));
21046 + set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
21047 }
21048 pmd = pmd_offset(pud, phys);
21049 BUG_ON(!pmd_none(*pmd));
21050 @@ -330,7 +344,7 @@ static __ref void *alloc_low_page(unsign
21051 if (pfn >= pgt_buf_top)
21052 panic("alloc_low_page: ran out of memory");
21053
21054 - adr = early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
21055 + adr = (void __force_kernel *)early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
21056 clear_page(adr);
21057 *phys = pfn * PAGE_SIZE;
21058 return adr;
21059 @@ -346,7 +360,7 @@ static __ref void *map_low_page(void *vi
21060
21061 phys = __pa(virt);
21062 left = phys & (PAGE_SIZE - 1);
21063 - adr = early_memremap(phys & PAGE_MASK, PAGE_SIZE);
21064 + adr = (void __force_kernel *)early_memremap(phys & PAGE_MASK, PAGE_SIZE);
21065 adr = (void *)(((unsigned long)adr) | left);
21066
21067 return adr;
21068 @@ -693,6 +707,12 @@ void __init mem_init(void)
21069
21070 pci_iommu_alloc();
21071
21072 +#ifdef CONFIG_PAX_PER_CPU_PGD
21073 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
21074 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
21075 + KERNEL_PGD_PTRS);
21076 +#endif
21077 +
21078 /* clear_bss() already clear the empty_zero_page */
21079
21080 reservedpages = 0;
21081 @@ -853,8 +873,8 @@ int kern_addr_valid(unsigned long addr)
21082 static struct vm_area_struct gate_vma = {
21083 .vm_start = VSYSCALL_START,
21084 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
21085 - .vm_page_prot = PAGE_READONLY_EXEC,
21086 - .vm_flags = VM_READ | VM_EXEC
21087 + .vm_page_prot = PAGE_READONLY,
21088 + .vm_flags = VM_READ
21089 };
21090
21091 struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
21092 @@ -888,7 +908,7 @@ int in_gate_area_no_mm(unsigned long add
21093
21094 const char *arch_vma_name(struct vm_area_struct *vma)
21095 {
21096 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
21097 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
21098 return "[vdso]";
21099 if (vma == &gate_vma)
21100 return "[vsyscall]";
21101 diff -urNp linux-3.0.8/arch/x86/mm/init.c linux-3.0.8/arch/x86/mm/init.c
21102 --- linux-3.0.8/arch/x86/mm/init.c 2011-10-25 09:10:33.000000000 -0400
21103 +++ linux-3.0.8/arch/x86/mm/init.c 2011-10-25 09:10:41.000000000 -0400
21104 @@ -31,7 +31,7 @@ int direct_gbpages
21105 static void __init find_early_table_space(unsigned long end, int use_pse,
21106 int use_gbpages)
21107 {
21108 - unsigned long puds, pmds, ptes, tables, start = 0, good_end = end;
21109 + unsigned long puds, pmds, ptes, tables, start = 0x100000, good_end = end;
21110 phys_addr_t base;
21111
21112 puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
21113 @@ -312,12 +312,34 @@ unsigned long __init_refok init_memory_m
21114 */
21115 int devmem_is_allowed(unsigned long pagenr)
21116 {
21117 - if (pagenr <= 256)
21118 +#ifdef CONFIG_GRKERNSEC_KMEM
21119 + /* allow BDA */
21120 + if (!pagenr)
21121 + return 1;
21122 + /* allow EBDA */
21123 + if ((0x9f000 >> PAGE_SHIFT) == pagenr)
21124 + return 1;
21125 +#else
21126 + if (!pagenr)
21127 + return 1;
21128 +#ifdef CONFIG_VM86
21129 + if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
21130 + return 1;
21131 +#endif
21132 +#endif
21133 +
21134 + if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
21135 return 1;
21136 +#ifdef CONFIG_GRKERNSEC_KMEM
21137 + /* throw out everything else below 1MB */
21138 + if (pagenr <= 256)
21139 + return 0;
21140 +#endif
21141 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
21142 return 0;
21143 if (!page_is_ram(pagenr))
21144 return 1;
21145 +
21146 return 0;
21147 }
21148
21149 @@ -372,6 +394,86 @@ void free_init_pages(char *what, unsigne
21150
21151 void free_initmem(void)
21152 {
21153 +
21154 +#ifdef CONFIG_PAX_KERNEXEC
21155 +#ifdef CONFIG_X86_32
21156 + /* PaX: limit KERNEL_CS to actual size */
21157 + unsigned long addr, limit;
21158 + struct desc_struct d;
21159 + int cpu;
21160 +
21161 + limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
21162 + limit = (limit - 1UL) >> PAGE_SHIFT;
21163 +
21164 + memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
21165 + for (cpu = 0; cpu < NR_CPUS; cpu++) {
21166 + pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
21167 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
21168 + }
21169 +
21170 + /* PaX: make KERNEL_CS read-only */
21171 + addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
21172 + if (!paravirt_enabled())
21173 + set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
21174 +/*
21175 + for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
21176 + pgd = pgd_offset_k(addr);
21177 + pud = pud_offset(pgd, addr);
21178 + pmd = pmd_offset(pud, addr);
21179 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
21180 + }
21181 +*/
21182 +#ifdef CONFIG_X86_PAE
21183 + set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
21184 +/*
21185 + for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
21186 + pgd = pgd_offset_k(addr);
21187 + pud = pud_offset(pgd, addr);
21188 + pmd = pmd_offset(pud, addr);
21189 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
21190 + }
21191 +*/
21192 +#endif
21193 +
21194 +#ifdef CONFIG_MODULES
21195 + set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
21196 +#endif
21197 +
21198 +#else
21199 + pgd_t *pgd;
21200 + pud_t *pud;
21201 + pmd_t *pmd;
21202 + unsigned long addr, end;
21203 +
21204 + /* PaX: make kernel code/rodata read-only, rest non-executable */
21205 + for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
21206 + pgd = pgd_offset_k(addr);
21207 + pud = pud_offset(pgd, addr);
21208 + pmd = pmd_offset(pud, addr);
21209 + if (!pmd_present(*pmd))
21210 + continue;
21211 + if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
21212 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
21213 + else
21214 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
21215 + }
21216 +
21217 + addr = (unsigned long)__va(__pa(__START_KERNEL_map));
21218 + end = addr + KERNEL_IMAGE_SIZE;
21219 + for (; addr < end; addr += PMD_SIZE) {
21220 + pgd = pgd_offset_k(addr);
21221 + pud = pud_offset(pgd, addr);
21222 + pmd = pmd_offset(pud, addr);
21223 + if (!pmd_present(*pmd))
21224 + continue;
21225 + if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
21226 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
21227 + }
21228 +#endif
21229 +
21230 + flush_tlb_all();
21231 +#endif
21232 +
21233 free_init_pages("unused kernel memory",
21234 (unsigned long)(&__init_begin),
21235 (unsigned long)(&__init_end));
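The devmem_is_allowed() rework above is easier to follow with the #ifdef nesting flattened out. A self-contained sketch (not the patch's code): the last two parameters stand in for the kernel's iomem_is_exclusive() and page_is_ram() results, and the constants mirror the x86 values.

#include <stdbool.h>

#define PAGE_SHIFT		12
#define ISA_START_ADDRESS	0xa0000UL
#define ISA_END_ADDRESS		0x100000UL

static int devmem_allowed_sketch(unsigned long pagenr,
				 bool grkernsec_kmem, bool vm86,
				 bool exclusive, bool is_ram)
{
	if (!pagenr)
		return 1;					/* real-mode BDA */
	if (grkernsec_kmem)
		return pagenr == (0x9f000UL >> PAGE_SHIFT)	/* EBDA page */
		    || (pagenr >= (ISA_START_ADDRESS >> PAGE_SHIFT) &&
			pagenr <  (ISA_END_ADDRESS   >> PAGE_SHIFT))	/* ISA/VGA hole */
		    || (pagenr > 256 && !exclusive && !is_ram);
	if (vm86 && pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
		return 1;					/* low RAM for vm86 */
	if (pagenr >= (ISA_START_ADDRESS >> PAGE_SHIFT) &&
	    pagenr <  (ISA_END_ADDRESS   >> PAGE_SHIFT))
		return 1;
	if (exclusive)
		return 0;
	return !is_ram;
}

In short: with GRKERNSEC_KMEM only the BDA, the EBDA page, the ISA/VGA hole and non-exclusive non-RAM ranges above 1 MB stay mappable through /dev/mem; without it, low RAM below the ISA hole additionally remains reachable for vm86 users.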
21236 diff -urNp linux-3.0.8/arch/x86/mm/iomap_32.c linux-3.0.8/arch/x86/mm/iomap_32.c
21237 --- linux-3.0.8/arch/x86/mm/iomap_32.c 2011-07-21 22:17:23.000000000 -0400
21238 +++ linux-3.0.8/arch/x86/mm/iomap_32.c 2011-08-23 21:47:55.000000000 -0400
21239 @@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long
21240 type = kmap_atomic_idx_push();
21241 idx = type + KM_TYPE_NR * smp_processor_id();
21242 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
21243 +
21244 + pax_open_kernel();
21245 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
21246 + pax_close_kernel();
21247 +
21248 arch_flush_lazy_mmu_mode();
21249
21250 return (void *)vaddr;
21251 diff -urNp linux-3.0.8/arch/x86/mm/ioremap.c linux-3.0.8/arch/x86/mm/ioremap.c
21252 --- linux-3.0.8/arch/x86/mm/ioremap.c 2011-07-21 22:17:23.000000000 -0400
21253 +++ linux-3.0.8/arch/x86/mm/ioremap.c 2011-08-23 21:47:55.000000000 -0400
21254 @@ -97,7 +97,7 @@ static void __iomem *__ioremap_caller(re
21255 for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
21256 int is_ram = page_is_ram(pfn);
21257
21258 - if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
21259 + if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
21260 return NULL;
21261 WARN_ON_ONCE(is_ram);
21262 }
21263 @@ -344,7 +344,7 @@ static int __init early_ioremap_debug_se
21264 early_param("early_ioremap_debug", early_ioremap_debug_setup);
21265
21266 static __initdata int after_paging_init;
21267 -static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
21268 +static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
21269
21270 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
21271 {
21272 @@ -381,8 +381,7 @@ void __init early_ioremap_init(void)
21273 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
21274
21275 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
21276 - memset(bm_pte, 0, sizeof(bm_pte));
21277 - pmd_populate_kernel(&init_mm, pmd, bm_pte);
21278 + pmd_populate_user(&init_mm, pmd, bm_pte);
21279
21280 /*
21281 * The boot-ioremap range spans multiple pmds, for which
21282 diff -urNp linux-3.0.8/arch/x86/mm/kmemcheck/kmemcheck.c linux-3.0.8/arch/x86/mm/kmemcheck/kmemcheck.c
21283 --- linux-3.0.8/arch/x86/mm/kmemcheck/kmemcheck.c 2011-07-21 22:17:23.000000000 -0400
21284 +++ linux-3.0.8/arch/x86/mm/kmemcheck/kmemcheck.c 2011-08-23 21:47:55.000000000 -0400
21285 @@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *reg
21286 * memory (e.g. tracked pages)? For now, we need this to avoid
21287 * invoking kmemcheck for PnP BIOS calls.
21288 */
21289 - if (regs->flags & X86_VM_MASK)
21290 + if (v8086_mode(regs))
21291 return false;
21292 - if (regs->cs != __KERNEL_CS)
21293 + if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
21294 return false;
21295
21296 pte = kmemcheck_pte_lookup(address);
21297 diff -urNp linux-3.0.8/arch/x86/mm/mmap.c linux-3.0.8/arch/x86/mm/mmap.c
21298 --- linux-3.0.8/arch/x86/mm/mmap.c 2011-07-21 22:17:23.000000000 -0400
21299 +++ linux-3.0.8/arch/x86/mm/mmap.c 2011-08-23 21:47:55.000000000 -0400
21300 @@ -49,7 +49,7 @@ static unsigned int stack_maxrandom_size
21301 * Leave an at least ~128 MB hole with possible stack randomization.
21302 */
21303 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
21304 -#define MAX_GAP (TASK_SIZE/6*5)
21305 +#define MAX_GAP (pax_task_size/6*5)
21306
21307 /*
21308 * True on X86_32 or when emulating IA32 on X86_64
21309 @@ -94,27 +94,40 @@ static unsigned long mmap_rnd(void)
21310 return rnd << PAGE_SHIFT;
21311 }
21312
21313 -static unsigned long mmap_base(void)
21314 +static unsigned long mmap_base(struct mm_struct *mm)
21315 {
21316 unsigned long gap = rlimit(RLIMIT_STACK);
21317 + unsigned long pax_task_size = TASK_SIZE;
21318 +
21319 +#ifdef CONFIG_PAX_SEGMEXEC
21320 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
21321 + pax_task_size = SEGMEXEC_TASK_SIZE;
21322 +#endif
21323
21324 if (gap < MIN_GAP)
21325 gap = MIN_GAP;
21326 else if (gap > MAX_GAP)
21327 gap = MAX_GAP;
21328
21329 - return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
21330 + return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
21331 }
21332
21333 /*
21334 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
21335 * does, but not when emulating X86_32
21336 */
21337 -static unsigned long mmap_legacy_base(void)
21338 +static unsigned long mmap_legacy_base(struct mm_struct *mm)
21339 {
21340 - if (mmap_is_ia32())
21341 + if (mmap_is_ia32()) {
21342 +
21343 +#ifdef CONFIG_PAX_SEGMEXEC
21344 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
21345 + return SEGMEXEC_TASK_UNMAPPED_BASE;
21346 + else
21347 +#endif
21348 +
21349 return TASK_UNMAPPED_BASE;
21350 - else
21351 + } else
21352 return TASK_UNMAPPED_BASE + mmap_rnd();
21353 }
21354
21355 @@ -125,11 +138,23 @@ static unsigned long mmap_legacy_base(vo
21356 void arch_pick_mmap_layout(struct mm_struct *mm)
21357 {
21358 if (mmap_is_legacy()) {
21359 - mm->mmap_base = mmap_legacy_base();
21360 + mm->mmap_base = mmap_legacy_base(mm);
21361 +
21362 +#ifdef CONFIG_PAX_RANDMMAP
21363 + if (mm->pax_flags & MF_PAX_RANDMMAP)
21364 + mm->mmap_base += mm->delta_mmap;
21365 +#endif
21366 +
21367 mm->get_unmapped_area = arch_get_unmapped_area;
21368 mm->unmap_area = arch_unmap_area;
21369 } else {
21370 - mm->mmap_base = mmap_base();
21371 + mm->mmap_base = mmap_base(mm);
21372 +
21373 +#ifdef CONFIG_PAX_RANDMMAP
21374 + if (mm->pax_flags & MF_PAX_RANDMMAP)
21375 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
21376 +#endif
21377 +
21378 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
21379 mm->unmap_area = arch_unmap_area_topdown;
21380 }
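To see what the reworked mmap_base()/arch_pick_mmap_layout() compute for the top-down layout, here is a stand-alone numeric sketch (illustrative only): mmap_rnd() and the stack-randomization term of MIN_GAP are folded into the rnd argument, and delta_mmap/delta_stack model the extra PAX_RANDMMAP shift applied in arch_pick_mmap_layout().

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
#define MIN_GAP		(128UL << 20)	/* plus stack randomization in the kernel */

static unsigned long mmap_base_sketch(unsigned long task_size,
				      unsigned long stack_rlimit,
				      unsigned long rnd,
				      unsigned long delta_mmap,
				      unsigned long delta_stack)
{
	unsigned long max_gap = task_size / 6 * 5;	/* MAX_GAP now scales with pax_task_size */
	unsigned long gap = stack_rlimit;
	unsigned long base;

	if (gap < MIN_GAP)
		gap = MIN_GAP;
	else if (gap > max_gap)
		gap = max_gap;

	base = PAGE_ALIGN(task_size - gap - rnd);
	/* under MF_PAX_RANDMMAP the patch then lowers the base further */
	return base - (delta_mmap + delta_stack);
}

int main(void)
{
	/* e.g. a 32-bit task: 3 GiB task size, 8 MiB RLIMIT_STACK, no randomization */
	printf("mmap_base = %#lx\n",
	       mmap_base_sketch(0xC0000000UL, 8UL << 20, 0, 0, 0));
	return 0;
}

With those inputs the sketch prints the familiar 0xb8000000 base; under SEGMEXEC the smaller SEGMEXEC_TASK_SIZE is fed in instead, and RANDMMAP lowers the result by the per-mm deltas.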
21381 diff -urNp linux-3.0.8/arch/x86/mm/mmio-mod.c linux-3.0.8/arch/x86/mm/mmio-mod.c
21382 --- linux-3.0.8/arch/x86/mm/mmio-mod.c 2011-07-21 22:17:23.000000000 -0400
21383 +++ linux-3.0.8/arch/x86/mm/mmio-mod.c 2011-08-23 21:47:55.000000000 -0400
21384 @@ -195,7 +195,7 @@ static void pre(struct kmmio_probe *p, s
21385 break;
21386 default:
21387 {
21388 - unsigned char *ip = (unsigned char *)instptr;
21389 + unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
21390 my_trace->opcode = MMIO_UNKNOWN_OP;
21391 my_trace->width = 0;
21392 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
21393 @@ -235,7 +235,7 @@ static void post(struct kmmio_probe *p,
21394 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
21395 void __iomem *addr)
21396 {
21397 - static atomic_t next_id;
21398 + static atomic_unchecked_t next_id;
21399 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
21400 /* These are page-unaligned. */
21401 struct mmiotrace_map map = {
21402 @@ -259,7 +259,7 @@ static void ioremap_trace_core(resource_
21403 .private = trace
21404 },
21405 .phys = offset,
21406 - .id = atomic_inc_return(&next_id)
21407 + .id = atomic_inc_return_unchecked(&next_id)
21408 };
21409 map.map_id = trace->id;
21410
21411 diff -urNp linux-3.0.8/arch/x86/mm/pageattr.c linux-3.0.8/arch/x86/mm/pageattr.c
21412 --- linux-3.0.8/arch/x86/mm/pageattr.c 2011-07-21 22:17:23.000000000 -0400
21413 +++ linux-3.0.8/arch/x86/mm/pageattr.c 2011-08-23 21:47:55.000000000 -0400
21414 @@ -261,7 +261,7 @@ static inline pgprot_t static_protection
21415 */
21416 #ifdef CONFIG_PCI_BIOS
21417 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
21418 - pgprot_val(forbidden) |= _PAGE_NX;
21419 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
21420 #endif
21421
21422 /*
21423 @@ -269,9 +269,10 @@ static inline pgprot_t static_protection
21424 * Does not cover __inittext since that is gone later on. On
21425 * 64bit we do not enforce !NX on the low mapping
21426 */
21427 - if (within(address, (unsigned long)_text, (unsigned long)_etext))
21428 - pgprot_val(forbidden) |= _PAGE_NX;
21429 + if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
21430 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
21431
21432 +#ifdef CONFIG_DEBUG_RODATA
21433 /*
21434 * The .rodata section needs to be read-only. Using the pfn
21435 * catches all aliases.
21436 @@ -279,6 +280,7 @@ static inline pgprot_t static_protection
21437 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
21438 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
21439 pgprot_val(forbidden) |= _PAGE_RW;
21440 +#endif
21441
21442 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
21443 /*
21444 @@ -317,6 +319,13 @@ static inline pgprot_t static_protection
21445 }
21446 #endif
21447
21448 +#ifdef CONFIG_PAX_KERNEXEC
21449 + if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
21450 + pgprot_val(forbidden) |= _PAGE_RW;
21451 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
21452 + }
21453 +#endif
21454 +
21455 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
21456
21457 return prot;
21458 @@ -369,23 +378,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
21459 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
21460 {
21461 /* change init_mm */
21462 + pax_open_kernel();
21463 set_pte_atomic(kpte, pte);
21464 +
21465 #ifdef CONFIG_X86_32
21466 if (!SHARED_KERNEL_PMD) {
21467 +
21468 +#ifdef CONFIG_PAX_PER_CPU_PGD
21469 + unsigned long cpu;
21470 +#else
21471 struct page *page;
21472 +#endif
21473
21474 +#ifdef CONFIG_PAX_PER_CPU_PGD
21475 + for (cpu = 0; cpu < NR_CPUS; ++cpu) {
21476 + pgd_t *pgd = get_cpu_pgd(cpu);
21477 +#else
21478 list_for_each_entry(page, &pgd_list, lru) {
21479 - pgd_t *pgd;
21480 + pgd_t *pgd = (pgd_t *)page_address(page);
21481 +#endif
21482 +
21483 pud_t *pud;
21484 pmd_t *pmd;
21485
21486 - pgd = (pgd_t *)page_address(page) + pgd_index(address);
21487 + pgd += pgd_index(address);
21488 pud = pud_offset(pgd, address);
21489 pmd = pmd_offset(pud, address);
21490 set_pte_atomic((pte_t *)pmd, pte);
21491 }
21492 }
21493 #endif
21494 + pax_close_kernel();
21495 }
21496
21497 static int
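
The pageattr.c hunks above follow the standard PaX pattern: under KERNEXEC the kernel image from _text to _sdata is forced read-only and non-executable where appropriate (with _PAGE_NX masked by __supported_pte_mask), so legitimate page-table stores such as the ones in __set_pmd_pte() are bracketed by pax_open_kernel()/pax_close_kernel(). A rough userspace analogy of that bracketing, using mprotect() on a page we made read-only ourselves; the kernel mechanism itself (CR0.WP or PTE toggling) is not shown:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long pagesz = sysconf(_SC_PAGESIZE);
	/* Pretend this page is "read-only kernel data". */
	char *ro = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
			MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (ro == MAP_FAILED)
		return 1;
	strcpy(ro, "initial");
	mprotect(ro, pagesz, PROT_READ);              /* normal state: writes fault   */

	mprotect(ro, pagesz, PROT_READ | PROT_WRITE); /* analogue of pax_open_kernel() */
	strcpy(ro, "updated");                        /* the one sanctioned write      */
	mprotect(ro, pagesz, PROT_READ);              /* analogue of pax_close_kernel()*/

	printf("%s\n", ro);
	return 0;
}
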
21498 diff -urNp linux-3.0.8/arch/x86/mm/pageattr-test.c linux-3.0.8/arch/x86/mm/pageattr-test.c
21499 --- linux-3.0.8/arch/x86/mm/pageattr-test.c 2011-07-21 22:17:23.000000000 -0400
21500 +++ linux-3.0.8/arch/x86/mm/pageattr-test.c 2011-08-23 21:47:55.000000000 -0400
21501 @@ -36,7 +36,7 @@ enum {
21502
21503 static int pte_testbit(pte_t pte)
21504 {
21505 - return pte_flags(pte) & _PAGE_UNUSED1;
21506 + return pte_flags(pte) & _PAGE_CPA_TEST;
21507 }
21508
21509 struct split_state {
21510 diff -urNp linux-3.0.8/arch/x86/mm/pat.c linux-3.0.8/arch/x86/mm/pat.c
21511 --- linux-3.0.8/arch/x86/mm/pat.c 2011-07-21 22:17:23.000000000 -0400
21512 +++ linux-3.0.8/arch/x86/mm/pat.c 2011-08-23 21:47:55.000000000 -0400
21513 @@ -361,7 +361,7 @@ int free_memtype(u64 start, u64 end)
21514
21515 if (!entry) {
21516 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
21517 - current->comm, current->pid, start, end);
21518 + current->comm, task_pid_nr(current), start, end);
21519 return -EINVAL;
21520 }
21521
21522 @@ -492,8 +492,8 @@ static inline int range_is_allowed(unsig
21523 while (cursor < to) {
21524 if (!devmem_is_allowed(pfn)) {
21525 printk(KERN_INFO
21526 - "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
21527 - current->comm, from, to);
21528 + "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
21529 + current->comm, from, to, cursor);
21530 return 0;
21531 }
21532 cursor += PAGE_SIZE;
21533 @@ -557,7 +557,7 @@ int kernel_map_sync_memtype(u64 base, un
21534 printk(KERN_INFO
21535 "%s:%d ioremap_change_attr failed %s "
21536 "for %Lx-%Lx\n",
21537 - current->comm, current->pid,
21538 + current->comm, task_pid_nr(current),
21539 cattr_name(flags),
21540 base, (unsigned long long)(base + size));
21541 return -EINVAL;
21542 @@ -593,7 +593,7 @@ static int reserve_pfn_range(u64 paddr,
21543 if (want_flags != flags) {
21544 printk(KERN_WARNING
21545 "%s:%d map pfn RAM range req %s for %Lx-%Lx, got %s\n",
21546 - current->comm, current->pid,
21547 + current->comm, task_pid_nr(current),
21548 cattr_name(want_flags),
21549 (unsigned long long)paddr,
21550 (unsigned long long)(paddr + size),
21551 @@ -615,7 +615,7 @@ static int reserve_pfn_range(u64 paddr,
21552 free_memtype(paddr, paddr + size);
21553 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
21554 " for %Lx-%Lx, got %s\n",
21555 - current->comm, current->pid,
21556 + current->comm, task_pid_nr(current),
21557 cattr_name(want_flags),
21558 (unsigned long long)paddr,
21559 (unsigned long long)(paddr + size),
21560 diff -urNp linux-3.0.8/arch/x86/mm/pf_in.c linux-3.0.8/arch/x86/mm/pf_in.c
21561 --- linux-3.0.8/arch/x86/mm/pf_in.c 2011-07-21 22:17:23.000000000 -0400
21562 +++ linux-3.0.8/arch/x86/mm/pf_in.c 2011-08-23 21:47:55.000000000 -0400
21563 @@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned l
21564 int i;
21565 enum reason_type rv = OTHERS;
21566
21567 - p = (unsigned char *)ins_addr;
21568 + p = (unsigned char *)ktla_ktva(ins_addr);
21569 p += skip_prefix(p, &prf);
21570 p += get_opcode(p, &opcode);
21571
21572 @@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(un
21573 struct prefix_bits prf;
21574 int i;
21575
21576 - p = (unsigned char *)ins_addr;
21577 + p = (unsigned char *)ktla_ktva(ins_addr);
21578 p += skip_prefix(p, &prf);
21579 p += get_opcode(p, &opcode);
21580
21581 @@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned
21582 struct prefix_bits prf;
21583 int i;
21584
21585 - p = (unsigned char *)ins_addr;
21586 + p = (unsigned char *)ktla_ktva(ins_addr);
21587 p += skip_prefix(p, &prf);
21588 p += get_opcode(p, &opcode);
21589
21590 @@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned l
21591 struct prefix_bits prf;
21592 int i;
21593
21594 - p = (unsigned char *)ins_addr;
21595 + p = (unsigned char *)ktla_ktva(ins_addr);
21596 p += skip_prefix(p, &prf);
21597 p += get_opcode(p, &opcode);
21598 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
21599 @@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned l
21600 struct prefix_bits prf;
21601 int i;
21602
21603 - p = (unsigned char *)ins_addr;
21604 + p = (unsigned char *)ktla_ktva(ins_addr);
21605 p += skip_prefix(p, &prf);
21606 p += get_opcode(p, &opcode);
21607 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
21608 diff -urNp linux-3.0.8/arch/x86/mm/pgtable_32.c linux-3.0.8/arch/x86/mm/pgtable_32.c
21609 --- linux-3.0.8/arch/x86/mm/pgtable_32.c 2011-07-21 22:17:23.000000000 -0400
21610 +++ linux-3.0.8/arch/x86/mm/pgtable_32.c 2011-08-23 21:47:55.000000000 -0400
21611 @@ -48,10 +48,13 @@ void set_pte_vaddr(unsigned long vaddr,
21612 return;
21613 }
21614 pte = pte_offset_kernel(pmd, vaddr);
21615 +
21616 + pax_open_kernel();
21617 if (pte_val(pteval))
21618 set_pte_at(&init_mm, vaddr, pte, pteval);
21619 else
21620 pte_clear(&init_mm, vaddr, pte);
21621 + pax_close_kernel();
21622
21623 /*
21624 * It's enough to flush this one mapping.
21625 diff -urNp linux-3.0.8/arch/x86/mm/pgtable.c linux-3.0.8/arch/x86/mm/pgtable.c
21626 --- linux-3.0.8/arch/x86/mm/pgtable.c 2011-07-21 22:17:23.000000000 -0400
21627 +++ linux-3.0.8/arch/x86/mm/pgtable.c 2011-08-23 21:47:55.000000000 -0400
21628 @@ -84,10 +84,52 @@ static inline void pgd_list_del(pgd_t *p
21629 list_del(&page->lru);
21630 }
21631
21632 -#define UNSHARED_PTRS_PER_PGD \
21633 - (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
21634 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21635 +pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
21636
21637 +void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count)
21638 +{
21639 + while (count--)
21640 + *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
21641 +}
21642 +#endif
21643 +
21644 +#ifdef CONFIG_PAX_PER_CPU_PGD
21645 +void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count)
21646 +{
21647 + while (count--)
21648 +
21649 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21650 + *dst++ = __pgd(pgd_val(*src++) & clone_pgd_mask);
21651 +#else
21652 + *dst++ = *src++;
21653 +#endif
21654
21655 +}
21656 +#endif
21657 +
21658 +#ifdef CONFIG_X86_64
21659 +#define pxd_t pud_t
21660 +#define pyd_t pgd_t
21661 +#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
21662 +#define pxd_free(mm, pud) pud_free((mm), (pud))
21663 +#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
21664 +#define pyd_offset(mm ,address) pgd_offset((mm), (address))
21665 +#define PYD_SIZE PGDIR_SIZE
21666 +#else
21667 +#define pxd_t pmd_t
21668 +#define pyd_t pud_t
21669 +#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
21670 +#define pxd_free(mm, pud) pmd_free((mm), (pud))
21671 +#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
21672 +#define pyd_offset(mm ,address) pud_offset((mm), (address))
21673 +#define PYD_SIZE PUD_SIZE
21674 +#endif
21675 +
21676 +#ifdef CONFIG_PAX_PER_CPU_PGD
21677 +static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
21678 +static inline void pgd_dtor(pgd_t *pgd) {}
21679 +#else
21680 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
21681 {
21682 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
21683 @@ -128,6 +170,7 @@ static void pgd_dtor(pgd_t *pgd)
21684 pgd_list_del(pgd);
21685 spin_unlock(&pgd_lock);
21686 }
21687 +#endif
21688
21689 /*
21690 * List of all pgd's needed for non-PAE so it can invalidate entries
21691 @@ -140,7 +183,7 @@ static void pgd_dtor(pgd_t *pgd)
21692 * -- wli
21693 */
21694
21695 -#ifdef CONFIG_X86_PAE
21696 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
21697 /*
21698 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
21699 * updating the top-level pagetable entries to guarantee the
21700 @@ -152,7 +195,7 @@ static void pgd_dtor(pgd_t *pgd)
21701 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
21702 * and initialize the kernel pmds here.
21703 */
21704 -#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
21705 +#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
21706
21707 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
21708 {
21709 @@ -170,36 +213,38 @@ void pud_populate(struct mm_struct *mm,
21710 */
21711 flush_tlb_mm(mm);
21712 }
21713 +#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
21714 +#define PREALLOCATED_PXDS USER_PGD_PTRS
21715 #else /* !CONFIG_X86_PAE */
21716
21717 /* No need to prepopulate any pagetable entries in non-PAE modes. */
21718 -#define PREALLOCATED_PMDS 0
21719 +#define PREALLOCATED_PXDS 0
21720
21721 #endif /* CONFIG_X86_PAE */
21722
21723 -static void free_pmds(pmd_t *pmds[])
21724 +static void free_pxds(pxd_t *pxds[])
21725 {
21726 int i;
21727
21728 - for(i = 0; i < PREALLOCATED_PMDS; i++)
21729 - if (pmds[i])
21730 - free_page((unsigned long)pmds[i]);
21731 + for(i = 0; i < PREALLOCATED_PXDS; i++)
21732 + if (pxds[i])
21733 + free_page((unsigned long)pxds[i]);
21734 }
21735
21736 -static int preallocate_pmds(pmd_t *pmds[])
21737 +static int preallocate_pxds(pxd_t *pxds[])
21738 {
21739 int i;
21740 bool failed = false;
21741
21742 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
21743 - pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
21744 - if (pmd == NULL)
21745 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
21746 + pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
21747 + if (pxd == NULL)
21748 failed = true;
21749 - pmds[i] = pmd;
21750 + pxds[i] = pxd;
21751 }
21752
21753 if (failed) {
21754 - free_pmds(pmds);
21755 + free_pxds(pxds);
21756 return -ENOMEM;
21757 }
21758
21759 @@ -212,51 +257,55 @@ static int preallocate_pmds(pmd_t *pmds[
21760 * preallocate which never got a corresponding vma will need to be
21761 * freed manually.
21762 */
21763 -static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
21764 +static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
21765 {
21766 int i;
21767
21768 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
21769 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
21770 pgd_t pgd = pgdp[i];
21771
21772 if (pgd_val(pgd) != 0) {
21773 - pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
21774 + pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
21775
21776 - pgdp[i] = native_make_pgd(0);
21777 + set_pgd(pgdp + i, native_make_pgd(0));
21778
21779 - paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
21780 - pmd_free(mm, pmd);
21781 + paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
21782 + pxd_free(mm, pxd);
21783 }
21784 }
21785 }
21786
21787 -static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
21788 +static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
21789 {
21790 - pud_t *pud;
21791 + pyd_t *pyd;
21792 unsigned long addr;
21793 int i;
21794
21795 - if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
21796 + if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
21797 return;
21798
21799 - pud = pud_offset(pgd, 0);
21800 +#ifdef CONFIG_X86_64
21801 + pyd = pyd_offset(mm, 0L);
21802 +#else
21803 + pyd = pyd_offset(pgd, 0L);
21804 +#endif
21805
21806 - for (addr = i = 0; i < PREALLOCATED_PMDS;
21807 - i++, pud++, addr += PUD_SIZE) {
21808 - pmd_t *pmd = pmds[i];
21809 + for (addr = i = 0; i < PREALLOCATED_PXDS;
21810 + i++, pyd++, addr += PYD_SIZE) {
21811 + pxd_t *pxd = pxds[i];
21812
21813 if (i >= KERNEL_PGD_BOUNDARY)
21814 - memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
21815 - sizeof(pmd_t) * PTRS_PER_PMD);
21816 + memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
21817 + sizeof(pxd_t) * PTRS_PER_PMD);
21818
21819 - pud_populate(mm, pud, pmd);
21820 + pyd_populate(mm, pyd, pxd);
21821 }
21822 }
21823
21824 pgd_t *pgd_alloc(struct mm_struct *mm)
21825 {
21826 pgd_t *pgd;
21827 - pmd_t *pmds[PREALLOCATED_PMDS];
21828 + pxd_t *pxds[PREALLOCATED_PXDS];
21829
21830 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
21831
21832 @@ -265,11 +314,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
21833
21834 mm->pgd = pgd;
21835
21836 - if (preallocate_pmds(pmds) != 0)
21837 + if (preallocate_pxds(pxds) != 0)
21838 goto out_free_pgd;
21839
21840 if (paravirt_pgd_alloc(mm) != 0)
21841 - goto out_free_pmds;
21842 + goto out_free_pxds;
21843
21844 /*
21845 * Make sure that pre-populating the pmds is atomic with
21846 @@ -279,14 +328,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
21847 spin_lock(&pgd_lock);
21848
21849 pgd_ctor(mm, pgd);
21850 - pgd_prepopulate_pmd(mm, pgd, pmds);
21851 + pgd_prepopulate_pxd(mm, pgd, pxds);
21852
21853 spin_unlock(&pgd_lock);
21854
21855 return pgd;
21856
21857 -out_free_pmds:
21858 - free_pmds(pmds);
21859 +out_free_pxds:
21860 + free_pxds(pxds);
21861 out_free_pgd:
21862 free_page((unsigned long)pgd);
21863 out:
21864 @@ -295,7 +344,7 @@ out:
21865
21866 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
21867 {
21868 - pgd_mop_up_pmds(mm, pgd);
21869 + pgd_mop_up_pxds(mm, pgd);
21870 pgd_dtor(pgd);
21871 paravirt_pgd_free(mm, pgd);
21872 free_page((unsigned long)pgd);
21873 diff -urNp linux-3.0.8/arch/x86/mm/setup_nx.c linux-3.0.8/arch/x86/mm/setup_nx.c
21874 --- linux-3.0.8/arch/x86/mm/setup_nx.c 2011-07-21 22:17:23.000000000 -0400
21875 +++ linux-3.0.8/arch/x86/mm/setup_nx.c 2011-08-23 21:47:55.000000000 -0400
21876 @@ -5,8 +5,10 @@
21877 #include <asm/pgtable.h>
21878 #include <asm/proto.h>
21879
21880 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
21881 static int disable_nx __cpuinitdata;
21882
21883 +#ifndef CONFIG_PAX_PAGEEXEC
21884 /*
21885 * noexec = on|off
21886 *
21887 @@ -28,12 +30,17 @@ static int __init noexec_setup(char *str
21888 return 0;
21889 }
21890 early_param("noexec", noexec_setup);
21891 +#endif
21892 +
21893 +#endif
21894
21895 void __cpuinit x86_configure_nx(void)
21896 {
21897 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
21898 if (cpu_has_nx && !disable_nx)
21899 __supported_pte_mask |= _PAGE_NX;
21900 else
21901 +#endif
21902 __supported_pte_mask &= ~_PAGE_NX;
21903 }
21904
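
The setup_nx.c hunk above compiles out the "noexec=" parameter when PAX_PAGEEXEC is enabled and only touches _PAGE_NX at all on 64-bit or PAE kernels, so NX is used whenever the hardware offers it. For reference, the hardware capability itself is CPUID leaf 0x80000001, EDX bit 20; a minimal userspace probe (not the kernel's code path):

#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid(0x80000000, &eax, &ebx, &ecx, &edx) || eax < 0x80000001) {
		puts("extended CPUID leaves not available");
		return 1;
	}
	__get_cpuid(0x80000001, &eax, &ebx, &ecx, &edx);
	printf("NX (execute disable) supported: %s\n",
	       (edx & (1u << 20)) ? "yes" : "no");
	return 0;
}
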
21905 diff -urNp linux-3.0.8/arch/x86/mm/tlb.c linux-3.0.8/arch/x86/mm/tlb.c
21906 --- linux-3.0.8/arch/x86/mm/tlb.c 2011-07-21 22:17:23.000000000 -0400
21907 +++ linux-3.0.8/arch/x86/mm/tlb.c 2011-08-23 21:47:55.000000000 -0400
21908 @@ -65,7 +65,11 @@ void leave_mm(int cpu)
21909 BUG();
21910 cpumask_clear_cpu(cpu,
21911 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
21912 +
21913 +#ifndef CONFIG_PAX_PER_CPU_PGD
21914 load_cr3(swapper_pg_dir);
21915 +#endif
21916 +
21917 }
21918 EXPORT_SYMBOL_GPL(leave_mm);
21919
21920 diff -urNp linux-3.0.8/arch/x86/net/bpf_jit_comp.c linux-3.0.8/arch/x86/net/bpf_jit_comp.c
21921 --- linux-3.0.8/arch/x86/net/bpf_jit_comp.c 2011-07-21 22:17:23.000000000 -0400
21922 +++ linux-3.0.8/arch/x86/net/bpf_jit_comp.c 2011-11-01 05:23:50.000000000 -0400
21923 @@ -589,7 +589,9 @@ cond_branch: f_offset = addrs[i + filt
21924 module_free(NULL, image);
21925 return;
21926 }
21927 + pax_open_kernel();
21928 memcpy(image + proglen, temp, ilen);
21929 + pax_close_kernel();
21930 }
21931 proglen += ilen;
21932 addrs[i] = proglen;
21933 @@ -609,7 +611,7 @@ cond_branch: f_offset = addrs[i + filt
21934 break;
21935 }
21936 if (proglen == oldproglen) {
21937 - image = module_alloc(max_t(unsigned int,
21938 + image = module_alloc_exec(max_t(unsigned int,
21939 proglen,
21940 sizeof(struct work_struct)));
21941 if (!image)
21942 @@ -637,7 +639,7 @@ out:
21943
21944 static void jit_free_defer(struct work_struct *arg)
21945 {
21946 - module_free(NULL, arg);
21947 + module_free_exec(NULL, arg);
21948 }
21949
21950 /* run from softirq, we must use a work_struct to call
21951 diff -urNp linux-3.0.8/arch/x86/net/bpf_jit.S linux-3.0.8/arch/x86/net/bpf_jit.S
21952 --- linux-3.0.8/arch/x86/net/bpf_jit.S 2011-07-21 22:17:23.000000000 -0400
21953 +++ linux-3.0.8/arch/x86/net/bpf_jit.S 2011-10-07 19:07:28.000000000 -0400
21954 @@ -9,6 +9,7 @@
21955 */
21956 #include <linux/linkage.h>
21957 #include <asm/dwarf2.h>
21958 +#include <asm/alternative-asm.h>
21959
21960 /*
21961 * Calling convention :
21962 @@ -35,6 +36,7 @@ sk_load_word:
21963 jle bpf_slow_path_word
21964 mov (SKBDATA,%rsi),%eax
21965 bswap %eax /* ntohl() */
21966 + pax_force_retaddr
21967 ret
21968
21969
21970 @@ -53,6 +55,7 @@ sk_load_half:
21971 jle bpf_slow_path_half
21972 movzwl (SKBDATA,%rsi),%eax
21973 rol $8,%ax # ntohs()
21974 + pax_force_retaddr
21975 ret
21976
21977 sk_load_byte_ind:
21978 @@ -66,6 +69,7 @@ sk_load_byte:
21979 cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */
21980 jle bpf_slow_path_byte
21981 movzbl (SKBDATA,%rsi),%eax
21982 + pax_force_retaddr
21983 ret
21984
21985 /**
21986 @@ -82,6 +86,7 @@ ENTRY(sk_load_byte_msh)
21987 movzbl (SKBDATA,%rsi),%ebx
21988 and $15,%bl
21989 shl $2,%bl
21990 + pax_force_retaddr
21991 ret
21992 CFI_ENDPROC
21993 ENDPROC(sk_load_byte_msh)
21994 @@ -91,6 +96,7 @@ bpf_error:
21995 xor %eax,%eax
21996 mov -8(%rbp),%rbx
21997 leaveq
21998 + pax_force_retaddr
21999 ret
22000
22001 /* rsi contains offset and can be scratched */
22002 @@ -113,6 +119,7 @@ bpf_slow_path_word:
22003 js bpf_error
22004 mov -12(%rbp),%eax
22005 bswap %eax
22006 + pax_force_retaddr
22007 ret
22008
22009 bpf_slow_path_half:
22010 @@ -121,12 +128,14 @@ bpf_slow_path_half:
22011 mov -12(%rbp),%ax
22012 rol $8,%ax
22013 movzwl %ax,%eax
22014 + pax_force_retaddr
22015 ret
22016
22017 bpf_slow_path_byte:
22018 bpf_slow_path_common(1)
22019 js bpf_error
22020 movzbl -12(%rbp),%eax
22021 + pax_force_retaddr
22022 ret
22023
22024 bpf_slow_path_byte_msh:
22025 @@ -137,4 +146,5 @@ bpf_slow_path_byte_msh:
22026 and $15,%al
22027 shl $2,%al
22028 xchg %eax,%ebx
22029 + pax_force_retaddr
22030 ret
22031 diff -urNp linux-3.0.8/arch/x86/oprofile/backtrace.c linux-3.0.8/arch/x86/oprofile/backtrace.c
22032 --- linux-3.0.8/arch/x86/oprofile/backtrace.c 2011-10-24 08:05:21.000000000 -0400
22033 +++ linux-3.0.8/arch/x86/oprofile/backtrace.c 2011-10-06 04:17:55.000000000 -0400
22034 @@ -83,11 +83,11 @@ dump_user_backtrace_32(struct stack_fram
22035 struct stack_frame_ia32 *fp;
22036 unsigned long bytes;
22037
22038 - bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
22039 + bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
22040 if (bytes != sizeof(bufhead))
22041 return NULL;
22042
22043 - fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
22044 + fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame);
22045
22046 oprofile_add_trace(bufhead[0].return_address);
22047
22048 @@ -129,7 +129,7 @@ static struct stack_frame *dump_user_bac
22049 struct stack_frame bufhead[2];
22050 unsigned long bytes;
22051
22052 - bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
22053 + bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
22054 if (bytes != sizeof(bufhead))
22055 return NULL;
22056
22057 @@ -148,7 +148,7 @@ x86_backtrace(struct pt_regs * const reg
22058 {
22059 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
22060
22061 - if (!user_mode_vm(regs)) {
22062 + if (!user_mode(regs)) {
22063 unsigned long stack = kernel_stack_pointer(regs);
22064 if (depth)
22065 dump_trace(NULL, regs, (unsigned long *)stack, 0,
22066 diff -urNp linux-3.0.8/arch/x86/pci/mrst.c linux-3.0.8/arch/x86/pci/mrst.c
22067 --- linux-3.0.8/arch/x86/pci/mrst.c 2011-07-21 22:17:23.000000000 -0400
22068 +++ linux-3.0.8/arch/x86/pci/mrst.c 2011-08-23 21:47:55.000000000 -0400
22069 @@ -234,7 +234,9 @@ int __init pci_mrst_init(void)
22070 printk(KERN_INFO "Moorestown platform detected, using MRST PCI ops\n");
22071 pci_mmcfg_late_init();
22072 pcibios_enable_irq = mrst_pci_irq_enable;
22073 - pci_root_ops = pci_mrst_ops;
22074 + pax_open_kernel();
22075 + memcpy((void *)&pci_root_ops, &pci_mrst_ops, sizeof(pci_mrst_ops));
22076 + pax_close_kernel();
22077 /* Continue with standard init */
22078 return 1;
22079 }
22080 diff -urNp linux-3.0.8/arch/x86/pci/pcbios.c linux-3.0.8/arch/x86/pci/pcbios.c
22081 --- linux-3.0.8/arch/x86/pci/pcbios.c 2011-07-21 22:17:23.000000000 -0400
22082 +++ linux-3.0.8/arch/x86/pci/pcbios.c 2011-08-23 21:47:55.000000000 -0400
22083 @@ -79,50 +79,93 @@ union bios32 {
22084 static struct {
22085 unsigned long address;
22086 unsigned short segment;
22087 -} bios32_indirect = { 0, __KERNEL_CS };
22088 +} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
22089
22090 /*
22091 * Returns the entry point for the given service, NULL on error
22092 */
22093
22094 -static unsigned long bios32_service(unsigned long service)
22095 +static unsigned long __devinit bios32_service(unsigned long service)
22096 {
22097 unsigned char return_code; /* %al */
22098 unsigned long address; /* %ebx */
22099 unsigned long length; /* %ecx */
22100 unsigned long entry; /* %edx */
22101 unsigned long flags;
22102 + struct desc_struct d, *gdt;
22103
22104 local_irq_save(flags);
22105 - __asm__("lcall *(%%edi); cld"
22106 +
22107 + gdt = get_cpu_gdt_table(smp_processor_id());
22108 +
22109 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
22110 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
22111 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
22112 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
22113 +
22114 + __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
22115 : "=a" (return_code),
22116 "=b" (address),
22117 "=c" (length),
22118 "=d" (entry)
22119 : "0" (service),
22120 "1" (0),
22121 - "D" (&bios32_indirect));
22122 + "D" (&bios32_indirect),
22123 + "r"(__PCIBIOS_DS)
22124 + : "memory");
22125 +
22126 + pax_open_kernel();
22127 + gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
22128 + gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
22129 + gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
22130 + gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
22131 + pax_close_kernel();
22132 +
22133 local_irq_restore(flags);
22134
22135 switch (return_code) {
22136 - case 0:
22137 - return address + entry;
22138 - case 0x80: /* Not present */
22139 - printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
22140 - return 0;
22141 - default: /* Shouldn't happen */
22142 - printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
22143 - service, return_code);
22144 + case 0: {
22145 + int cpu;
22146 + unsigned char flags;
22147 +
22148 + printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
22149 + if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
22150 + printk(KERN_WARNING "bios32_service: not valid\n");
22151 return 0;
22152 + }
22153 + address = address + PAGE_OFFSET;
22154 + length += 16UL; /* some BIOSs underreport this... */
22155 + flags = 4;
22156 + if (length >= 64*1024*1024) {
22157 + length >>= PAGE_SHIFT;
22158 + flags |= 8;
22159 + }
22160 +
22161 + for (cpu = 0; cpu < NR_CPUS; cpu++) {
22162 + gdt = get_cpu_gdt_table(cpu);
22163 + pack_descriptor(&d, address, length, 0x9b, flags);
22164 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
22165 + pack_descriptor(&d, address, length, 0x93, flags);
22166 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
22167 + }
22168 + return entry;
22169 + }
22170 + case 0x80: /* Not present */
22171 + printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
22172 + return 0;
22173 + default: /* Shouldn't happen */
22174 + printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
22175 + service, return_code);
22176 + return 0;
22177 }
22178 }
22179
22180 static struct {
22181 unsigned long address;
22182 unsigned short segment;
22183 -} pci_indirect = { 0, __KERNEL_CS };
22184 +} pci_indirect __read_only = { 0, __PCIBIOS_CS };
22185
22186 -static int pci_bios_present;
22187 +static int pci_bios_present __read_only;
22188
22189 static int __devinit check_pcibios(void)
22190 {
22191 @@ -131,11 +174,13 @@ static int __devinit check_pcibios(void)
22192 unsigned long flags, pcibios_entry;
22193
22194 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
22195 - pci_indirect.address = pcibios_entry + PAGE_OFFSET;
22196 + pci_indirect.address = pcibios_entry;
22197
22198 local_irq_save(flags);
22199 - __asm__(
22200 - "lcall *(%%edi); cld\n\t"
22201 + __asm__("movw %w6, %%ds\n\t"
22202 + "lcall *%%ss:(%%edi); cld\n\t"
22203 + "push %%ss\n\t"
22204 + "pop %%ds\n\t"
22205 "jc 1f\n\t"
22206 "xor %%ah, %%ah\n"
22207 "1:"
22208 @@ -144,7 +189,8 @@ static int __devinit check_pcibios(void)
22209 "=b" (ebx),
22210 "=c" (ecx)
22211 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
22212 - "D" (&pci_indirect)
22213 + "D" (&pci_indirect),
22214 + "r" (__PCIBIOS_DS)
22215 : "memory");
22216 local_irq_restore(flags);
22217
22218 @@ -188,7 +234,10 @@ static int pci_bios_read(unsigned int se
22219
22220 switch (len) {
22221 case 1:
22222 - __asm__("lcall *(%%esi); cld\n\t"
22223 + __asm__("movw %w6, %%ds\n\t"
22224 + "lcall *%%ss:(%%esi); cld\n\t"
22225 + "push %%ss\n\t"
22226 + "pop %%ds\n\t"
22227 "jc 1f\n\t"
22228 "xor %%ah, %%ah\n"
22229 "1:"
22230 @@ -197,7 +246,8 @@ static int pci_bios_read(unsigned int se
22231 : "1" (PCIBIOS_READ_CONFIG_BYTE),
22232 "b" (bx),
22233 "D" ((long)reg),
22234 - "S" (&pci_indirect));
22235 + "S" (&pci_indirect),
22236 + "r" (__PCIBIOS_DS));
22237 /*
22238 * Zero-extend the result beyond 8 bits, do not trust the
22239 * BIOS having done it:
22240 @@ -205,7 +255,10 @@ static int pci_bios_read(unsigned int se
22241 *value &= 0xff;
22242 break;
22243 case 2:
22244 - __asm__("lcall *(%%esi); cld\n\t"
22245 + __asm__("movw %w6, %%ds\n\t"
22246 + "lcall *%%ss:(%%esi); cld\n\t"
22247 + "push %%ss\n\t"
22248 + "pop %%ds\n\t"
22249 "jc 1f\n\t"
22250 "xor %%ah, %%ah\n"
22251 "1:"
22252 @@ -214,7 +267,8 @@ static int pci_bios_read(unsigned int se
22253 : "1" (PCIBIOS_READ_CONFIG_WORD),
22254 "b" (bx),
22255 "D" ((long)reg),
22256 - "S" (&pci_indirect));
22257 + "S" (&pci_indirect),
22258 + "r" (__PCIBIOS_DS));
22259 /*
22260 * Zero-extend the result beyond 16 bits, do not trust the
22261 * BIOS having done it:
22262 @@ -222,7 +276,10 @@ static int pci_bios_read(unsigned int se
22263 *value &= 0xffff;
22264 break;
22265 case 4:
22266 - __asm__("lcall *(%%esi); cld\n\t"
22267 + __asm__("movw %w6, %%ds\n\t"
22268 + "lcall *%%ss:(%%esi); cld\n\t"
22269 + "push %%ss\n\t"
22270 + "pop %%ds\n\t"
22271 "jc 1f\n\t"
22272 "xor %%ah, %%ah\n"
22273 "1:"
22274 @@ -231,7 +288,8 @@ static int pci_bios_read(unsigned int se
22275 : "1" (PCIBIOS_READ_CONFIG_DWORD),
22276 "b" (bx),
22277 "D" ((long)reg),
22278 - "S" (&pci_indirect));
22279 + "S" (&pci_indirect),
22280 + "r" (__PCIBIOS_DS));
22281 break;
22282 }
22283
22284 @@ -254,7 +312,10 @@ static int pci_bios_write(unsigned int s
22285
22286 switch (len) {
22287 case 1:
22288 - __asm__("lcall *(%%esi); cld\n\t"
22289 + __asm__("movw %w6, %%ds\n\t"
22290 + "lcall *%%ss:(%%esi); cld\n\t"
22291 + "push %%ss\n\t"
22292 + "pop %%ds\n\t"
22293 "jc 1f\n\t"
22294 "xor %%ah, %%ah\n"
22295 "1:"
22296 @@ -263,10 +324,14 @@ static int pci_bios_write(unsigned int s
22297 "c" (value),
22298 "b" (bx),
22299 "D" ((long)reg),
22300 - "S" (&pci_indirect));
22301 + "S" (&pci_indirect),
22302 + "r" (__PCIBIOS_DS));
22303 break;
22304 case 2:
22305 - __asm__("lcall *(%%esi); cld\n\t"
22306 + __asm__("movw %w6, %%ds\n\t"
22307 + "lcall *%%ss:(%%esi); cld\n\t"
22308 + "push %%ss\n\t"
22309 + "pop %%ds\n\t"
22310 "jc 1f\n\t"
22311 "xor %%ah, %%ah\n"
22312 "1:"
22313 @@ -275,10 +340,14 @@ static int pci_bios_write(unsigned int s
22314 "c" (value),
22315 "b" (bx),
22316 "D" ((long)reg),
22317 - "S" (&pci_indirect));
22318 + "S" (&pci_indirect),
22319 + "r" (__PCIBIOS_DS));
22320 break;
22321 case 4:
22322 - __asm__("lcall *(%%esi); cld\n\t"
22323 + __asm__("movw %w6, %%ds\n\t"
22324 + "lcall *%%ss:(%%esi); cld\n\t"
22325 + "push %%ss\n\t"
22326 + "pop %%ds\n\t"
22327 "jc 1f\n\t"
22328 "xor %%ah, %%ah\n"
22329 "1:"
22330 @@ -287,7 +356,8 @@ static int pci_bios_write(unsigned int s
22331 "c" (value),
22332 "b" (bx),
22333 "D" ((long)reg),
22334 - "S" (&pci_indirect));
22335 + "S" (&pci_indirect),
22336 + "r" (__PCIBIOS_DS));
22337 break;
22338 }
22339
22340 @@ -392,10 +462,13 @@ struct irq_routing_table * pcibios_get_i
22341
22342 DBG("PCI: Fetching IRQ routing table... ");
22343 __asm__("push %%es\n\t"
22344 + "movw %w8, %%ds\n\t"
22345 "push %%ds\n\t"
22346 "pop %%es\n\t"
22347 - "lcall *(%%esi); cld\n\t"
22348 + "lcall *%%ss:(%%esi); cld\n\t"
22349 "pop %%es\n\t"
22350 + "push %%ss\n\t"
22351 + "pop %%ds\n"
22352 "jc 1f\n\t"
22353 "xor %%ah, %%ah\n"
22354 "1:"
22355 @@ -406,7 +479,8 @@ struct irq_routing_table * pcibios_get_i
22356 "1" (0),
22357 "D" ((long) &opt),
22358 "S" (&pci_indirect),
22359 - "m" (opt)
22360 + "m" (opt),
22361 + "r" (__PCIBIOS_DS)
22362 : "memory");
22363 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
22364 if (ret & 0xff00)
22365 @@ -430,7 +504,10 @@ int pcibios_set_irq_routing(struct pci_d
22366 {
22367 int ret;
22368
22369 - __asm__("lcall *(%%esi); cld\n\t"
22370 + __asm__("movw %w5, %%ds\n\t"
22371 + "lcall *%%ss:(%%esi); cld\n\t"
22372 + "push %%ss\n\t"
22373 + "pop %%ds\n"
22374 "jc 1f\n\t"
22375 "xor %%ah, %%ah\n"
22376 "1:"
22377 @@ -438,7 +515,8 @@ int pcibios_set_irq_routing(struct pci_d
22378 : "0" (PCIBIOS_SET_PCI_HW_INT),
22379 "b" ((dev->bus->number << 8) | dev->devfn),
22380 "c" ((irq << 8) | (pin + 10)),
22381 - "S" (&pci_indirect));
22382 + "S" (&pci_indirect),
22383 + "r" (__PCIBIOS_DS));
22384 return !(ret & 0xff00);
22385 }
22386 EXPORT_SYMBOL(pcibios_set_irq_routing);
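
The pcbios.c changes above route every BIOS32/PCI BIOS lcall through dedicated __PCIBIOS_CS/__PCIBIOS_DS descriptors built over just the BIOS window (loaded into %ds around each call) instead of relying on the flat kernel segments, whose base and limit PaX alters under KERNEXEC/UDEREF; bios32_service() now also sanity-checks the returned base, length and entry before installing the descriptors. For readers unfamiliar with pack_descriptor(), the sketch below shows roughly how a base/limit/access/flags tuple folds into an 8-byte GDT descriptor; it illustrates the x86 descriptor format, not the kernel helper:

#include <stdio.h>
#include <stdint.h>

/* Pack an x86 segment descriptor: limit[15:0], base[23:0], access byte,
 * flags nibble over limit[19:16], then base[31:24]. */
static uint64_t pack_descriptor(uint32_t base, uint32_t limit,
				uint8_t access, uint8_t flags)
{
	uint64_t d = 0;
	d |= limit & 0xffffULL;                     /* limit 15:0  */
	d |= (uint64_t)(base & 0xffffff) << 16;     /* base 23:0   */
	d |= (uint64_t)access << 40;                /* type/DPL/P  */
	d |= (uint64_t)((limit >> 16) & 0xf) << 48; /* limit 19:16 */
	d |= (uint64_t)(flags & 0xf) << 52;         /* AVL/L/DB/G  */
	d |= (uint64_t)(base >> 24) << 56;          /* base 31:24  */
	return d;
}

int main(void)
{
	/* e.g. a 32-bit code segment over a 1 MiB BIOS window mapped at 0xc0000000 */
	printf("%#018llx\n",
	       (unsigned long long)pack_descriptor(0xc0000000, 0xfffff, 0x9b, 0x4));
	return 0;
}
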
22387 diff -urNp linux-3.0.8/arch/x86/platform/efi/efi_32.c linux-3.0.8/arch/x86/platform/efi/efi_32.c
22388 --- linux-3.0.8/arch/x86/platform/efi/efi_32.c 2011-07-21 22:17:23.000000000 -0400
22389 +++ linux-3.0.8/arch/x86/platform/efi/efi_32.c 2011-10-06 04:17:55.000000000 -0400
22390 @@ -38,70 +38,56 @@
22391 */
22392
22393 static unsigned long efi_rt_eflags;
22394 -static pgd_t efi_bak_pg_dir_pointer[2];
22395 +static pgd_t __initdata efi_bak_pg_dir_pointer[KERNEL_PGD_PTRS];
22396
22397 -void efi_call_phys_prelog(void)
22398 +void __init efi_call_phys_prelog(void)
22399 {
22400 - unsigned long cr4;
22401 - unsigned long temp;
22402 struct desc_ptr gdt_descr;
22403
22404 - local_irq_save(efi_rt_eflags);
22405 +#ifdef CONFIG_PAX_KERNEXEC
22406 + struct desc_struct d;
22407 +#endif
22408
22409 - /*
22410 - * If I don't have PAE, I should just duplicate two entries in page
22411 - * directory. If I have PAE, I just need to duplicate one entry in
22412 - * page directory.
22413 - */
22414 - cr4 = read_cr4_safe();
22415 + local_irq_save(efi_rt_eflags);
22416
22417 - if (cr4 & X86_CR4_PAE) {
22418 - efi_bak_pg_dir_pointer[0].pgd =
22419 - swapper_pg_dir[pgd_index(0)].pgd;
22420 - swapper_pg_dir[0].pgd =
22421 - swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
22422 - } else {
22423 - efi_bak_pg_dir_pointer[0].pgd =
22424 - swapper_pg_dir[pgd_index(0)].pgd;
22425 - efi_bak_pg_dir_pointer[1].pgd =
22426 - swapper_pg_dir[pgd_index(0x400000)].pgd;
22427 - swapper_pg_dir[pgd_index(0)].pgd =
22428 - swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
22429 - temp = PAGE_OFFSET + 0x400000;
22430 - swapper_pg_dir[pgd_index(0x400000)].pgd =
22431 - swapper_pg_dir[pgd_index(temp)].pgd;
22432 - }
22433 + clone_pgd_range(efi_bak_pg_dir_pointer, swapper_pg_dir, KERNEL_PGD_PTRS);
22434 + clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
22435 + min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
22436
22437 /*
22438 * After the lock is released, the original page table is restored.
22439 */
22440 __flush_tlb_all();
22441
22442 +#ifdef CONFIG_PAX_KERNEXEC
22443 + pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
22444 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
22445 + pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
22446 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
22447 +#endif
22448 +
22449 gdt_descr.address = __pa(get_cpu_gdt_table(0));
22450 gdt_descr.size = GDT_SIZE - 1;
22451 load_gdt(&gdt_descr);
22452 }
22453
22454 -void efi_call_phys_epilog(void)
22455 +void __init efi_call_phys_epilog(void)
22456 {
22457 - unsigned long cr4;
22458 struct desc_ptr gdt_descr;
22459
22460 +#ifdef CONFIG_PAX_KERNEXEC
22461 + struct desc_struct d;
22462 +
22463 + memset(&d, 0, sizeof d);
22464 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
22465 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
22466 +#endif
22467 +
22468 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
22469 gdt_descr.size = GDT_SIZE - 1;
22470 load_gdt(&gdt_descr);
22471
22472 - cr4 = read_cr4_safe();
22473 -
22474 - if (cr4 & X86_CR4_PAE) {
22475 - swapper_pg_dir[pgd_index(0)].pgd =
22476 - efi_bak_pg_dir_pointer[0].pgd;
22477 - } else {
22478 - swapper_pg_dir[pgd_index(0)].pgd =
22479 - efi_bak_pg_dir_pointer[0].pgd;
22480 - swapper_pg_dir[pgd_index(0x400000)].pgd =
22481 - efi_bak_pg_dir_pointer[1].pgd;
22482 - }
22483 + clone_pgd_range(swapper_pg_dir, efi_bak_pg_dir_pointer, KERNEL_PGD_PTRS);
22484
22485 /*
22486 * After the lock is released, the original page table is restored.
22487 diff -urNp linux-3.0.8/arch/x86/platform/efi/efi_stub_32.S linux-3.0.8/arch/x86/platform/efi/efi_stub_32.S
22488 --- linux-3.0.8/arch/x86/platform/efi/efi_stub_32.S 2011-07-21 22:17:23.000000000 -0400
22489 +++ linux-3.0.8/arch/x86/platform/efi/efi_stub_32.S 2011-09-19 09:16:58.000000000 -0400
22490 @@ -6,7 +6,9 @@
22491 */
22492
22493 #include <linux/linkage.h>
22494 +#include <linux/init.h>
22495 #include <asm/page_types.h>
22496 +#include <asm/segment.h>
22497
22498 /*
22499 * efi_call_phys(void *, ...) is a function with variable parameters.
22500 @@ -20,7 +22,7 @@
22501 * service functions will comply with gcc calling convention, too.
22502 */
22503
22504 -.text
22505 +__INIT
22506 ENTRY(efi_call_phys)
22507 /*
22508 * 0. The function can only be called in Linux kernel. So CS has been
22509 @@ -36,9 +38,11 @@ ENTRY(efi_call_phys)
22510 * The mapping of lower virtual memory has been created in prelog and
22511 * epilog.
22512 */
22513 - movl $1f, %edx
22514 - subl $__PAGE_OFFSET, %edx
22515 - jmp *%edx
22516 + movl $(__KERNEXEC_EFI_DS), %edx
22517 + mov %edx, %ds
22518 + mov %edx, %es
22519 + mov %edx, %ss
22520 + ljmp $(__KERNEXEC_EFI_CS),$1f-__PAGE_OFFSET
22521 1:
22522
22523 /*
22524 @@ -47,14 +51,8 @@ ENTRY(efi_call_phys)
22525 * parameter 2, ..., param n. To make things easy, we save the return
22526 * address of efi_call_phys in a global variable.
22527 */
22528 - popl %edx
22529 - movl %edx, saved_return_addr
22530 - /* get the function pointer into ECX*/
22531 - popl %ecx
22532 - movl %ecx, efi_rt_function_ptr
22533 - movl $2f, %edx
22534 - subl $__PAGE_OFFSET, %edx
22535 - pushl %edx
22536 + popl (saved_return_addr)
22537 + popl (efi_rt_function_ptr)
22538
22539 /*
22540 * 3. Clear PG bit in %CR0.
22541 @@ -73,9 +71,8 @@ ENTRY(efi_call_phys)
22542 /*
22543 * 5. Call the physical function.
22544 */
22545 - jmp *%ecx
22546 + call *(efi_rt_function_ptr-__PAGE_OFFSET)
22547
22548 -2:
22549 /*
22550 * 6. After EFI runtime service returns, control will return to
22551 * following instruction. We'd better readjust stack pointer first.
22552 @@ -88,35 +85,32 @@ ENTRY(efi_call_phys)
22553 movl %cr0, %edx
22554 orl $0x80000000, %edx
22555 movl %edx, %cr0
22556 - jmp 1f
22557 -1:
22558 +
22559 /*
22560 * 8. Now restore the virtual mode from flat mode by
22561 * adding EIP with PAGE_OFFSET.
22562 */
22563 - movl $1f, %edx
22564 - jmp *%edx
22565 + ljmp $(__KERNEL_CS),$1f+__PAGE_OFFSET
22566 1:
22567 + movl $(__KERNEL_DS), %edx
22568 + mov %edx, %ds
22569 + mov %edx, %es
22570 + mov %edx, %ss
22571
22572 /*
22573 * 9. Balance the stack. And because EAX contain the return value,
22574 * we'd better not clobber it.
22575 */
22576 - leal efi_rt_function_ptr, %edx
22577 - movl (%edx), %ecx
22578 - pushl %ecx
22579 + pushl (efi_rt_function_ptr)
22580
22581 /*
22582 - * 10. Push the saved return address onto the stack and return.
22583 + * 10. Return to the saved return address.
22584 */
22585 - leal saved_return_addr, %edx
22586 - movl (%edx), %ecx
22587 - pushl %ecx
22588 - ret
22589 + jmpl *(saved_return_addr)
22590 ENDPROC(efi_call_phys)
22591 .previous
22592
22593 -.data
22594 +__INITDATA
22595 saved_return_addr:
22596 .long 0
22597 efi_rt_function_ptr:
22598 diff -urNp linux-3.0.8/arch/x86/platform/efi/efi_stub_64.S linux-3.0.8/arch/x86/platform/efi/efi_stub_64.S
22599 --- linux-3.0.8/arch/x86/platform/efi/efi_stub_64.S 2011-07-21 22:17:23.000000000 -0400
22600 +++ linux-3.0.8/arch/x86/platform/efi/efi_stub_64.S 2011-10-06 04:17:55.000000000 -0400
22601 @@ -7,6 +7,7 @@
22602 */
22603
22604 #include <linux/linkage.h>
22605 +#include <asm/alternative-asm.h>
22606
22607 #define SAVE_XMM \
22608 mov %rsp, %rax; \
22609 @@ -40,6 +41,7 @@ ENTRY(efi_call0)
22610 call *%rdi
22611 addq $32, %rsp
22612 RESTORE_XMM
22613 + pax_force_retaddr
22614 ret
22615 ENDPROC(efi_call0)
22616
22617 @@ -50,6 +52,7 @@ ENTRY(efi_call1)
22618 call *%rdi
22619 addq $32, %rsp
22620 RESTORE_XMM
22621 + pax_force_retaddr
22622 ret
22623 ENDPROC(efi_call1)
22624
22625 @@ -60,6 +63,7 @@ ENTRY(efi_call2)
22626 call *%rdi
22627 addq $32, %rsp
22628 RESTORE_XMM
22629 + pax_force_retaddr
22630 ret
22631 ENDPROC(efi_call2)
22632
22633 @@ -71,6 +75,7 @@ ENTRY(efi_call3)
22634 call *%rdi
22635 addq $32, %rsp
22636 RESTORE_XMM
22637 + pax_force_retaddr
22638 ret
22639 ENDPROC(efi_call3)
22640
22641 @@ -83,6 +88,7 @@ ENTRY(efi_call4)
22642 call *%rdi
22643 addq $32, %rsp
22644 RESTORE_XMM
22645 + pax_force_retaddr
22646 ret
22647 ENDPROC(efi_call4)
22648
22649 @@ -96,6 +102,7 @@ ENTRY(efi_call5)
22650 call *%rdi
22651 addq $48, %rsp
22652 RESTORE_XMM
22653 + pax_force_retaddr
22654 ret
22655 ENDPROC(efi_call5)
22656
22657 @@ -112,5 +119,6 @@ ENTRY(efi_call6)
22658 call *%rdi
22659 addq $48, %rsp
22660 RESTORE_XMM
22661 + pax_force_retaddr
22662 ret
22663 ENDPROC(efi_call6)
22664 diff -urNp linux-3.0.8/arch/x86/platform/mrst/mrst.c linux-3.0.8/arch/x86/platform/mrst/mrst.c
22665 --- linux-3.0.8/arch/x86/platform/mrst/mrst.c 2011-07-21 22:17:23.000000000 -0400
22666 +++ linux-3.0.8/arch/x86/platform/mrst/mrst.c 2011-08-23 21:47:55.000000000 -0400
22667 @@ -239,14 +239,16 @@ static int mrst_i8042_detect(void)
22668 }
22669
22670 /* Reboot and power off are handled by the SCU on a MID device */
22671 -static void mrst_power_off(void)
22672 +static __noreturn void mrst_power_off(void)
22673 {
22674 intel_scu_ipc_simple_command(0xf1, 1);
22675 + BUG();
22676 }
22677
22678 -static void mrst_reboot(void)
22679 +static __noreturn void mrst_reboot(void)
22680 {
22681 intel_scu_ipc_simple_command(0xf1, 0);
22682 + BUG();
22683 }
22684
22685 /*
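
Marking mrst_power_off() and mrst_reboot() as __noreturn, with a trailing BUG() in case the SCU command ever does return, matches the treatment of the Xen reboot/halt handlers later in this patch and lets the compiler treat everything after such a call as unreachable. A generic illustration of the attribute, spelled out in its GCC form rather than the kernel's __noreturn macro:

#include <stdio.h>
#include <stdlib.h>

/* Userspace equivalent of the kernel's __noreturn annotation. */
__attribute__((noreturn)) static void machine_power_off(void)
{
	puts("powering off (pretend)");
	exit(0);
	/* Falling off a noreturn function is undefined; the kernel
	 * guards the equivalent spot with BUG(). */
}

int main(void)
{
	machine_power_off();
	/* The compiler knows this point is unreachable, so no
	 * "missing return" diagnostics are emitted for this path. */
}
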
22686 diff -urNp linux-3.0.8/arch/x86/platform/uv/tlb_uv.c linux-3.0.8/arch/x86/platform/uv/tlb_uv.c
22687 --- linux-3.0.8/arch/x86/platform/uv/tlb_uv.c 2011-07-21 22:17:23.000000000 -0400
22688 +++ linux-3.0.8/arch/x86/platform/uv/tlb_uv.c 2011-08-23 21:48:14.000000000 -0400
22689 @@ -373,6 +373,8 @@ static void reset_with_ipi(struct bau_ta
22690 cpumask_t mask;
22691 struct reset_args reset_args;
22692
22693 + pax_track_stack();
22694 +
22695 reset_args.sender = sender;
22696 cpus_clear(mask);
22697 /* find a single cpu for each uvhub in this distribution mask */
22698 diff -urNp linux-3.0.8/arch/x86/power/cpu.c linux-3.0.8/arch/x86/power/cpu.c
22699 --- linux-3.0.8/arch/x86/power/cpu.c 2011-07-21 22:17:23.000000000 -0400
22700 +++ linux-3.0.8/arch/x86/power/cpu.c 2011-08-23 21:47:55.000000000 -0400
22701 @@ -130,7 +130,7 @@ static void do_fpu_end(void)
22702 static void fix_processor_context(void)
22703 {
22704 int cpu = smp_processor_id();
22705 - struct tss_struct *t = &per_cpu(init_tss, cpu);
22706 + struct tss_struct *t = init_tss + cpu;
22707
22708 set_tss_desc(cpu, t); /*
22709 * This just modifies memory; should not be
22710 @@ -140,7 +140,9 @@ static void fix_processor_context(void)
22711 */
22712
22713 #ifdef CONFIG_X86_64
22714 + pax_open_kernel();
22715 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
22716 + pax_close_kernel();
22717
22718 syscall_init(); /* This sets MSR_*STAR and related */
22719 #endif
22720 diff -urNp linux-3.0.8/arch/x86/vdso/Makefile linux-3.0.8/arch/x86/vdso/Makefile
22721 --- linux-3.0.8/arch/x86/vdso/Makefile 2011-07-21 22:17:23.000000000 -0400
22722 +++ linux-3.0.8/arch/x86/vdso/Makefile 2011-08-23 21:47:55.000000000 -0400
22723 @@ -136,7 +136,7 @@ quiet_cmd_vdso = VDSO $@
22724 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
22725 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
22726
22727 -VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
22728 +VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
22729 GCOV_PROFILE := n
22730
22731 #
22732 diff -urNp linux-3.0.8/arch/x86/vdso/vdso32-setup.c linux-3.0.8/arch/x86/vdso/vdso32-setup.c
22733 --- linux-3.0.8/arch/x86/vdso/vdso32-setup.c 2011-07-21 22:17:23.000000000 -0400
22734 +++ linux-3.0.8/arch/x86/vdso/vdso32-setup.c 2011-08-23 21:47:55.000000000 -0400
22735 @@ -25,6 +25,7 @@
22736 #include <asm/tlbflush.h>
22737 #include <asm/vdso.h>
22738 #include <asm/proto.h>
22739 +#include <asm/mman.h>
22740
22741 enum {
22742 VDSO_DISABLED = 0,
22743 @@ -226,7 +227,7 @@ static inline void map_compat_vdso(int m
22744 void enable_sep_cpu(void)
22745 {
22746 int cpu = get_cpu();
22747 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
22748 + struct tss_struct *tss = init_tss + cpu;
22749
22750 if (!boot_cpu_has(X86_FEATURE_SEP)) {
22751 put_cpu();
22752 @@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
22753 gate_vma.vm_start = FIXADDR_USER_START;
22754 gate_vma.vm_end = FIXADDR_USER_END;
22755 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
22756 - gate_vma.vm_page_prot = __P101;
22757 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
22758 /*
22759 * Make sure the vDSO gets into every core dump.
22760 * Dumping its contents makes post-mortem fully interpretable later
22761 @@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct l
22762 if (compat)
22763 addr = VDSO_HIGH_BASE;
22764 else {
22765 - addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
22766 + addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
22767 if (IS_ERR_VALUE(addr)) {
22768 ret = addr;
22769 goto up_fail;
22770 }
22771 }
22772
22773 - current->mm->context.vdso = (void *)addr;
22774 + current->mm->context.vdso = addr;
22775
22776 if (compat_uses_vma || !compat) {
22777 /*
22778 @@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct l
22779 }
22780
22781 current_thread_info()->sysenter_return =
22782 - VDSO32_SYMBOL(addr, SYSENTER_RETURN);
22783 + (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
22784
22785 up_fail:
22786 if (ret)
22787 - current->mm->context.vdso = NULL;
22788 + current->mm->context.vdso = 0;
22789
22790 up_write(&mm->mmap_sem);
22791
22792 @@ -412,8 +413,14 @@ __initcall(ia32_binfmt_init);
22793
22794 const char *arch_vma_name(struct vm_area_struct *vma)
22795 {
22796 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
22797 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
22798 return "[vdso]";
22799 +
22800 +#ifdef CONFIG_PAX_SEGMEXEC
22801 + if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
22802 + return "[vdso]";
22803 +#endif
22804 +
22805 return NULL;
22806 }
22807
22808 @@ -423,7 +430,7 @@ struct vm_area_struct *get_gate_vma(stru
22809 * Check to see if the corresponding task was created in compat vdso
22810 * mode.
22811 */
22812 - if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
22813 + if (mm && mm->context.vdso == VDSO_HIGH_BASE)
22814 return &gate_vma;
22815 return NULL;
22816 }
22817 diff -urNp linux-3.0.8/arch/x86/vdso/vma.c linux-3.0.8/arch/x86/vdso/vma.c
22818 --- linux-3.0.8/arch/x86/vdso/vma.c 2011-07-21 22:17:23.000000000 -0400
22819 +++ linux-3.0.8/arch/x86/vdso/vma.c 2011-08-23 21:47:55.000000000 -0400
22820 @@ -15,18 +15,19 @@
22821 #include <asm/proto.h>
22822 #include <asm/vdso.h>
22823
22824 -unsigned int __read_mostly vdso_enabled = 1;
22825 -
22826 extern char vdso_start[], vdso_end[];
22827 extern unsigned short vdso_sync_cpuid;
22828 +extern char __vsyscall_0;
22829
22830 static struct page **vdso_pages;
22831 +static struct page *vsyscall_page;
22832 static unsigned vdso_size;
22833
22834 static int __init init_vdso_vars(void)
22835 {
22836 - int npages = (vdso_end - vdso_start + PAGE_SIZE - 1) / PAGE_SIZE;
22837 - int i;
22838 + size_t nbytes = vdso_end - vdso_start;
22839 + size_t npages = (nbytes + PAGE_SIZE - 1) / PAGE_SIZE;
22840 + size_t i;
22841
22842 vdso_size = npages << PAGE_SHIFT;
22843 vdso_pages = kmalloc(sizeof(struct page *) * npages, GFP_KERNEL);
22844 @@ -34,19 +35,19 @@ static int __init init_vdso_vars(void)
22845 goto oom;
22846 for (i = 0; i < npages; i++) {
22847 struct page *p;
22848 - p = alloc_page(GFP_KERNEL);
22849 + p = alloc_page(GFP_KERNEL | __GFP_ZERO);
22850 if (!p)
22851 goto oom;
22852 vdso_pages[i] = p;
22853 - copy_page(page_address(p), vdso_start + i*PAGE_SIZE);
22854 + memcpy(page_address(p), vdso_start + i*PAGE_SIZE, nbytes > PAGE_SIZE ? PAGE_SIZE : nbytes);
22855 + nbytes -= PAGE_SIZE;
22856 }
22857 + vsyscall_page = pfn_to_page((__pa_symbol(&__vsyscall_0)) >> PAGE_SHIFT);
22858
22859 return 0;
22860
22861 oom:
22862 - printk("Cannot allocate vdso\n");
22863 - vdso_enabled = 0;
22864 - return -ENOMEM;
22865 + panic("Cannot allocate vdso\n");
22866 }
22867 subsys_initcall(init_vdso_vars);
22868
22869 @@ -80,37 +81,35 @@ int arch_setup_additional_pages(struct l
22870 unsigned long addr;
22871 int ret;
22872
22873 - if (!vdso_enabled)
22874 - return 0;
22875 -
22876 down_write(&mm->mmap_sem);
22877 - addr = vdso_addr(mm->start_stack, vdso_size);
22878 - addr = get_unmapped_area(NULL, addr, vdso_size, 0, 0);
22879 + addr = vdso_addr(mm->start_stack, vdso_size + PAGE_SIZE);
22880 + addr = get_unmapped_area(NULL, addr, vdso_size + PAGE_SIZE, 0, 0);
22881 if (IS_ERR_VALUE(addr)) {
22882 ret = addr;
22883 goto up_fail;
22884 }
22885
22886 - current->mm->context.vdso = (void *)addr;
22887 + mm->context.vdso = addr + PAGE_SIZE;
22888
22889 - ret = install_special_mapping(mm, addr, vdso_size,
22890 + ret = install_special_mapping(mm, addr, PAGE_SIZE,
22891 VM_READ|VM_EXEC|
22892 - VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
22893 + VM_MAYREAD|VM_MAYEXEC|
22894 VM_ALWAYSDUMP,
22895 - vdso_pages);
22896 + &vsyscall_page);
22897 if (ret) {
22898 - current->mm->context.vdso = NULL;
22899 + mm->context.vdso = 0;
22900 goto up_fail;
22901 }
22902
22903 + ret = install_special_mapping(mm, addr + PAGE_SIZE, vdso_size,
22904 + VM_READ|VM_EXEC|
22905 + VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
22906 + VM_ALWAYSDUMP,
22907 + vdso_pages);
22908 + if (ret)
22909 + mm->context.vdso = 0;
22910 +
22911 up_fail:
22912 up_write(&mm->mmap_sem);
22913 return ret;
22914 }
22915 -
22916 -static __init int vdso_setup(char *s)
22917 -{
22918 - vdso_enabled = simple_strtoul(s, NULL, 0);
22919 - return 0;
22920 -}
22921 -__setup("vdso=", vdso_setup);
22922 diff -urNp linux-3.0.8/arch/x86/xen/enlighten.c linux-3.0.8/arch/x86/xen/enlighten.c
22923 --- linux-3.0.8/arch/x86/xen/enlighten.c 2011-10-24 08:05:23.000000000 -0400
22924 +++ linux-3.0.8/arch/x86/xen/enlighten.c 2011-08-29 23:26:21.000000000 -0400
22925 @@ -85,8 +85,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
22926
22927 struct shared_info xen_dummy_shared_info;
22928
22929 -void *xen_initial_gdt;
22930 -
22931 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
22932 __read_mostly int xen_have_vector_callback;
22933 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
22934 @@ -1010,7 +1008,7 @@ static const struct pv_apic_ops xen_apic
22935 #endif
22936 };
22937
22938 -static void xen_reboot(int reason)
22939 +static __noreturn void xen_reboot(int reason)
22940 {
22941 struct sched_shutdown r = { .reason = reason };
22942
22943 @@ -1018,17 +1016,17 @@ static void xen_reboot(int reason)
22944 BUG();
22945 }
22946
22947 -static void xen_restart(char *msg)
22948 +static __noreturn void xen_restart(char *msg)
22949 {
22950 xen_reboot(SHUTDOWN_reboot);
22951 }
22952
22953 -static void xen_emergency_restart(void)
22954 +static __noreturn void xen_emergency_restart(void)
22955 {
22956 xen_reboot(SHUTDOWN_reboot);
22957 }
22958
22959 -static void xen_machine_halt(void)
22960 +static __noreturn void xen_machine_halt(void)
22961 {
22962 xen_reboot(SHUTDOWN_poweroff);
22963 }
22964 @@ -1134,7 +1132,17 @@ asmlinkage void __init xen_start_kernel(
22965 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
22966
22967 /* Work out if we support NX */
22968 - x86_configure_nx();
22969 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
22970 + if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
22971 + (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
22972 + unsigned l, h;
22973 +
22974 + __supported_pte_mask |= _PAGE_NX;
22975 + rdmsr(MSR_EFER, l, h);
22976 + l |= EFER_NX;
22977 + wrmsr(MSR_EFER, l, h);
22978 + }
22979 +#endif
22980
22981 xen_setup_features();
22982
22983 @@ -1165,13 +1173,6 @@ asmlinkage void __init xen_start_kernel(
22984
22985 machine_ops = xen_machine_ops;
22986
22987 - /*
22988 - * The only reliable way to retain the initial address of the
22989 - * percpu gdt_page is to remember it here, so we can go and
22990 - * mark it RW later, when the initial percpu area is freed.
22991 - */
22992 - xen_initial_gdt = &per_cpu(gdt_page, 0);
22993 -
22994 xen_smp_init();
22995
22996 #ifdef CONFIG_ACPI_NUMA
22997 diff -urNp linux-3.0.8/arch/x86/xen/mmu.c linux-3.0.8/arch/x86/xen/mmu.c
22998 --- linux-3.0.8/arch/x86/xen/mmu.c 2011-10-24 08:05:23.000000000 -0400
22999 +++ linux-3.0.8/arch/x86/xen/mmu.c 2011-08-29 23:26:21.000000000 -0400
23000 @@ -1683,6 +1683,8 @@ pgd_t * __init xen_setup_kernel_pagetabl
23001 convert_pfn_mfn(init_level4_pgt);
23002 convert_pfn_mfn(level3_ident_pgt);
23003 convert_pfn_mfn(level3_kernel_pgt);
23004 + convert_pfn_mfn(level3_vmalloc_pgt);
23005 + convert_pfn_mfn(level3_vmemmap_pgt);
23006
23007 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
23008 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
23009 @@ -1701,7 +1703,10 @@ pgd_t * __init xen_setup_kernel_pagetabl
23010 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
23011 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
23012 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
23013 + set_page_prot(level3_vmalloc_pgt, PAGE_KERNEL_RO);
23014 + set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
23015 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
23016 + set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
23017 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
23018 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
23019
23020 @@ -1913,6 +1918,7 @@ static void __init xen_post_allocator_in
23021 pv_mmu_ops.set_pud = xen_set_pud;
23022 #if PAGETABLE_LEVELS == 4
23023 pv_mmu_ops.set_pgd = xen_set_pgd;
23024 + pv_mmu_ops.set_pgd_batched = xen_set_pgd;
23025 #endif
23026
23027 /* This will work as long as patching hasn't happened yet
23028 @@ -1994,6 +2000,7 @@ static const struct pv_mmu_ops xen_mmu_o
23029 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
23030 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
23031 .set_pgd = xen_set_pgd_hyper,
23032 + .set_pgd_batched = xen_set_pgd_hyper,
23033
23034 .alloc_pud = xen_alloc_pmd_init,
23035 .release_pud = xen_release_pmd_init,
23036 diff -urNp linux-3.0.8/arch/x86/xen/smp.c linux-3.0.8/arch/x86/xen/smp.c
23037 --- linux-3.0.8/arch/x86/xen/smp.c 2011-10-24 08:05:30.000000000 -0400
23038 +++ linux-3.0.8/arch/x86/xen/smp.c 2011-10-16 21:55:27.000000000 -0400
23039 @@ -194,11 +194,6 @@ static void __init xen_smp_prepare_boot_
23040 {
23041 BUG_ON(smp_processor_id() != 0);
23042 native_smp_prepare_boot_cpu();
23043 -
23044 - /* We've switched to the "real" per-cpu gdt, so make sure the
23045 - old memory can be recycled */
23046 - make_lowmem_page_readwrite(xen_initial_gdt);
23047 -
23048 xen_filter_cpu_maps();
23049 xen_setup_vcpu_info_placement();
23050 }
23051 @@ -275,12 +270,12 @@ cpu_initialize_context(unsigned int cpu,
23052 gdt = get_cpu_gdt_table(cpu);
23053
23054 ctxt->flags = VGCF_IN_KERNEL;
23055 - ctxt->user_regs.ds = __USER_DS;
23056 - ctxt->user_regs.es = __USER_DS;
23057 + ctxt->user_regs.ds = __KERNEL_DS;
23058 + ctxt->user_regs.es = __KERNEL_DS;
23059 ctxt->user_regs.ss = __KERNEL_DS;
23060 #ifdef CONFIG_X86_32
23061 ctxt->user_regs.fs = __KERNEL_PERCPU;
23062 - ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
23063 + savesegment(gs, ctxt->user_regs.gs);
23064 #else
23065 ctxt->gs_base_kernel = per_cpu_offset(cpu);
23066 #endif
23067 @@ -331,13 +326,12 @@ static int __cpuinit xen_cpu_up(unsigned
23068 int rc;
23069
23070 per_cpu(current_task, cpu) = idle;
23071 + per_cpu(current_tinfo, cpu) = &idle->tinfo;
23072 #ifdef CONFIG_X86_32
23073 irq_ctx_init(cpu);
23074 #else
23075 clear_tsk_thread_flag(idle, TIF_FORK);
23076 - per_cpu(kernel_stack, cpu) =
23077 - (unsigned long)task_stack_page(idle) -
23078 - KERNEL_STACK_OFFSET + THREAD_SIZE;
23079 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
23080 #endif
23081 xen_setup_runstate_info(cpu);
23082 xen_setup_timer(cpu);
23083 diff -urNp linux-3.0.8/arch/x86/xen/xen-asm_32.S linux-3.0.8/arch/x86/xen/xen-asm_32.S
23084 --- linux-3.0.8/arch/x86/xen/xen-asm_32.S 2011-10-24 08:05:30.000000000 -0400
23085 +++ linux-3.0.8/arch/x86/xen/xen-asm_32.S 2011-10-16 21:55:27.000000000 -0400
23086 @@ -83,14 +83,14 @@ ENTRY(xen_iret)
23087 ESP_OFFSET=4 # bytes pushed onto stack
23088
23089 /*
23090 - * Store vcpu_info pointer for easy access. Do it this way to
23091 - * avoid having to reload %fs
23092 + * Store vcpu_info pointer for easy access.
23093 */
23094 #ifdef CONFIG_SMP
23095 - GET_THREAD_INFO(%eax)
23096 - movl TI_cpu(%eax), %eax
23097 - movl __per_cpu_offset(,%eax,4), %eax
23098 - mov xen_vcpu(%eax), %eax
23099 + push %fs
23100 + mov $(__KERNEL_PERCPU), %eax
23101 + mov %eax, %fs
23102 + mov PER_CPU_VAR(xen_vcpu), %eax
23103 + pop %fs
23104 #else
23105 movl xen_vcpu, %eax
23106 #endif
23107 diff -urNp linux-3.0.8/arch/x86/xen/xen-head.S linux-3.0.8/arch/x86/xen/xen-head.S
23108 --- linux-3.0.8/arch/x86/xen/xen-head.S 2011-07-21 22:17:23.000000000 -0400
23109 +++ linux-3.0.8/arch/x86/xen/xen-head.S 2011-08-23 21:47:55.000000000 -0400
23110 @@ -19,6 +19,17 @@ ENTRY(startup_xen)
23111 #ifdef CONFIG_X86_32
23112 mov %esi,xen_start_info
23113 mov $init_thread_union+THREAD_SIZE,%esp
23114 +#ifdef CONFIG_SMP
23115 + movl $cpu_gdt_table,%edi
23116 + movl $__per_cpu_load,%eax
23117 + movw %ax,__KERNEL_PERCPU + 2(%edi)
23118 + rorl $16,%eax
23119 + movb %al,__KERNEL_PERCPU + 4(%edi)
23120 + movb %ah,__KERNEL_PERCPU + 7(%edi)
23121 + movl $__per_cpu_end - 1,%eax
23122 + subl $__per_cpu_start,%eax
23123 + movw %ax,__KERNEL_PERCPU + 0(%edi)
23124 +#endif
23125 #else
23126 mov %rsi,xen_start_info
23127 mov $init_thread_union+THREAD_SIZE,%rsp
23128 diff -urNp linux-3.0.8/arch/x86/xen/xen-ops.h linux-3.0.8/arch/x86/xen/xen-ops.h
23129 --- linux-3.0.8/arch/x86/xen/xen-ops.h 2011-10-24 08:05:21.000000000 -0400
23130 +++ linux-3.0.8/arch/x86/xen/xen-ops.h 2011-08-23 21:47:55.000000000 -0400
23131 @@ -10,8 +10,6 @@
23132 extern const char xen_hypervisor_callback[];
23133 extern const char xen_failsafe_callback[];
23134
23135 -extern void *xen_initial_gdt;
23136 -
23137 struct trap_info;
23138 void xen_copy_trap_info(struct trap_info *traps);
23139
23140 diff -urNp linux-3.0.8/block/blk-iopoll.c linux-3.0.8/block/blk-iopoll.c
23141 --- linux-3.0.8/block/blk-iopoll.c 2011-07-21 22:17:23.000000000 -0400
23142 +++ linux-3.0.8/block/blk-iopoll.c 2011-08-23 21:47:55.000000000 -0400
23143 @@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopo
23144 }
23145 EXPORT_SYMBOL(blk_iopoll_complete);
23146
23147 -static void blk_iopoll_softirq(struct softirq_action *h)
23148 +static void blk_iopoll_softirq(void)
23149 {
23150 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
23151 int rearm = 0, budget = blk_iopoll_budget;
23152 diff -urNp linux-3.0.8/block/blk-map.c linux-3.0.8/block/blk-map.c
23153 --- linux-3.0.8/block/blk-map.c 2011-07-21 22:17:23.000000000 -0400
23154 +++ linux-3.0.8/block/blk-map.c 2011-08-23 21:47:55.000000000 -0400
23155 @@ -301,7 +301,7 @@ int blk_rq_map_kern(struct request_queue
23156 if (!len || !kbuf)
23157 return -EINVAL;
23158
23159 - do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
23160 + do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
23161 if (do_copy)
23162 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
23163 else
23164 diff -urNp linux-3.0.8/block/blk-softirq.c linux-3.0.8/block/blk-softirq.c
23165 --- linux-3.0.8/block/blk-softirq.c 2011-07-21 22:17:23.000000000 -0400
23166 +++ linux-3.0.8/block/blk-softirq.c 2011-08-23 21:47:55.000000000 -0400
23167 @@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head,
23168 * Softirq action handler - move entries to local list and loop over them
23169 * while passing them to the queue registered handler.
23170 */
23171 -static void blk_done_softirq(struct softirq_action *h)
23172 +static void blk_done_softirq(void)
23173 {
23174 struct list_head *cpu_list, local_list;
23175
23176 diff -urNp linux-3.0.8/block/bsg.c linux-3.0.8/block/bsg.c
23177 --- linux-3.0.8/block/bsg.c 2011-07-21 22:17:23.000000000 -0400
23178 +++ linux-3.0.8/block/bsg.c 2011-10-06 04:17:55.000000000 -0400
23179 @@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct r
23180 struct sg_io_v4 *hdr, struct bsg_device *bd,
23181 fmode_t has_write_perm)
23182 {
23183 + unsigned char tmpcmd[sizeof(rq->__cmd)];
23184 + unsigned char *cmdptr;
23185 +
23186 if (hdr->request_len > BLK_MAX_CDB) {
23187 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
23188 if (!rq->cmd)
23189 return -ENOMEM;
23190 - }
23191 + cmdptr = rq->cmd;
23192 + } else
23193 + cmdptr = tmpcmd;
23194
23195 - if (copy_from_user(rq->cmd, (void *)(unsigned long)hdr->request,
23196 + if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
23197 hdr->request_len))
23198 return -EFAULT;
23199
23200 + if (cmdptr != rq->cmd)
23201 + memcpy(rq->cmd, cmdptr, hdr->request_len);
23202 +
23203 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
23204 if (blk_verify_command(rq->cmd, has_write_perm))
23205 return -EPERM;
23206 @@ -249,7 +257,7 @@ bsg_map_hdr(struct bsg_device *bd, struc
23207 struct request *rq, *next_rq = NULL;
23208 int ret, rw;
23209 unsigned int dxfer_len;
23210 - void *dxferp = NULL;
23211 + void __user *dxferp = NULL;
23212 struct bsg_class_device *bcd = &q->bsg_dev;
23213
23214 /* if the LLD has been removed then the bsg_unregister_queue will
23215 @@ -291,7 +299,7 @@ bsg_map_hdr(struct bsg_device *bd, struc
23216 rq->next_rq = next_rq;
23217 next_rq->cmd_type = rq->cmd_type;
23218
23219 - dxferp = (void*)(unsigned long)hdr->din_xferp;
23220 + dxferp = (void __user *)(unsigned long)hdr->din_xferp;
23221 ret = blk_rq_map_user(q, next_rq, NULL, dxferp,
23222 hdr->din_xfer_len, GFP_KERNEL);
23223 if (ret)
23224 @@ -300,10 +308,10 @@ bsg_map_hdr(struct bsg_device *bd, struc
23225
23226 if (hdr->dout_xfer_len) {
23227 dxfer_len = hdr->dout_xfer_len;
23228 - dxferp = (void*)(unsigned long)hdr->dout_xferp;
23229 + dxferp = (void __user *)(unsigned long)hdr->dout_xferp;
23230 } else if (hdr->din_xfer_len) {
23231 dxfer_len = hdr->din_xfer_len;
23232 - dxferp = (void*)(unsigned long)hdr->din_xferp;
23233 + dxferp = (void __user *)(unsigned long)hdr->din_xferp;
23234 } else
23235 dxfer_len = 0;
23236
23237 @@ -445,7 +453,7 @@ static int blk_complete_sgv4_hdr_rq(stru
23238 int len = min_t(unsigned int, hdr->max_response_len,
23239 rq->sense_len);
23240
23241 - ret = copy_to_user((void*)(unsigned long)hdr->response,
23242 + ret = copy_to_user((void __user *)(unsigned long)hdr->response,
23243 rq->sense, len);
23244 if (!ret)
23245 hdr->response_len = len;
23246 diff -urNp linux-3.0.8/block/compat_ioctl.c linux-3.0.8/block/compat_ioctl.c
23247 --- linux-3.0.8/block/compat_ioctl.c 2011-07-21 22:17:23.000000000 -0400
23248 +++ linux-3.0.8/block/compat_ioctl.c 2011-10-06 04:17:55.000000000 -0400
23249 @@ -354,7 +354,7 @@ static int compat_fd_ioctl(struct block_
23250 err |= __get_user(f->spec1, &uf->spec1);
23251 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
23252 err |= __get_user(name, &uf->name);
23253 - f->name = compat_ptr(name);
23254 + f->name = (void __force_kernel *)compat_ptr(name);
23255 if (err) {
23256 err = -EFAULT;
23257 goto out;
23258 diff -urNp linux-3.0.8/block/scsi_ioctl.c linux-3.0.8/block/scsi_ioctl.c
23259 --- linux-3.0.8/block/scsi_ioctl.c 2011-07-21 22:17:23.000000000 -0400
23260 +++ linux-3.0.8/block/scsi_ioctl.c 2011-08-23 21:47:55.000000000 -0400
23261 @@ -222,8 +222,20 @@ EXPORT_SYMBOL(blk_verify_command);
23262 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
23263 struct sg_io_hdr *hdr, fmode_t mode)
23264 {
23265 - if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
23266 + unsigned char tmpcmd[sizeof(rq->__cmd)];
23267 + unsigned char *cmdptr;
23268 +
23269 + if (rq->cmd != rq->__cmd)
23270 + cmdptr = rq->cmd;
23271 + else
23272 + cmdptr = tmpcmd;
23273 +
23274 + if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
23275 return -EFAULT;
23276 +
23277 + if (cmdptr != rq->cmd)
23278 + memcpy(rq->cmd, cmdptr, hdr->cmd_len);
23279 +
23280 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
23281 return -EPERM;
23282
23283 @@ -432,6 +444,8 @@ int sg_scsi_ioctl(struct request_queue *
23284 int err;
23285 unsigned int in_len, out_len, bytes, opcode, cmdlen;
23286 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
23287 + unsigned char tmpcmd[sizeof(rq->__cmd)];
23288 + unsigned char *cmdptr;
23289
23290 if (!sic)
23291 return -EINVAL;
23292 @@ -465,9 +479,18 @@ int sg_scsi_ioctl(struct request_queue *
23293 */
23294 err = -EFAULT;
23295 rq->cmd_len = cmdlen;
23296 - if (copy_from_user(rq->cmd, sic->data, cmdlen))
23297 +
23298 + if (rq->cmd != rq->__cmd)
23299 + cmdptr = rq->cmd;
23300 + else
23301 + cmdptr = tmpcmd;
23302 +
23303 + if (copy_from_user(cmdptr, sic->data, cmdlen))
23304 goto error;
23305
23306 + if (rq->cmd != cmdptr)
23307 + memcpy(rq->cmd, cmdptr, cmdlen);
23308 +
23309 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
23310 goto error;
23311
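The bsg.c and scsi_ioctl.c changes above all follow one pattern: rather than copy_from_user()ing straight into rq->cmd, which may alias the fixed-size rq->__cmd array embedded in the request, the user-supplied CDB is first landed in an equally sized local bounce buffer and only then memcpy()d into place. Presumably this gives the hardened usercopy checks a destination with an obvious compile-time bound. A rough userspace sketch of the bounce-buffer idea follows; struct request, read_untrusted() and fill_cmd() here are invented stand-ins, not the kernel's definitions.

#include <stdio.h>
#include <string.h>

#define CMD_MAX 16                         /* stand-in for BLK_MAX_CDB */

struct request {
    unsigned char __cmd[CMD_MAX];          /* short CDBs are stored inline */
    unsigned char *cmd;                    /* points at __cmd or at a larger heap buffer */
};

/* stand-in for copy_from_user(); returns 0 on success */
static int read_untrusted(unsigned char *dst, const unsigned char *src, size_t len)
{
    memcpy(dst, src, len);
    return 0;
}

static int fill_cmd(struct request *rq, const unsigned char *user_cdb, size_t len)
{
    unsigned char tmpcmd[sizeof(rq->__cmd)];
    unsigned char *cmdptr = (rq->cmd != rq->__cmd) ? rq->cmd : tmpcmd;

    if (cmdptr == tmpcmd && len > sizeof(tmpcmd))
        return -1;                         /* would overflow the inline array */
    if (read_untrusted(cmdptr, user_cdb, len))
        return -1;                         /* -EFAULT in the kernel */
    if (cmdptr != rq->cmd)                 /* bounce buffer was used: copy it into place */
        memcpy(rq->cmd, cmdptr, len);
    return 0;
}

int main(void)
{
    struct request rq;
    const unsigned char cdb[6] = { 0x12, 0, 0, 0, 36, 0 };   /* SCSI INQUIRY */

    rq.cmd = rq.__cmd;
    if (fill_cmd(&rq, cdb, sizeof(cdb)) == 0)
        printf("opcode 0x%02x staged through the bounce buffer\n", rq.cmd[0]);
    return 0;
}

The real code additionally kzalloc()s a larger rq->cmd for CDBs longer than BLK_MAX_CDB, in which case the bounce buffer is skipped.
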
23312 diff -urNp linux-3.0.8/crypto/cryptd.c linux-3.0.8/crypto/cryptd.c
23313 --- linux-3.0.8/crypto/cryptd.c 2011-07-21 22:17:23.000000000 -0400
23314 +++ linux-3.0.8/crypto/cryptd.c 2011-08-23 21:47:55.000000000 -0400
23315 @@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
23316
23317 struct cryptd_blkcipher_request_ctx {
23318 crypto_completion_t complete;
23319 -};
23320 +} __no_const;
23321
23322 struct cryptd_hash_ctx {
23323 struct crypto_shash *child;
23324 @@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
23325
23326 struct cryptd_aead_request_ctx {
23327 crypto_completion_t complete;
23328 -};
23329 +} __no_const;
23330
23331 static void cryptd_queue_worker(struct work_struct *work);
23332
23333 diff -urNp linux-3.0.8/crypto/gf128mul.c linux-3.0.8/crypto/gf128mul.c
23334 --- linux-3.0.8/crypto/gf128mul.c 2011-07-21 22:17:23.000000000 -0400
23335 +++ linux-3.0.8/crypto/gf128mul.c 2011-08-23 21:47:55.000000000 -0400
23336 @@ -182,7 +182,7 @@ void gf128mul_lle(be128 *r, const be128
23337 for (i = 0; i < 7; ++i)
23338 gf128mul_x_lle(&p[i + 1], &p[i]);
23339
23340 - memset(r, 0, sizeof(r));
23341 + memset(r, 0, sizeof(*r));
23342 for (i = 0;;) {
23343 u8 ch = ((u8 *)b)[15 - i];
23344
23345 @@ -220,7 +220,7 @@ void gf128mul_bbe(be128 *r, const be128
23346 for (i = 0; i < 7; ++i)
23347 gf128mul_x_bbe(&p[i + 1], &p[i]);
23348
23349 - memset(r, 0, sizeof(r));
23350 + memset(r, 0, sizeof(*r));
23351 for (i = 0;;) {
23352 u8 ch = ((u8 *)b)[i];
23353
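The gf128mul.c hunks fix a plain bug rather than a hardening issue: r is a be128 *, so memset(r, 0, sizeof(r)) clears only a pointer-sized 4 or 8 bytes instead of the whole 16-byte block. A minimal standalone demonstration of the difference:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

typedef struct { uint64_t a, b; } be128;

static void clear_wrong(be128 *r) { memset(r, 0, sizeof(r));  /* pointer size: 4 or 8 bytes */ }
static void clear_right(be128 *r) { memset(r, 0, sizeof(*r)); /* whole struct: 16 bytes */ }

int main(void)
{
    be128 x = { ~0ULL, ~0ULL };

    clear_wrong(&x);
    printf("sizeof(r):  a=%016llx b=%016llx\n",
           (unsigned long long)x.a, (unsigned long long)x.b);

    x.a = x.b = ~0ULL;
    clear_right(&x);
    printf("sizeof(*r): a=%016llx b=%016llx\n",
           (unsigned long long)x.a, (unsigned long long)x.b);
    return 0;
}

Newer compilers flag this pattern via -Wsizeof-pointer-memaccess, which is how such bugs are usually caught today.
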
23354 diff -urNp linux-3.0.8/crypto/serpent.c linux-3.0.8/crypto/serpent.c
23355 --- linux-3.0.8/crypto/serpent.c 2011-07-21 22:17:23.000000000 -0400
23356 +++ linux-3.0.8/crypto/serpent.c 2011-08-23 21:48:14.000000000 -0400
23357 @@ -224,6 +224,8 @@ static int serpent_setkey(struct crypto_
23358 u32 r0,r1,r2,r3,r4;
23359 int i;
23360
23361 + pax_track_stack();
23362 +
23363 /* Copy key, add padding */
23364
23365 for (i = 0; i < keylen; ++i)
23366 diff -urNp linux-3.0.8/Documentation/dontdiff linux-3.0.8/Documentation/dontdiff
23367 --- linux-3.0.8/Documentation/dontdiff 2011-07-21 22:17:23.000000000 -0400
23368 +++ linux-3.0.8/Documentation/dontdiff 2011-10-20 04:46:01.000000000 -0400
23369 @@ -5,6 +5,7 @@
23370 *.cis
23371 *.cpio
23372 *.csp
23373 +*.dbg
23374 *.dsp
23375 *.dvi
23376 *.elf
23377 @@ -48,9 +49,11 @@
23378 *.tab.h
23379 *.tex
23380 *.ver
23381 +*.vim
23382 *.xml
23383 *.xz
23384 *_MODULES
23385 +*_reg_safe.h
23386 *_vga16.c
23387 *~
23388 \#*#
23389 @@ -70,6 +73,7 @@ Kerntypes
23390 Module.markers
23391 Module.symvers
23392 PENDING
23393 +PERF*
23394 SCCS
23395 System.map*
23396 TAGS
23397 @@ -98,14 +102,18 @@ bzImage*
23398 capability_names.h
23399 capflags.c
23400 classlist.h*
23401 +clut_vga16.c
23402 +common-cmds.h
23403 comp*.log
23404 compile.h*
23405 conf
23406 config
23407 config-*
23408 config_data.h*
23409 +config.c
23410 config.mak
23411 config.mak.autogen
23412 +config.tmp
23413 conmakehash
23414 consolemap_deftbl.c*
23415 cpustr.h
23416 @@ -126,12 +134,14 @@ fore200e_pca_fw.c*
23417 gconf
23418 gconf.glade.h
23419 gen-devlist
23420 +gen-kdb_cmds.c
23421 gen_crc32table
23422 gen_init_cpio
23423 generated
23424 genheaders
23425 genksyms
23426 *_gray256.c
23427 +hash
23428 hpet_example
23429 hugepage-mmap
23430 hugepage-shm
23431 @@ -146,7 +156,7 @@ int32.c
23432 int4.c
23433 int8.c
23434 kallsyms
23435 -kconfig
23436 +kern_constants.h
23437 keywords.c
23438 ksym.c*
23439 ksym.h*
23440 @@ -154,7 +164,6 @@ kxgettext
23441 lkc_defs.h
23442 lex.c
23443 lex.*.c
23444 -linux
23445 logo_*.c
23446 logo_*_clut224.c
23447 logo_*_mono.c
23448 @@ -166,7 +175,6 @@ machtypes.h
23449 map
23450 map_hugetlb
23451 maui_boot.h
23452 -media
23453 mconf
23454 miboot*
23455 mk_elfconfig
23456 @@ -174,6 +182,7 @@ mkboot
23457 mkbugboot
23458 mkcpustr
23459 mkdep
23460 +mkpiggy
23461 mkprep
23462 mkregtable
23463 mktables
23464 @@ -209,6 +218,7 @@ r300_reg_safe.h
23465 r420_reg_safe.h
23466 r600_reg_safe.h
23467 recordmcount
23468 +regdb.c
23469 relocs
23470 rlim_names.h
23471 rn50_reg_safe.h
23472 @@ -219,6 +229,7 @@ setup
23473 setup.bin
23474 setup.elf
23475 sImage
23476 +slabinfo
23477 sm_tbl*
23478 split-include
23479 syscalltab.h
23480 @@ -229,6 +240,7 @@ tftpboot.img
23481 timeconst.h
23482 times.h*
23483 trix_boot.h
23484 +user_constants.h
23485 utsrelease.h*
23486 vdso-syms.lds
23487 vdso.lds
23488 @@ -246,7 +258,9 @@ vmlinux
23489 vmlinux-*
23490 vmlinux.aout
23491 vmlinux.bin.all
23492 +vmlinux.bin.bz2
23493 vmlinux.lds
23494 +vmlinux.relocs
23495 vmlinuz
23496 voffset.h
23497 vsyscall.lds
23498 @@ -254,6 +268,7 @@ vsyscall_32.lds
23499 wanxlfw.inc
23500 uImage
23501 unifdef
23502 +utsrelease.h
23503 wakeup.bin
23504 wakeup.elf
23505 wakeup.lds
23506 diff -urNp linux-3.0.8/Documentation/kernel-parameters.txt linux-3.0.8/Documentation/kernel-parameters.txt
23507 --- linux-3.0.8/Documentation/kernel-parameters.txt 2011-07-21 22:17:23.000000000 -0400
23508 +++ linux-3.0.8/Documentation/kernel-parameters.txt 2011-08-23 21:47:55.000000000 -0400
23509 @@ -1883,6 +1883,13 @@ bytes respectively. Such letter suffixes
23510 the specified number of seconds. This is to be used if
23511 your oopses keep scrolling off the screen.
23512
23513 + pax_nouderef [X86] disables UDEREF. Most likely needed under certain
23514 + virtualization environments that don't cope well with the
23515 + expand down segment used by UDEREF on X86-32 or the frequent
23516 + page table updates on X86-64.
23517 +
23518 + pax_softmode= 0/1 to disable/enable PaX softmode at boot.
23519 +
23520 pcbit= [HW,ISDN]
23521
23522 pcd. [PARIDE]
23523 diff -urNp linux-3.0.8/drivers/acpi/apei/cper.c linux-3.0.8/drivers/acpi/apei/cper.c
23524 --- linux-3.0.8/drivers/acpi/apei/cper.c 2011-07-21 22:17:23.000000000 -0400
23525 +++ linux-3.0.8/drivers/acpi/apei/cper.c 2011-08-23 21:47:55.000000000 -0400
23526 @@ -38,12 +38,12 @@
23527 */
23528 u64 cper_next_record_id(void)
23529 {
23530 - static atomic64_t seq;
23531 + static atomic64_unchecked_t seq;
23532
23533 - if (!atomic64_read(&seq))
23534 - atomic64_set(&seq, ((u64)get_seconds()) << 32);
23535 + if (!atomic64_read_unchecked(&seq))
23536 + atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
23537
23538 - return atomic64_inc_return(&seq);
23539 + return atomic64_inc_return_unchecked(&seq);
23540 }
23541 EXPORT_SYMBOL_GPL(cper_next_record_id);
23542
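cper_next_record_id() uses seq purely as a sequence counter, so wrapping it is harmless; the conversion to atomic64_unchecked_t appears to exist so that PaX's reference-counter overflow detection only instruments real reference counts and leaves counters like this one alone. The same atomic_*_unchecked substitution recurs in bulk through the ATM drivers further down, where the converted fields are all rx/tx statistics. A purely illustrative sketch of the checked/unchecked split, not the patch's actual type definitions:

#include <stdio.h>

/* Both types are identical here; the point is only that an overflow
 * checker could instrument one and deliberately ignore the other. */
typedef struct { volatile long counter; } atomic_long_checked_t;
typedef struct { volatile long counter; } atomic_long_unchecked_t;

static long inc_return_unchecked(atomic_long_unchecked_t *a)
{
    return __atomic_add_fetch(&a->counter, 1, __ATOMIC_SEQ_CST);
}

int main(void)
{
    atomic_long_unchecked_t next_record_id = { 0 };

    printf("record id %ld\n", inc_return_unchecked(&next_record_id));
    printf("record id %ld\n", inc_return_unchecked(&next_record_id));
    return 0;
}
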
23543 diff -urNp linux-3.0.8/drivers/acpi/ec_sys.c linux-3.0.8/drivers/acpi/ec_sys.c
23544 --- linux-3.0.8/drivers/acpi/ec_sys.c 2011-07-21 22:17:23.000000000 -0400
23545 +++ linux-3.0.8/drivers/acpi/ec_sys.c 2011-08-24 19:06:55.000000000 -0400
23546 @@ -11,6 +11,7 @@
23547 #include <linux/kernel.h>
23548 #include <linux/acpi.h>
23549 #include <linux/debugfs.h>
23550 +#include <asm/uaccess.h>
23551 #include "internal.h"
23552
23553 MODULE_AUTHOR("Thomas Renninger <trenn@suse.de>");
23554 @@ -39,7 +40,7 @@ static ssize_t acpi_ec_read_io(struct fi
23555 * struct acpi_ec *ec = ((struct seq_file *)f->private_data)->private;
23556 */
23557 unsigned int size = EC_SPACE_SIZE;
23558 - u8 *data = (u8 *) buf;
23559 + u8 data;
23560 loff_t init_off = *off;
23561 int err = 0;
23562
23563 @@ -52,9 +53,11 @@ static ssize_t acpi_ec_read_io(struct fi
23564 size = count;
23565
23566 while (size) {
23567 - err = ec_read(*off, &data[*off - init_off]);
23568 + err = ec_read(*off, &data);
23569 if (err)
23570 return err;
23571 + if (put_user(data, &buf[*off - init_off]))
23572 + return -EFAULT;
23573 *off += 1;
23574 size--;
23575 }
23576 @@ -70,7 +73,6 @@ static ssize_t acpi_ec_write_io(struct f
23577
23578 unsigned int size = count;
23579 loff_t init_off = *off;
23580 - u8 *data = (u8 *) buf;
23581 int err = 0;
23582
23583 if (*off >= EC_SPACE_SIZE)
23584 @@ -81,7 +83,9 @@ static ssize_t acpi_ec_write_io(struct f
23585 }
23586
23587 while (size) {
23588 - u8 byte_write = data[*off - init_off];
23589 + u8 byte_write;
23590 + if (get_user(byte_write, &buf[*off - init_off]))
23591 + return -EFAULT;
23592 err = ec_write(*off, byte_write);
23593 if (err)
23594 return err;
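
The original acpi_ec_read_io()/acpi_ec_write_io() cast the userspace buf pointer to u8 * and dereferenced it directly from kernel code; the replacement moves every access through get_user()/put_user(), so a bad pointer returns -EFAULT instead of being scribbled on, and the user pointer is never dereferenced directly (which is also what UDEREF enforces). A userspace cartoon of the idea, with all names invented; the real access_ok()/fault-handling machinery is of course far more involved.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

static uint8_t user_buf[8];                    /* pretend userspace buffer */

/* stand-in for put_user(): refuse any pointer outside the known range */
static int put_user_byte(uint8_t val, uint8_t *uptr)
{
    uintptr_t p    = (uintptr_t)uptr;
    uintptr_t base = (uintptr_t)user_buf;

    if (p < base || p >= base + sizeof(user_buf))
        return -14;                            /* -EFAULT */
    *uptr = val;
    return 0;
}

int main(void)
{
    const uint8_t ec_regs[4] = { 0xde, 0xad, 0xbe, 0xef };
    size_t i;

    /* copy the "EC registers" out one byte at a time, as the fixed
     * acpi_ec_read_io() does with put_user() */
    for (i = 0; i < sizeof(ec_regs); i++)
        if (put_user_byte(ec_regs[i], &user_buf[i]))
            return 1;
    printf("copied %zu bytes, first = 0x%02x\n", sizeof(ec_regs), user_buf[0]);

    /* a bogus pointer is rejected instead of being written through */
    printf("bogus pointer -> %d\n", put_user_byte(0, (uint8_t *)0x1234));
    return 0;
}
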
23595 diff -urNp linux-3.0.8/drivers/acpi/proc.c linux-3.0.8/drivers/acpi/proc.c
23596 --- linux-3.0.8/drivers/acpi/proc.c 2011-07-21 22:17:23.000000000 -0400
23597 +++ linux-3.0.8/drivers/acpi/proc.c 2011-08-23 21:47:55.000000000 -0400
23598 @@ -342,19 +342,13 @@ acpi_system_write_wakeup_device(struct f
23599 size_t count, loff_t * ppos)
23600 {
23601 struct list_head *node, *next;
23602 - char strbuf[5];
23603 - char str[5] = "";
23604 - unsigned int len = count;
23605 -
23606 - if (len > 4)
23607 - len = 4;
23608 - if (len < 0)
23609 - return -EFAULT;
23610 + char strbuf[5] = {0};
23611
23612 - if (copy_from_user(strbuf, buffer, len))
23613 + if (count > 4)
23614 + count = 4;
23615 + if (copy_from_user(strbuf, buffer, count))
23616 return -EFAULT;
23617 - strbuf[len] = '\0';
23618 - sscanf(strbuf, "%s", str);
23619 + strbuf[count] = '\0';
23620
23621 mutex_lock(&acpi_device_lock);
23622 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
23623 @@ -363,7 +357,7 @@ acpi_system_write_wakeup_device(struct f
23624 if (!dev->wakeup.flags.valid)
23625 continue;
23626
23627 - if (!strncmp(dev->pnp.bus_id, str, 4)) {
23628 + if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
23629 if (device_can_wakeup(&dev->dev)) {
23630 bool enable = !device_may_wakeup(&dev->dev);
23631 device_set_wakeup_enable(&dev->dev, enable);
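
The acpi_system_write_wakeup_device() rewrite above is the stock idiom for taking a short string from userspace: cap the length at one less than the buffer size, copy, and NUL-terminate, instead of the old signed-length handling and the redundant sscanf() into a second buffer. A standalone sketch of the idiom, with read_untrusted() standing in for copy_from_user():

#include <stdio.h>
#include <string.h>

/* stand-in for copy_from_user(); returns 0 on success */
static int read_untrusted(char *dst, const char *src, size_t len)
{
    memcpy(dst, src, len);
    return 0;
}

/* copy at most bufsz-1 bytes and always NUL-terminate */
static int read_bus_id(char *buf, size_t bufsz, const char *user, size_t count)
{
    if (count > bufsz - 1)
        count = bufsz - 1;
    if (read_untrusted(buf, user, count))
        return -1;
    buf[count] = '\0';
    return 0;
}

int main(void)
{
    char strbuf[5] = { 0 };

    if (read_bus_id(strbuf, sizeof(strbuf), "PBTN-and-garbage", 16) == 0)
        printf("bus id: \"%s\"\n", strbuf);    /* prints "PBTN" */
    return 0;
}
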
23632 diff -urNp linux-3.0.8/drivers/acpi/processor_driver.c linux-3.0.8/drivers/acpi/processor_driver.c
23633 --- linux-3.0.8/drivers/acpi/processor_driver.c 2011-07-21 22:17:23.000000000 -0400
23634 +++ linux-3.0.8/drivers/acpi/processor_driver.c 2011-08-23 21:47:55.000000000 -0400
23635 @@ -473,7 +473,7 @@ static int __cpuinit acpi_processor_add(
23636 return 0;
23637 #endif
23638
23639 - BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
23640 + BUG_ON(pr->id >= nr_cpu_ids);
23641
23642 /*
23643 * Buggy BIOS check
23644 diff -urNp linux-3.0.8/drivers/ata/libata-core.c linux-3.0.8/drivers/ata/libata-core.c
23645 --- linux-3.0.8/drivers/ata/libata-core.c 2011-07-21 22:17:23.000000000 -0400
23646 +++ linux-3.0.8/drivers/ata/libata-core.c 2011-08-23 21:47:55.000000000 -0400
23647 @@ -4753,7 +4753,7 @@ void ata_qc_free(struct ata_queued_cmd *
23648 struct ata_port *ap;
23649 unsigned int tag;
23650
23651 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
23652 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
23653 ap = qc->ap;
23654
23655 qc->flags = 0;
23656 @@ -4769,7 +4769,7 @@ void __ata_qc_complete(struct ata_queued
23657 struct ata_port *ap;
23658 struct ata_link *link;
23659
23660 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
23661 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
23662 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
23663 ap = qc->ap;
23664 link = qc->dev->link;
23665 @@ -5774,6 +5774,7 @@ static void ata_finalize_port_ops(struct
23666 return;
23667
23668 spin_lock(&lock);
23669 + pax_open_kernel();
23670
23671 for (cur = ops->inherits; cur; cur = cur->inherits) {
23672 void **inherit = (void **)cur;
23673 @@ -5787,8 +5788,9 @@ static void ata_finalize_port_ops(struct
23674 if (IS_ERR(*pp))
23675 *pp = NULL;
23676
23677 - ops->inherits = NULL;
23678 + *(struct ata_port_operations **)&ops->inherits = NULL;
23679
23680 + pax_close_kernel();
23681 spin_unlock(&lock);
23682 }
23683
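The ata_finalize_port_ops() hunk above shows the constification pattern used throughout the patch: ops tables end up in read-only memory, and the rare legitimate writer brackets its update with pax_open_kernel()/pax_close_kernel() and writes through an explicit cast (the pata_arasan_cf.c hunk further below does the same for its CF_BROKEN_PIO quirk). A loose userspace analogy using mprotect() follows; the real helpers temporarily lift kernel write protection rather than changing page permissions through a syscall, and every name in the sketch is made up.

#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

struct port_ops {
    void (*set_piomode)(int mode);
};

static void real_set_piomode(int mode) { printf("piomode %d\n", mode); }

int main(void)
{
    long pagesz = sysconf(_SC_PAGESIZE);
    struct port_ops *ops;

    /* build the "const" ops table on its own page, then make it read-only */
    ops = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
               MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (ops == MAP_FAILED)
        return 1;
    ops->set_piomode = real_set_piomode;
    mprotect(ops, pagesz, PROT_READ);

    /* "pax_open_kernel()": briefly allow the quirk fixup to write */
    mprotect(ops, pagesz, PROT_READ | PROT_WRITE);
    ops->set_piomode = NULL;                   /* CF_BROKEN_PIO-style quirk */
    /* "pax_close_kernel()": back to read-only */
    mprotect(ops, pagesz, PROT_READ);

    printf("set_piomode is %s\n", ops->set_piomode ? "set" : "NULL");
    return 0;
}
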
23684 diff -urNp linux-3.0.8/drivers/ata/libata-eh.c linux-3.0.8/drivers/ata/libata-eh.c
23685 --- linux-3.0.8/drivers/ata/libata-eh.c 2011-07-21 22:17:23.000000000 -0400
23686 +++ linux-3.0.8/drivers/ata/libata-eh.c 2011-08-23 21:48:14.000000000 -0400
23687 @@ -2518,6 +2518,8 @@ void ata_eh_report(struct ata_port *ap)
23688 {
23689 struct ata_link *link;
23690
23691 + pax_track_stack();
23692 +
23693 ata_for_each_link(link, ap, HOST_FIRST)
23694 ata_eh_link_report(link);
23695 }
23696 diff -urNp linux-3.0.8/drivers/ata/pata_arasan_cf.c linux-3.0.8/drivers/ata/pata_arasan_cf.c
23697 --- linux-3.0.8/drivers/ata/pata_arasan_cf.c 2011-07-21 22:17:23.000000000 -0400
23698 +++ linux-3.0.8/drivers/ata/pata_arasan_cf.c 2011-08-23 21:47:55.000000000 -0400
23699 @@ -862,7 +862,9 @@ static int __devinit arasan_cf_probe(str
23700 /* Handle platform specific quirks */
23701 if (pdata->quirk) {
23702 if (pdata->quirk & CF_BROKEN_PIO) {
23703 - ap->ops->set_piomode = NULL;
23704 + pax_open_kernel();
23705 + *(void **)&ap->ops->set_piomode = NULL;
23706 + pax_close_kernel();
23707 ap->pio_mask = 0;
23708 }
23709 if (pdata->quirk & CF_BROKEN_MWDMA)
23710 diff -urNp linux-3.0.8/drivers/atm/adummy.c linux-3.0.8/drivers/atm/adummy.c
23711 --- linux-3.0.8/drivers/atm/adummy.c 2011-07-21 22:17:23.000000000 -0400
23712 +++ linux-3.0.8/drivers/atm/adummy.c 2011-08-23 21:47:55.000000000 -0400
23713 @@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct
23714 vcc->pop(vcc, skb);
23715 else
23716 dev_kfree_skb_any(skb);
23717 - atomic_inc(&vcc->stats->tx);
23718 + atomic_inc_unchecked(&vcc->stats->tx);
23719
23720 return 0;
23721 }
23722 diff -urNp linux-3.0.8/drivers/atm/ambassador.c linux-3.0.8/drivers/atm/ambassador.c
23723 --- linux-3.0.8/drivers/atm/ambassador.c 2011-07-21 22:17:23.000000000 -0400
23724 +++ linux-3.0.8/drivers/atm/ambassador.c 2011-08-23 21:47:55.000000000 -0400
23725 @@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev,
23726 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
23727
23728 // VC layer stats
23729 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
23730 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
23731
23732 // free the descriptor
23733 kfree (tx_descr);
23734 @@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev,
23735 dump_skb ("<<<", vc, skb);
23736
23737 // VC layer stats
23738 - atomic_inc(&atm_vcc->stats->rx);
23739 + atomic_inc_unchecked(&atm_vcc->stats->rx);
23740 __net_timestamp(skb);
23741 // end of our responsibility
23742 atm_vcc->push (atm_vcc, skb);
23743 @@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev,
23744 } else {
23745 PRINTK (KERN_INFO, "dropped over-size frame");
23746 // should we count this?
23747 - atomic_inc(&atm_vcc->stats->rx_drop);
23748 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
23749 }
23750
23751 } else {
23752 @@ -1342,7 +1342,7 @@ static int amb_send (struct atm_vcc * at
23753 }
23754
23755 if (check_area (skb->data, skb->len)) {
23756 - atomic_inc(&atm_vcc->stats->tx_err);
23757 + atomic_inc_unchecked(&atm_vcc->stats->tx_err);
23758 return -ENOMEM; // ?
23759 }
23760
23761 diff -urNp linux-3.0.8/drivers/atm/atmtcp.c linux-3.0.8/drivers/atm/atmtcp.c
23762 --- linux-3.0.8/drivers/atm/atmtcp.c 2011-07-21 22:17:23.000000000 -0400
23763 +++ linux-3.0.8/drivers/atm/atmtcp.c 2011-08-23 21:47:55.000000000 -0400
23764 @@ -207,7 +207,7 @@ static int atmtcp_v_send(struct atm_vcc
23765 if (vcc->pop) vcc->pop(vcc,skb);
23766 else dev_kfree_skb(skb);
23767 if (dev_data) return 0;
23768 - atomic_inc(&vcc->stats->tx_err);
23769 + atomic_inc_unchecked(&vcc->stats->tx_err);
23770 return -ENOLINK;
23771 }
23772 size = skb->len+sizeof(struct atmtcp_hdr);
23773 @@ -215,7 +215,7 @@ static int atmtcp_v_send(struct atm_vcc
23774 if (!new_skb) {
23775 if (vcc->pop) vcc->pop(vcc,skb);
23776 else dev_kfree_skb(skb);
23777 - atomic_inc(&vcc->stats->tx_err);
23778 + atomic_inc_unchecked(&vcc->stats->tx_err);
23779 return -ENOBUFS;
23780 }
23781 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
23782 @@ -226,8 +226,8 @@ static int atmtcp_v_send(struct atm_vcc
23783 if (vcc->pop) vcc->pop(vcc,skb);
23784 else dev_kfree_skb(skb);
23785 out_vcc->push(out_vcc,new_skb);
23786 - atomic_inc(&vcc->stats->tx);
23787 - atomic_inc(&out_vcc->stats->rx);
23788 + atomic_inc_unchecked(&vcc->stats->tx);
23789 + atomic_inc_unchecked(&out_vcc->stats->rx);
23790 return 0;
23791 }
23792
23793 @@ -301,7 +301,7 @@ static int atmtcp_c_send(struct atm_vcc
23794 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
23795 read_unlock(&vcc_sklist_lock);
23796 if (!out_vcc) {
23797 - atomic_inc(&vcc->stats->tx_err);
23798 + atomic_inc_unchecked(&vcc->stats->tx_err);
23799 goto done;
23800 }
23801 skb_pull(skb,sizeof(struct atmtcp_hdr));
23802 @@ -313,8 +313,8 @@ static int atmtcp_c_send(struct atm_vcc
23803 __net_timestamp(new_skb);
23804 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
23805 out_vcc->push(out_vcc,new_skb);
23806 - atomic_inc(&vcc->stats->tx);
23807 - atomic_inc(&out_vcc->stats->rx);
23808 + atomic_inc_unchecked(&vcc->stats->tx);
23809 + atomic_inc_unchecked(&out_vcc->stats->rx);
23810 done:
23811 if (vcc->pop) vcc->pop(vcc,skb);
23812 else dev_kfree_skb(skb);
23813 diff -urNp linux-3.0.8/drivers/atm/eni.c linux-3.0.8/drivers/atm/eni.c
23814 --- linux-3.0.8/drivers/atm/eni.c 2011-07-21 22:17:23.000000000 -0400
23815 +++ linux-3.0.8/drivers/atm/eni.c 2011-10-11 10:44:33.000000000 -0400
23816 @@ -526,7 +526,7 @@ static int rx_aal0(struct atm_vcc *vcc)
23817 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
23818 vcc->dev->number);
23819 length = 0;
23820 - atomic_inc(&vcc->stats->rx_err);
23821 + atomic_inc_unchecked(&vcc->stats->rx_err);
23822 }
23823 else {
23824 length = ATM_CELL_SIZE-1; /* no HEC */
23825 @@ -581,7 +581,7 @@ static int rx_aal5(struct atm_vcc *vcc)
23826 size);
23827 }
23828 eff = length = 0;
23829 - atomic_inc(&vcc->stats->rx_err);
23830 + atomic_inc_unchecked(&vcc->stats->rx_err);
23831 }
23832 else {
23833 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
23834 @@ -598,7 +598,7 @@ static int rx_aal5(struct atm_vcc *vcc)
23835 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
23836 vcc->dev->number,vcc->vci,length,size << 2,descr);
23837 length = eff = 0;
23838 - atomic_inc(&vcc->stats->rx_err);
23839 + atomic_inc_unchecked(&vcc->stats->rx_err);
23840 }
23841 }
23842 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
23843 @@ -771,7 +771,7 @@ rx_dequeued++;
23844 vcc->push(vcc,skb);
23845 pushed++;
23846 }
23847 - atomic_inc(&vcc->stats->rx);
23848 + atomic_inc_unchecked(&vcc->stats->rx);
23849 }
23850 wake_up(&eni_dev->rx_wait);
23851 }
23852 @@ -1228,7 +1228,7 @@ static void dequeue_tx(struct atm_dev *d
23853 PCI_DMA_TODEVICE);
23854 if (vcc->pop) vcc->pop(vcc,skb);
23855 else dev_kfree_skb_irq(skb);
23856 - atomic_inc(&vcc->stats->tx);
23857 + atomic_inc_unchecked(&vcc->stats->tx);
23858 wake_up(&eni_dev->tx_wait);
23859 dma_complete++;
23860 }
23861 @@ -1568,7 +1568,7 @@ tx_complete++;
23862 /*--------------------------------- entries ---------------------------------*/
23863
23864
23865 -static const char *media_name[] __devinitdata = {
23866 +static const char *media_name[] __devinitconst = {
23867 "MMF", "SMF", "MMF", "03?", /* 0- 3 */
23868 "UTP", "05?", "06?", "07?", /* 4- 7 */
23869 "TAXI","09?", "10?", "11?", /* 8-11 */
23870 diff -urNp linux-3.0.8/drivers/atm/firestream.c linux-3.0.8/drivers/atm/firestream.c
23871 --- linux-3.0.8/drivers/atm/firestream.c 2011-07-21 22:17:23.000000000 -0400
23872 +++ linux-3.0.8/drivers/atm/firestream.c 2011-08-23 21:47:55.000000000 -0400
23873 @@ -749,7 +749,7 @@ static void process_txdone_queue (struct
23874 }
23875 }
23876
23877 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
23878 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
23879
23880 fs_dprintk (FS_DEBUG_TXMEM, "i");
23881 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
23882 @@ -816,7 +816,7 @@ static void process_incoming (struct fs_
23883 #endif
23884 skb_put (skb, qe->p1 & 0xffff);
23885 ATM_SKB(skb)->vcc = atm_vcc;
23886 - atomic_inc(&atm_vcc->stats->rx);
23887 + atomic_inc_unchecked(&atm_vcc->stats->rx);
23888 __net_timestamp(skb);
23889 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
23890 atm_vcc->push (atm_vcc, skb);
23891 @@ -837,12 +837,12 @@ static void process_incoming (struct fs_
23892 kfree (pe);
23893 }
23894 if (atm_vcc)
23895 - atomic_inc(&atm_vcc->stats->rx_drop);
23896 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
23897 break;
23898 case 0x1f: /* Reassembly abort: no buffers. */
23899 /* Silently increment error counter. */
23900 if (atm_vcc)
23901 - atomic_inc(&atm_vcc->stats->rx_drop);
23902 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
23903 break;
23904 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
23905 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
23906 diff -urNp linux-3.0.8/drivers/atm/fore200e.c linux-3.0.8/drivers/atm/fore200e.c
23907 --- linux-3.0.8/drivers/atm/fore200e.c 2011-07-21 22:17:23.000000000 -0400
23908 +++ linux-3.0.8/drivers/atm/fore200e.c 2011-08-23 21:47:55.000000000 -0400
23909 @@ -933,9 +933,9 @@ fore200e_tx_irq(struct fore200e* fore200
23910 #endif
23911 /* check error condition */
23912 if (*entry->status & STATUS_ERROR)
23913 - atomic_inc(&vcc->stats->tx_err);
23914 + atomic_inc_unchecked(&vcc->stats->tx_err);
23915 else
23916 - atomic_inc(&vcc->stats->tx);
23917 + atomic_inc_unchecked(&vcc->stats->tx);
23918 }
23919 }
23920
23921 @@ -1084,7 +1084,7 @@ fore200e_push_rpd(struct fore200e* fore2
23922 if (skb == NULL) {
23923 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
23924
23925 - atomic_inc(&vcc->stats->rx_drop);
23926 + atomic_inc_unchecked(&vcc->stats->rx_drop);
23927 return -ENOMEM;
23928 }
23929
23930 @@ -1127,14 +1127,14 @@ fore200e_push_rpd(struct fore200e* fore2
23931
23932 dev_kfree_skb_any(skb);
23933
23934 - atomic_inc(&vcc->stats->rx_drop);
23935 + atomic_inc_unchecked(&vcc->stats->rx_drop);
23936 return -ENOMEM;
23937 }
23938
23939 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
23940
23941 vcc->push(vcc, skb);
23942 - atomic_inc(&vcc->stats->rx);
23943 + atomic_inc_unchecked(&vcc->stats->rx);
23944
23945 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
23946
23947 @@ -1212,7 +1212,7 @@ fore200e_rx_irq(struct fore200e* fore200
23948 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
23949 fore200e->atm_dev->number,
23950 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
23951 - atomic_inc(&vcc->stats->rx_err);
23952 + atomic_inc_unchecked(&vcc->stats->rx_err);
23953 }
23954 }
23955
23956 @@ -1657,7 +1657,7 @@ fore200e_send(struct atm_vcc *vcc, struc
23957 goto retry_here;
23958 }
23959
23960 - atomic_inc(&vcc->stats->tx_err);
23961 + atomic_inc_unchecked(&vcc->stats->tx_err);
23962
23963 fore200e->tx_sat++;
23964 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
23965 diff -urNp linux-3.0.8/drivers/atm/he.c linux-3.0.8/drivers/atm/he.c
23966 --- linux-3.0.8/drivers/atm/he.c 2011-07-21 22:17:23.000000000 -0400
23967 +++ linux-3.0.8/drivers/atm/he.c 2011-08-23 21:47:55.000000000 -0400
23968 @@ -1709,7 +1709,7 @@ he_service_rbrq(struct he_dev *he_dev, i
23969
23970 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
23971 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
23972 - atomic_inc(&vcc->stats->rx_drop);
23973 + atomic_inc_unchecked(&vcc->stats->rx_drop);
23974 goto return_host_buffers;
23975 }
23976
23977 @@ -1736,7 +1736,7 @@ he_service_rbrq(struct he_dev *he_dev, i
23978 RBRQ_LEN_ERR(he_dev->rbrq_head)
23979 ? "LEN_ERR" : "",
23980 vcc->vpi, vcc->vci);
23981 - atomic_inc(&vcc->stats->rx_err);
23982 + atomic_inc_unchecked(&vcc->stats->rx_err);
23983 goto return_host_buffers;
23984 }
23985
23986 @@ -1788,7 +1788,7 @@ he_service_rbrq(struct he_dev *he_dev, i
23987 vcc->push(vcc, skb);
23988 spin_lock(&he_dev->global_lock);
23989
23990 - atomic_inc(&vcc->stats->rx);
23991 + atomic_inc_unchecked(&vcc->stats->rx);
23992
23993 return_host_buffers:
23994 ++pdus_assembled;
23995 @@ -2114,7 +2114,7 @@ __enqueue_tpd(struct he_dev *he_dev, str
23996 tpd->vcc->pop(tpd->vcc, tpd->skb);
23997 else
23998 dev_kfree_skb_any(tpd->skb);
23999 - atomic_inc(&tpd->vcc->stats->tx_err);
24000 + atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
24001 }
24002 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
24003 return;
24004 @@ -2526,7 +2526,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
24005 vcc->pop(vcc, skb);
24006 else
24007 dev_kfree_skb_any(skb);
24008 - atomic_inc(&vcc->stats->tx_err);
24009 + atomic_inc_unchecked(&vcc->stats->tx_err);
24010 return -EINVAL;
24011 }
24012
24013 @@ -2537,7 +2537,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
24014 vcc->pop(vcc, skb);
24015 else
24016 dev_kfree_skb_any(skb);
24017 - atomic_inc(&vcc->stats->tx_err);
24018 + atomic_inc_unchecked(&vcc->stats->tx_err);
24019 return -EINVAL;
24020 }
24021 #endif
24022 @@ -2549,7 +2549,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
24023 vcc->pop(vcc, skb);
24024 else
24025 dev_kfree_skb_any(skb);
24026 - atomic_inc(&vcc->stats->tx_err);
24027 + atomic_inc_unchecked(&vcc->stats->tx_err);
24028 spin_unlock_irqrestore(&he_dev->global_lock, flags);
24029 return -ENOMEM;
24030 }
24031 @@ -2591,7 +2591,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
24032 vcc->pop(vcc, skb);
24033 else
24034 dev_kfree_skb_any(skb);
24035 - atomic_inc(&vcc->stats->tx_err);
24036 + atomic_inc_unchecked(&vcc->stats->tx_err);
24037 spin_unlock_irqrestore(&he_dev->global_lock, flags);
24038 return -ENOMEM;
24039 }
24040 @@ -2622,7 +2622,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
24041 __enqueue_tpd(he_dev, tpd, cid);
24042 spin_unlock_irqrestore(&he_dev->global_lock, flags);
24043
24044 - atomic_inc(&vcc->stats->tx);
24045 + atomic_inc_unchecked(&vcc->stats->tx);
24046
24047 return 0;
24048 }
24049 diff -urNp linux-3.0.8/drivers/atm/horizon.c linux-3.0.8/drivers/atm/horizon.c
24050 --- linux-3.0.8/drivers/atm/horizon.c 2011-07-21 22:17:23.000000000 -0400
24051 +++ linux-3.0.8/drivers/atm/horizon.c 2011-08-23 21:47:55.000000000 -0400
24052 @@ -1034,7 +1034,7 @@ static void rx_schedule (hrz_dev * dev,
24053 {
24054 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
24055 // VC layer stats
24056 - atomic_inc(&vcc->stats->rx);
24057 + atomic_inc_unchecked(&vcc->stats->rx);
24058 __net_timestamp(skb);
24059 // end of our responsibility
24060 vcc->push (vcc, skb);
24061 @@ -1186,7 +1186,7 @@ static void tx_schedule (hrz_dev * const
24062 dev->tx_iovec = NULL;
24063
24064 // VC layer stats
24065 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
24066 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
24067
24068 // free the skb
24069 hrz_kfree_skb (skb);
24070 diff -urNp linux-3.0.8/drivers/atm/idt77252.c linux-3.0.8/drivers/atm/idt77252.c
24071 --- linux-3.0.8/drivers/atm/idt77252.c 2011-07-21 22:17:23.000000000 -0400
24072 +++ linux-3.0.8/drivers/atm/idt77252.c 2011-08-23 21:47:55.000000000 -0400
24073 @@ -811,7 +811,7 @@ drain_scq(struct idt77252_dev *card, str
24074 else
24075 dev_kfree_skb(skb);
24076
24077 - atomic_inc(&vcc->stats->tx);
24078 + atomic_inc_unchecked(&vcc->stats->tx);
24079 }
24080
24081 atomic_dec(&scq->used);
24082 @@ -1074,13 +1074,13 @@ dequeue_rx(struct idt77252_dev *card, st
24083 if ((sb = dev_alloc_skb(64)) == NULL) {
24084 printk("%s: Can't allocate buffers for aal0.\n",
24085 card->name);
24086 - atomic_add(i, &vcc->stats->rx_drop);
24087 + atomic_add_unchecked(i, &vcc->stats->rx_drop);
24088 break;
24089 }
24090 if (!atm_charge(vcc, sb->truesize)) {
24091 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
24092 card->name);
24093 - atomic_add(i - 1, &vcc->stats->rx_drop);
24094 + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
24095 dev_kfree_skb(sb);
24096 break;
24097 }
24098 @@ -1097,7 +1097,7 @@ dequeue_rx(struct idt77252_dev *card, st
24099 ATM_SKB(sb)->vcc = vcc;
24100 __net_timestamp(sb);
24101 vcc->push(vcc, sb);
24102 - atomic_inc(&vcc->stats->rx);
24103 + atomic_inc_unchecked(&vcc->stats->rx);
24104
24105 cell += ATM_CELL_PAYLOAD;
24106 }
24107 @@ -1134,13 +1134,13 @@ dequeue_rx(struct idt77252_dev *card, st
24108 "(CDC: %08x)\n",
24109 card->name, len, rpp->len, readl(SAR_REG_CDC));
24110 recycle_rx_pool_skb(card, rpp);
24111 - atomic_inc(&vcc->stats->rx_err);
24112 + atomic_inc_unchecked(&vcc->stats->rx_err);
24113 return;
24114 }
24115 if (stat & SAR_RSQE_CRC) {
24116 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
24117 recycle_rx_pool_skb(card, rpp);
24118 - atomic_inc(&vcc->stats->rx_err);
24119 + atomic_inc_unchecked(&vcc->stats->rx_err);
24120 return;
24121 }
24122 if (skb_queue_len(&rpp->queue) > 1) {
24123 @@ -1151,7 +1151,7 @@ dequeue_rx(struct idt77252_dev *card, st
24124 RXPRINTK("%s: Can't alloc RX skb.\n",
24125 card->name);
24126 recycle_rx_pool_skb(card, rpp);
24127 - atomic_inc(&vcc->stats->rx_err);
24128 + atomic_inc_unchecked(&vcc->stats->rx_err);
24129 return;
24130 }
24131 if (!atm_charge(vcc, skb->truesize)) {
24132 @@ -1170,7 +1170,7 @@ dequeue_rx(struct idt77252_dev *card, st
24133 __net_timestamp(skb);
24134
24135 vcc->push(vcc, skb);
24136 - atomic_inc(&vcc->stats->rx);
24137 + atomic_inc_unchecked(&vcc->stats->rx);
24138
24139 return;
24140 }
24141 @@ -1192,7 +1192,7 @@ dequeue_rx(struct idt77252_dev *card, st
24142 __net_timestamp(skb);
24143
24144 vcc->push(vcc, skb);
24145 - atomic_inc(&vcc->stats->rx);
24146 + atomic_inc_unchecked(&vcc->stats->rx);
24147
24148 if (skb->truesize > SAR_FB_SIZE_3)
24149 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
24150 @@ -1303,14 +1303,14 @@ idt77252_rx_raw(struct idt77252_dev *car
24151 if (vcc->qos.aal != ATM_AAL0) {
24152 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
24153 card->name, vpi, vci);
24154 - atomic_inc(&vcc->stats->rx_drop);
24155 + atomic_inc_unchecked(&vcc->stats->rx_drop);
24156 goto drop;
24157 }
24158
24159 if ((sb = dev_alloc_skb(64)) == NULL) {
24160 printk("%s: Can't allocate buffers for AAL0.\n",
24161 card->name);
24162 - atomic_inc(&vcc->stats->rx_err);
24163 + atomic_inc_unchecked(&vcc->stats->rx_err);
24164 goto drop;
24165 }
24166
24167 @@ -1329,7 +1329,7 @@ idt77252_rx_raw(struct idt77252_dev *car
24168 ATM_SKB(sb)->vcc = vcc;
24169 __net_timestamp(sb);
24170 vcc->push(vcc, sb);
24171 - atomic_inc(&vcc->stats->rx);
24172 + atomic_inc_unchecked(&vcc->stats->rx);
24173
24174 drop:
24175 skb_pull(queue, 64);
24176 @@ -1954,13 +1954,13 @@ idt77252_send_skb(struct atm_vcc *vcc, s
24177
24178 if (vc == NULL) {
24179 printk("%s: NULL connection in send().\n", card->name);
24180 - atomic_inc(&vcc->stats->tx_err);
24181 + atomic_inc_unchecked(&vcc->stats->tx_err);
24182 dev_kfree_skb(skb);
24183 return -EINVAL;
24184 }
24185 if (!test_bit(VCF_TX, &vc->flags)) {
24186 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
24187 - atomic_inc(&vcc->stats->tx_err);
24188 + atomic_inc_unchecked(&vcc->stats->tx_err);
24189 dev_kfree_skb(skb);
24190 return -EINVAL;
24191 }
24192 @@ -1972,14 +1972,14 @@ idt77252_send_skb(struct atm_vcc *vcc, s
24193 break;
24194 default:
24195 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
24196 - atomic_inc(&vcc->stats->tx_err);
24197 + atomic_inc_unchecked(&vcc->stats->tx_err);
24198 dev_kfree_skb(skb);
24199 return -EINVAL;
24200 }
24201
24202 if (skb_shinfo(skb)->nr_frags != 0) {
24203 printk("%s: No scatter-gather yet.\n", card->name);
24204 - atomic_inc(&vcc->stats->tx_err);
24205 + atomic_inc_unchecked(&vcc->stats->tx_err);
24206 dev_kfree_skb(skb);
24207 return -EINVAL;
24208 }
24209 @@ -1987,7 +1987,7 @@ idt77252_send_skb(struct atm_vcc *vcc, s
24210
24211 err = queue_skb(card, vc, skb, oam);
24212 if (err) {
24213 - atomic_inc(&vcc->stats->tx_err);
24214 + atomic_inc_unchecked(&vcc->stats->tx_err);
24215 dev_kfree_skb(skb);
24216 return err;
24217 }
24218 @@ -2010,7 +2010,7 @@ idt77252_send_oam(struct atm_vcc *vcc, v
24219 skb = dev_alloc_skb(64);
24220 if (!skb) {
24221 printk("%s: Out of memory in send_oam().\n", card->name);
24222 - atomic_inc(&vcc->stats->tx_err);
24223 + atomic_inc_unchecked(&vcc->stats->tx_err);
24224 return -ENOMEM;
24225 }
24226 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
24227 diff -urNp linux-3.0.8/drivers/atm/iphase.c linux-3.0.8/drivers/atm/iphase.c
24228 --- linux-3.0.8/drivers/atm/iphase.c 2011-07-21 22:17:23.000000000 -0400
24229 +++ linux-3.0.8/drivers/atm/iphase.c 2011-08-23 21:47:55.000000000 -0400
24230 @@ -1120,7 +1120,7 @@ static int rx_pkt(struct atm_dev *dev)
24231 status = (u_short) (buf_desc_ptr->desc_mode);
24232 if (status & (RX_CER | RX_PTE | RX_OFL))
24233 {
24234 - atomic_inc(&vcc->stats->rx_err);
24235 + atomic_inc_unchecked(&vcc->stats->rx_err);
24236 IF_ERR(printk("IA: bad packet, dropping it");)
24237 if (status & RX_CER) {
24238 IF_ERR(printk(" cause: packet CRC error\n");)
24239 @@ -1143,7 +1143,7 @@ static int rx_pkt(struct atm_dev *dev)
24240 len = dma_addr - buf_addr;
24241 if (len > iadev->rx_buf_sz) {
24242 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
24243 - atomic_inc(&vcc->stats->rx_err);
24244 + atomic_inc_unchecked(&vcc->stats->rx_err);
24245 goto out_free_desc;
24246 }
24247
24248 @@ -1293,7 +1293,7 @@ static void rx_dle_intr(struct atm_dev *
24249 ia_vcc = INPH_IA_VCC(vcc);
24250 if (ia_vcc == NULL)
24251 {
24252 - atomic_inc(&vcc->stats->rx_err);
24253 + atomic_inc_unchecked(&vcc->stats->rx_err);
24254 dev_kfree_skb_any(skb);
24255 atm_return(vcc, atm_guess_pdu2truesize(len));
24256 goto INCR_DLE;
24257 @@ -1305,7 +1305,7 @@ static void rx_dle_intr(struct atm_dev *
24258 if ((length > iadev->rx_buf_sz) || (length >
24259 (skb->len - sizeof(struct cpcs_trailer))))
24260 {
24261 - atomic_inc(&vcc->stats->rx_err);
24262 + atomic_inc_unchecked(&vcc->stats->rx_err);
24263 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
24264 length, skb->len);)
24265 dev_kfree_skb_any(skb);
24266 @@ -1321,7 +1321,7 @@ static void rx_dle_intr(struct atm_dev *
24267
24268 IF_RX(printk("rx_dle_intr: skb push");)
24269 vcc->push(vcc,skb);
24270 - atomic_inc(&vcc->stats->rx);
24271 + atomic_inc_unchecked(&vcc->stats->rx);
24272 iadev->rx_pkt_cnt++;
24273 }
24274 INCR_DLE:
24275 @@ -2801,15 +2801,15 @@ static int ia_ioctl(struct atm_dev *dev,
24276 {
24277 struct k_sonet_stats *stats;
24278 stats = &PRIV(_ia_dev[board])->sonet_stats;
24279 - printk("section_bip: %d\n", atomic_read(&stats->section_bip));
24280 - printk("line_bip : %d\n", atomic_read(&stats->line_bip));
24281 - printk("path_bip : %d\n", atomic_read(&stats->path_bip));
24282 - printk("line_febe : %d\n", atomic_read(&stats->line_febe));
24283 - printk("path_febe : %d\n", atomic_read(&stats->path_febe));
24284 - printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
24285 - printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
24286 - printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
24287 - printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
24288 + printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
24289 + printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
24290 + printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
24291 + printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
24292 + printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
24293 + printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
24294 + printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
24295 + printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
24296 + printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
24297 }
24298 ia_cmds.status = 0;
24299 break;
24300 @@ -2914,7 +2914,7 @@ static int ia_pkt_tx (struct atm_vcc *vc
24301 if ((desc == 0) || (desc > iadev->num_tx_desc))
24302 {
24303 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
24304 - atomic_inc(&vcc->stats->tx);
24305 + atomic_inc_unchecked(&vcc->stats->tx);
24306 if (vcc->pop)
24307 vcc->pop(vcc, skb);
24308 else
24309 @@ -3019,14 +3019,14 @@ static int ia_pkt_tx (struct atm_vcc *vc
24310 ATM_DESC(skb) = vcc->vci;
24311 skb_queue_tail(&iadev->tx_dma_q, skb);
24312
24313 - atomic_inc(&vcc->stats->tx);
24314 + atomic_inc_unchecked(&vcc->stats->tx);
24315 iadev->tx_pkt_cnt++;
24316 /* Increment transaction counter */
24317 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
24318
24319 #if 0
24320 /* add flow control logic */
24321 - if (atomic_read(&vcc->stats->tx) % 20 == 0) {
24322 + if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
24323 if (iavcc->vc_desc_cnt > 10) {
24324 vcc->tx_quota = vcc->tx_quota * 3 / 4;
24325 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
24326 diff -urNp linux-3.0.8/drivers/atm/lanai.c linux-3.0.8/drivers/atm/lanai.c
24327 --- linux-3.0.8/drivers/atm/lanai.c 2011-07-21 22:17:23.000000000 -0400
24328 +++ linux-3.0.8/drivers/atm/lanai.c 2011-08-23 21:47:55.000000000 -0400
24329 @@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct l
24330 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
24331 lanai_endtx(lanai, lvcc);
24332 lanai_free_skb(lvcc->tx.atmvcc, skb);
24333 - atomic_inc(&lvcc->tx.atmvcc->stats->tx);
24334 + atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
24335 }
24336
24337 /* Try to fill the buffer - don't call unless there is backlog */
24338 @@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc
24339 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
24340 __net_timestamp(skb);
24341 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
24342 - atomic_inc(&lvcc->rx.atmvcc->stats->rx);
24343 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
24344 out:
24345 lvcc->rx.buf.ptr = end;
24346 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
24347 @@ -1668,7 +1668,7 @@ static int handle_service(struct lanai_d
24348 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
24349 "vcc %d\n", lanai->number, (unsigned int) s, vci);
24350 lanai->stats.service_rxnotaal5++;
24351 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
24352 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
24353 return 0;
24354 }
24355 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
24356 @@ -1680,7 +1680,7 @@ static int handle_service(struct lanai_d
24357 int bytes;
24358 read_unlock(&vcc_sklist_lock);
24359 DPRINTK("got trashed rx pdu on vci %d\n", vci);
24360 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
24361 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
24362 lvcc->stats.x.aal5.service_trash++;
24363 bytes = (SERVICE_GET_END(s) * 16) -
24364 (((unsigned long) lvcc->rx.buf.ptr) -
24365 @@ -1692,7 +1692,7 @@ static int handle_service(struct lanai_d
24366 }
24367 if (s & SERVICE_STREAM) {
24368 read_unlock(&vcc_sklist_lock);
24369 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
24370 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
24371 lvcc->stats.x.aal5.service_stream++;
24372 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
24373 "PDU on VCI %d!\n", lanai->number, vci);
24374 @@ -1700,7 +1700,7 @@ static int handle_service(struct lanai_d
24375 return 0;
24376 }
24377 DPRINTK("got rx crc error on vci %d\n", vci);
24378 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
24379 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
24380 lvcc->stats.x.aal5.service_rxcrc++;
24381 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
24382 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
24383 diff -urNp linux-3.0.8/drivers/atm/nicstar.c linux-3.0.8/drivers/atm/nicstar.c
24384 --- linux-3.0.8/drivers/atm/nicstar.c 2011-07-21 22:17:23.000000000 -0400
24385 +++ linux-3.0.8/drivers/atm/nicstar.c 2011-08-23 21:47:55.000000000 -0400
24386 @@ -1654,7 +1654,7 @@ static int ns_send(struct atm_vcc *vcc,
24387 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
24388 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
24389 card->index);
24390 - atomic_inc(&vcc->stats->tx_err);
24391 + atomic_inc_unchecked(&vcc->stats->tx_err);
24392 dev_kfree_skb_any(skb);
24393 return -EINVAL;
24394 }
24395 @@ -1662,7 +1662,7 @@ static int ns_send(struct atm_vcc *vcc,
24396 if (!vc->tx) {
24397 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
24398 card->index);
24399 - atomic_inc(&vcc->stats->tx_err);
24400 + atomic_inc_unchecked(&vcc->stats->tx_err);
24401 dev_kfree_skb_any(skb);
24402 return -EINVAL;
24403 }
24404 @@ -1670,14 +1670,14 @@ static int ns_send(struct atm_vcc *vcc,
24405 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
24406 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
24407 card->index);
24408 - atomic_inc(&vcc->stats->tx_err);
24409 + atomic_inc_unchecked(&vcc->stats->tx_err);
24410 dev_kfree_skb_any(skb);
24411 return -EINVAL;
24412 }
24413
24414 if (skb_shinfo(skb)->nr_frags != 0) {
24415 printk("nicstar%d: No scatter-gather yet.\n", card->index);
24416 - atomic_inc(&vcc->stats->tx_err);
24417 + atomic_inc_unchecked(&vcc->stats->tx_err);
24418 dev_kfree_skb_any(skb);
24419 return -EINVAL;
24420 }
24421 @@ -1725,11 +1725,11 @@ static int ns_send(struct atm_vcc *vcc,
24422 }
24423
24424 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
24425 - atomic_inc(&vcc->stats->tx_err);
24426 + atomic_inc_unchecked(&vcc->stats->tx_err);
24427 dev_kfree_skb_any(skb);
24428 return -EIO;
24429 }
24430 - atomic_inc(&vcc->stats->tx);
24431 + atomic_inc_unchecked(&vcc->stats->tx);
24432
24433 return 0;
24434 }
24435 @@ -2046,14 +2046,14 @@ static void dequeue_rx(ns_dev * card, ns
24436 printk
24437 ("nicstar%d: Can't allocate buffers for aal0.\n",
24438 card->index);
24439 - atomic_add(i, &vcc->stats->rx_drop);
24440 + atomic_add_unchecked(i, &vcc->stats->rx_drop);
24441 break;
24442 }
24443 if (!atm_charge(vcc, sb->truesize)) {
24444 RXPRINTK
24445 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
24446 card->index);
24447 - atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
24448 + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
24449 dev_kfree_skb_any(sb);
24450 break;
24451 }
24452 @@ -2068,7 +2068,7 @@ static void dequeue_rx(ns_dev * card, ns
24453 ATM_SKB(sb)->vcc = vcc;
24454 __net_timestamp(sb);
24455 vcc->push(vcc, sb);
24456 - atomic_inc(&vcc->stats->rx);
24457 + atomic_inc_unchecked(&vcc->stats->rx);
24458 cell += ATM_CELL_PAYLOAD;
24459 }
24460
24461 @@ -2085,7 +2085,7 @@ static void dequeue_rx(ns_dev * card, ns
24462 if (iovb == NULL) {
24463 printk("nicstar%d: Out of iovec buffers.\n",
24464 card->index);
24465 - atomic_inc(&vcc->stats->rx_drop);
24466 + atomic_inc_unchecked(&vcc->stats->rx_drop);
24467 recycle_rx_buf(card, skb);
24468 return;
24469 }
24470 @@ -2109,7 +2109,7 @@ static void dequeue_rx(ns_dev * card, ns
24471 small or large buffer itself. */
24472 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
24473 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
24474 - atomic_inc(&vcc->stats->rx_err);
24475 + atomic_inc_unchecked(&vcc->stats->rx_err);
24476 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
24477 NS_MAX_IOVECS);
24478 NS_PRV_IOVCNT(iovb) = 0;
24479 @@ -2129,7 +2129,7 @@ static void dequeue_rx(ns_dev * card, ns
24480 ("nicstar%d: Expected a small buffer, and this is not one.\n",
24481 card->index);
24482 which_list(card, skb);
24483 - atomic_inc(&vcc->stats->rx_err);
24484 + atomic_inc_unchecked(&vcc->stats->rx_err);
24485 recycle_rx_buf(card, skb);
24486 vc->rx_iov = NULL;
24487 recycle_iov_buf(card, iovb);
24488 @@ -2142,7 +2142,7 @@ static void dequeue_rx(ns_dev * card, ns
24489 ("nicstar%d: Expected a large buffer, and this is not one.\n",
24490 card->index);
24491 which_list(card, skb);
24492 - atomic_inc(&vcc->stats->rx_err);
24493 + atomic_inc_unchecked(&vcc->stats->rx_err);
24494 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
24495 NS_PRV_IOVCNT(iovb));
24496 vc->rx_iov = NULL;
24497 @@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns
24498 printk(" - PDU size mismatch.\n");
24499 else
24500 printk(".\n");
24501 - atomic_inc(&vcc->stats->rx_err);
24502 + atomic_inc_unchecked(&vcc->stats->rx_err);
24503 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
24504 NS_PRV_IOVCNT(iovb));
24505 vc->rx_iov = NULL;
24506 @@ -2179,7 +2179,7 @@ static void dequeue_rx(ns_dev * card, ns
24507 /* skb points to a small buffer */
24508 if (!atm_charge(vcc, skb->truesize)) {
24509 push_rxbufs(card, skb);
24510 - atomic_inc(&vcc->stats->rx_drop);
24511 + atomic_inc_unchecked(&vcc->stats->rx_drop);
24512 } else {
24513 skb_put(skb, len);
24514 dequeue_sm_buf(card, skb);
24515 @@ -2189,7 +2189,7 @@ static void dequeue_rx(ns_dev * card, ns
24516 ATM_SKB(skb)->vcc = vcc;
24517 __net_timestamp(skb);
24518 vcc->push(vcc, skb);
24519 - atomic_inc(&vcc->stats->rx);
24520 + atomic_inc_unchecked(&vcc->stats->rx);
24521 }
24522 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
24523 struct sk_buff *sb;
24524 @@ -2200,7 +2200,7 @@ static void dequeue_rx(ns_dev * card, ns
24525 if (len <= NS_SMBUFSIZE) {
24526 if (!atm_charge(vcc, sb->truesize)) {
24527 push_rxbufs(card, sb);
24528 - atomic_inc(&vcc->stats->rx_drop);
24529 + atomic_inc_unchecked(&vcc->stats->rx_drop);
24530 } else {
24531 skb_put(sb, len);
24532 dequeue_sm_buf(card, sb);
24533 @@ -2210,7 +2210,7 @@ static void dequeue_rx(ns_dev * card, ns
24534 ATM_SKB(sb)->vcc = vcc;
24535 __net_timestamp(sb);
24536 vcc->push(vcc, sb);
24537 - atomic_inc(&vcc->stats->rx);
24538 + atomic_inc_unchecked(&vcc->stats->rx);
24539 }
24540
24541 push_rxbufs(card, skb);
24542 @@ -2219,7 +2219,7 @@ static void dequeue_rx(ns_dev * card, ns
24543
24544 if (!atm_charge(vcc, skb->truesize)) {
24545 push_rxbufs(card, skb);
24546 - atomic_inc(&vcc->stats->rx_drop);
24547 + atomic_inc_unchecked(&vcc->stats->rx_drop);
24548 } else {
24549 dequeue_lg_buf(card, skb);
24550 #ifdef NS_USE_DESTRUCTORS
24551 @@ -2232,7 +2232,7 @@ static void dequeue_rx(ns_dev * card, ns
24552 ATM_SKB(skb)->vcc = vcc;
24553 __net_timestamp(skb);
24554 vcc->push(vcc, skb);
24555 - atomic_inc(&vcc->stats->rx);
24556 + atomic_inc_unchecked(&vcc->stats->rx);
24557 }
24558
24559 push_rxbufs(card, sb);
24560 @@ -2253,7 +2253,7 @@ static void dequeue_rx(ns_dev * card, ns
24561 printk
24562 ("nicstar%d: Out of huge buffers.\n",
24563 card->index);
24564 - atomic_inc(&vcc->stats->rx_drop);
24565 + atomic_inc_unchecked(&vcc->stats->rx_drop);
24566 recycle_iovec_rx_bufs(card,
24567 (struct iovec *)
24568 iovb->data,
24569 @@ -2304,7 +2304,7 @@ static void dequeue_rx(ns_dev * card, ns
24570 card->hbpool.count++;
24571 } else
24572 dev_kfree_skb_any(hb);
24573 - atomic_inc(&vcc->stats->rx_drop);
24574 + atomic_inc_unchecked(&vcc->stats->rx_drop);
24575 } else {
24576 /* Copy the small buffer to the huge buffer */
24577 sb = (struct sk_buff *)iov->iov_base;
24578 @@ -2341,7 +2341,7 @@ static void dequeue_rx(ns_dev * card, ns
24579 #endif /* NS_USE_DESTRUCTORS */
24580 __net_timestamp(hb);
24581 vcc->push(vcc, hb);
24582 - atomic_inc(&vcc->stats->rx);
24583 + atomic_inc_unchecked(&vcc->stats->rx);
24584 }
24585 }
24586
24587 diff -urNp linux-3.0.8/drivers/atm/solos-pci.c linux-3.0.8/drivers/atm/solos-pci.c
24588 --- linux-3.0.8/drivers/atm/solos-pci.c 2011-07-21 22:17:23.000000000 -0400
24589 +++ linux-3.0.8/drivers/atm/solos-pci.c 2011-08-23 21:48:14.000000000 -0400
24590 @@ -714,7 +714,7 @@ void solos_bh(unsigned long card_arg)
24591 }
24592 atm_charge(vcc, skb->truesize);
24593 vcc->push(vcc, skb);
24594 - atomic_inc(&vcc->stats->rx);
24595 + atomic_inc_unchecked(&vcc->stats->rx);
24596 break;
24597
24598 case PKT_STATUS:
24599 @@ -899,6 +899,8 @@ static int print_buffer(struct sk_buff *
24600 char msg[500];
24601 char item[10];
24602
24603 + pax_track_stack();
24604 +
24605 len = buf->len;
24606 for (i = 0; i < len; i++){
24607 if(i % 8 == 0)
24608 @@ -1008,7 +1010,7 @@ static uint32_t fpga_tx(struct solos_car
24609 vcc = SKB_CB(oldskb)->vcc;
24610
24611 if (vcc) {
24612 - atomic_inc(&vcc->stats->tx);
24613 + atomic_inc_unchecked(&vcc->stats->tx);
24614 solos_pop(vcc, oldskb);
24615 } else
24616 dev_kfree_skb_irq(oldskb);
24617 diff -urNp linux-3.0.8/drivers/atm/suni.c linux-3.0.8/drivers/atm/suni.c
24618 --- linux-3.0.8/drivers/atm/suni.c 2011-07-21 22:17:23.000000000 -0400
24619 +++ linux-3.0.8/drivers/atm/suni.c 2011-08-23 21:47:55.000000000 -0400
24620 @@ -50,8 +50,8 @@ static DEFINE_SPINLOCK(sunis_lock);
24621
24622
24623 #define ADD_LIMITED(s,v) \
24624 - atomic_add((v),&stats->s); \
24625 - if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
24626 + atomic_add_unchecked((v),&stats->s); \
24627 + if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
24628
24629
24630 static void suni_hz(unsigned long from_timer)
24631 diff -urNp linux-3.0.8/drivers/atm/uPD98402.c linux-3.0.8/drivers/atm/uPD98402.c
24632 --- linux-3.0.8/drivers/atm/uPD98402.c 2011-07-21 22:17:23.000000000 -0400
24633 +++ linux-3.0.8/drivers/atm/uPD98402.c 2011-08-23 21:47:55.000000000 -0400
24634 @@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *d
24635 struct sonet_stats tmp;
24636 int error = 0;
24637
24638 - atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
24639 + atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
24640 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
24641 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
24642 if (zero && !error) {
24643 @@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev
24644
24645
24646 #define ADD_LIMITED(s,v) \
24647 - { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
24648 - if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
24649 - atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
24650 + { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
24651 + if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
24652 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
24653
24654
24655 static void stat_event(struct atm_dev *dev)
24656 @@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev
24657 if (reason & uPD98402_INT_PFM) stat_event(dev);
24658 if (reason & uPD98402_INT_PCO) {
24659 (void) GET(PCOCR); /* clear interrupt cause */
24660 - atomic_add(GET(HECCT),
24661 + atomic_add_unchecked(GET(HECCT),
24662 &PRIV(dev)->sonet_stats.uncorr_hcs);
24663 }
24664 if ((reason & uPD98402_INT_RFO) &&
24665 @@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev
24666 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
24667 uPD98402_INT_LOS),PIMR); /* enable them */
24668 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
24669 - atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
24670 - atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
24671 - atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
24672 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
24673 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
24674 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
24675 return 0;
24676 }
24677
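Illustrative sketch (not part of the patch): the ATM driver hunks above replace atomic_t statistics counters with atomic_unchecked_t and the matching _unchecked accessors, so that PaX's REFCOUNT overflow detection skips counters that are allowed to wrap. Conceptually an unchecked counter is just an atomic integer without the saturation trap; the names below are hypothetical stand-ins, the real definitions live elsewhere in this patch.

typedef struct {
	int counter;
} atomic_unchecked_example_t;			/* stand-in for atomic_unchecked_t */

static inline void atomic_inc_unchecked_example(atomic_unchecked_example_t *v)
{
	/* plain atomic increment, no overflow/refcount check */
	__atomic_fetch_add(&v->counter, 1, __ATOMIC_RELAXED);
}

static inline int atomic_read_unchecked_example(const atomic_unchecked_example_t *v)
{
	return __atomic_load_n(&v->counter, __ATOMIC_RELAXED);
}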
24678 diff -urNp linux-3.0.8/drivers/atm/zatm.c linux-3.0.8/drivers/atm/zatm.c
24679 --- linux-3.0.8/drivers/atm/zatm.c 2011-07-21 22:17:23.000000000 -0400
24680 +++ linux-3.0.8/drivers/atm/zatm.c 2011-08-23 21:47:55.000000000 -0400
24681 @@ -459,7 +459,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
24682 }
24683 if (!size) {
24684 dev_kfree_skb_irq(skb);
24685 - if (vcc) atomic_inc(&vcc->stats->rx_err);
24686 + if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
24687 continue;
24688 }
24689 if (!atm_charge(vcc,skb->truesize)) {
24690 @@ -469,7 +469,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
24691 skb->len = size;
24692 ATM_SKB(skb)->vcc = vcc;
24693 vcc->push(vcc,skb);
24694 - atomic_inc(&vcc->stats->rx);
24695 + atomic_inc_unchecked(&vcc->stats->rx);
24696 }
24697 zout(pos & 0xffff,MTA(mbx));
24698 #if 0 /* probably a stupid idea */
24699 @@ -733,7 +733,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD
24700 skb_queue_head(&zatm_vcc->backlog,skb);
24701 break;
24702 }
24703 - atomic_inc(&vcc->stats->tx);
24704 + atomic_inc_unchecked(&vcc->stats->tx);
24705 wake_up(&zatm_vcc->tx_wait);
24706 }
24707
24708 diff -urNp linux-3.0.8/drivers/base/devtmpfs.c linux-3.0.8/drivers/base/devtmpfs.c
24709 --- linux-3.0.8/drivers/base/devtmpfs.c 2011-07-21 22:17:23.000000000 -0400
24710 +++ linux-3.0.8/drivers/base/devtmpfs.c 2011-10-06 04:17:55.000000000 -0400
24711 @@ -357,7 +357,7 @@ int devtmpfs_mount(const char *mntdir)
24712 if (!dev_mnt)
24713 return 0;
24714
24715 - err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
24716 + err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL);
24717 if (err)
24718 printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
24719 else
24720 diff -urNp linux-3.0.8/drivers/base/power/wakeup.c linux-3.0.8/drivers/base/power/wakeup.c
24721 --- linux-3.0.8/drivers/base/power/wakeup.c 2011-07-21 22:17:23.000000000 -0400
24722 +++ linux-3.0.8/drivers/base/power/wakeup.c 2011-08-23 21:47:55.000000000 -0400
24723 @@ -29,14 +29,14 @@ bool events_check_enabled;
24724 * They need to be modified together atomically, so it's better to use one
24725 * atomic variable to hold them both.
24726 */
24727 -static atomic_t combined_event_count = ATOMIC_INIT(0);
24728 +static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
24729
24730 #define IN_PROGRESS_BITS (sizeof(int) * 4)
24731 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
24732
24733 static void split_counters(unsigned int *cnt, unsigned int *inpr)
24734 {
24735 - unsigned int comb = atomic_read(&combined_event_count);
24736 + unsigned int comb = atomic_read_unchecked(&combined_event_count);
24737
24738 *cnt = (comb >> IN_PROGRESS_BITS);
24739 *inpr = comb & MAX_IN_PROGRESS;
24740 @@ -350,7 +350,7 @@ static void wakeup_source_activate(struc
24741 ws->last_time = ktime_get();
24742
24743 /* Increment the counter of events in progress. */
24744 - atomic_inc(&combined_event_count);
24745 + atomic_inc_unchecked(&combined_event_count);
24746 }
24747
24748 /**
24749 @@ -440,7 +440,7 @@ static void wakeup_source_deactivate(str
24750 * Increment the counter of registered wakeup events and decrement the
24751 * couter of wakeup events in progress simultaneously.
24752 */
24753 - atomic_add(MAX_IN_PROGRESS, &combined_event_count);
24754 + atomic_add_unchecked(MAX_IN_PROGRESS, &combined_event_count);
24755 }
24756
24757 /**
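The wakeup.c hunk above keeps the driver core's existing trick of packing two counters into one atomic word (registered events in the high half, events in progress in the low half); only the accessors switch to the _unchecked variants. A small standalone illustration of that packing, reusing the IN_PROGRESS_BITS and MAX_IN_PROGRESS definitions visible in the context lines (illustration only):

#include <stdio.h>

#define IN_PROGRESS_BITS	(sizeof(int) * 4)
#define MAX_IN_PROGRESS		((1 << IN_PROGRESS_BITS) - 1)

int main(void)
{
	unsigned int comb = 0;

	comb += 1;			/* wakeup_source_activate(): one more event in progress */
	comb += MAX_IN_PROGRESS;	/* wakeup_source_deactivate(): inpr - 1 and cnt + 1 via the carry */

	unsigned int cnt  = comb >> IN_PROGRESS_BITS;	/* registered wakeup events */
	unsigned int inpr = comb & MAX_IN_PROGRESS;	/* wakeup events in progress */

	printf("cnt=%u inpr=%u\n", cnt, inpr);		/* prints cnt=1 inpr=0 */
	return 0;
}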
24758 diff -urNp linux-3.0.8/drivers/block/cciss.c linux-3.0.8/drivers/block/cciss.c
24759 --- linux-3.0.8/drivers/block/cciss.c 2011-07-21 22:17:23.000000000 -0400
24760 +++ linux-3.0.8/drivers/block/cciss.c 2011-08-23 21:48:14.000000000 -0400
24761 @@ -1179,6 +1179,8 @@ static int cciss_ioctl32_passthru(struct
24762 int err;
24763 u32 cp;
24764
24765 + memset(&arg64, 0, sizeof(arg64));
24766 +
24767 err = 0;
24768 err |=
24769 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
24770 @@ -2986,7 +2988,7 @@ static void start_io(ctlr_info_t *h)
24771 while (!list_empty(&h->reqQ)) {
24772 c = list_entry(h->reqQ.next, CommandList_struct, list);
24773 /* can't do anything if fifo is full */
24774 - if ((h->access.fifo_full(h))) {
24775 + if ((h->access->fifo_full(h))) {
24776 dev_warn(&h->pdev->dev, "fifo full\n");
24777 break;
24778 }
24779 @@ -2996,7 +2998,7 @@ static void start_io(ctlr_info_t *h)
24780 h->Qdepth--;
24781
24782 /* Tell the controller execute command */
24783 - h->access.submit_command(h, c);
24784 + h->access->submit_command(h, c);
24785
24786 /* Put job onto the completed Q */
24787 addQ(&h->cmpQ, c);
24788 @@ -3422,17 +3424,17 @@ startio:
24789
24790 static inline unsigned long get_next_completion(ctlr_info_t *h)
24791 {
24792 - return h->access.command_completed(h);
24793 + return h->access->command_completed(h);
24794 }
24795
24796 static inline int interrupt_pending(ctlr_info_t *h)
24797 {
24798 - return h->access.intr_pending(h);
24799 + return h->access->intr_pending(h);
24800 }
24801
24802 static inline long interrupt_not_for_us(ctlr_info_t *h)
24803 {
24804 - return ((h->access.intr_pending(h) == 0) ||
24805 + return ((h->access->intr_pending(h) == 0) ||
24806 (h->interrupts_enabled == 0));
24807 }
24808
24809 @@ -3465,7 +3467,7 @@ static inline u32 next_command(ctlr_info
24810 u32 a;
24811
24812 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
24813 - return h->access.command_completed(h);
24814 + return h->access->command_completed(h);
24815
24816 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
24817 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
24818 @@ -4020,7 +4022,7 @@ static void __devinit cciss_put_controll
24819 trans_support & CFGTBL_Trans_use_short_tags);
24820
24821 /* Change the access methods to the performant access methods */
24822 - h->access = SA5_performant_access;
24823 + h->access = &SA5_performant_access;
24824 h->transMethod = CFGTBL_Trans_Performant;
24825
24826 return;
24827 @@ -4292,7 +4294,7 @@ static int __devinit cciss_pci_init(ctlr
24828 if (prod_index < 0)
24829 return -ENODEV;
24830 h->product_name = products[prod_index].product_name;
24831 - h->access = *(products[prod_index].access);
24832 + h->access = products[prod_index].access;
24833
24834 if (cciss_board_disabled(h)) {
24835 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
24836 @@ -5002,7 +5004,7 @@ reinit_after_soft_reset:
24837 }
24838
24839 /* make sure the board interrupts are off */
24840 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
24841 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
24842 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
24843 if (rc)
24844 goto clean2;
24845 @@ -5054,7 +5056,7 @@ reinit_after_soft_reset:
24846 * fake ones to scoop up any residual completions.
24847 */
24848 spin_lock_irqsave(&h->lock, flags);
24849 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
24850 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
24851 spin_unlock_irqrestore(&h->lock, flags);
24852 free_irq(h->intr[PERF_MODE_INT], h);
24853 rc = cciss_request_irq(h, cciss_msix_discard_completions,
24854 @@ -5074,9 +5076,9 @@ reinit_after_soft_reset:
24855 dev_info(&h->pdev->dev, "Board READY.\n");
24856 dev_info(&h->pdev->dev,
24857 "Waiting for stale completions to drain.\n");
24858 - h->access.set_intr_mask(h, CCISS_INTR_ON);
24859 + h->access->set_intr_mask(h, CCISS_INTR_ON);
24860 msleep(10000);
24861 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
24862 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
24863
24864 rc = controller_reset_failed(h->cfgtable);
24865 if (rc)
24866 @@ -5099,7 +5101,7 @@ reinit_after_soft_reset:
24867 cciss_scsi_setup(h);
24868
24869 /* Turn the interrupts on so we can service requests */
24870 - h->access.set_intr_mask(h, CCISS_INTR_ON);
24871 + h->access->set_intr_mask(h, CCISS_INTR_ON);
24872
24873 /* Get the firmware version */
24874 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
24875 @@ -5171,7 +5173,7 @@ static void cciss_shutdown(struct pci_de
24876 kfree(flush_buf);
24877 if (return_code != IO_OK)
24878 dev_warn(&h->pdev->dev, "Error flushing cache\n");
24879 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
24880 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
24881 free_irq(h->intr[PERF_MODE_INT], h);
24882 }
24883
24884 diff -urNp linux-3.0.8/drivers/block/cciss.h linux-3.0.8/drivers/block/cciss.h
24885 --- linux-3.0.8/drivers/block/cciss.h 2011-10-24 08:05:21.000000000 -0400
24886 +++ linux-3.0.8/drivers/block/cciss.h 2011-08-23 21:47:55.000000000 -0400
24887 @@ -100,7 +100,7 @@ struct ctlr_info
24888 /* information about each logical volume */
24889 drive_info_struct *drv[CISS_MAX_LUN];
24890
24891 - struct access_method access;
24892 + struct access_method *access;
24893
24894 /* queue and queue Info */
24895 struct list_head reqQ;
24896 diff -urNp linux-3.0.8/drivers/block/cpqarray.c linux-3.0.8/drivers/block/cpqarray.c
24897 --- linux-3.0.8/drivers/block/cpqarray.c 2011-07-21 22:17:23.000000000 -0400
24898 +++ linux-3.0.8/drivers/block/cpqarray.c 2011-08-23 21:48:14.000000000 -0400
24899 @@ -404,7 +404,7 @@ static int __devinit cpqarray_register_c
24900 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
24901 goto Enomem4;
24902 }
24903 - hba[i]->access.set_intr_mask(hba[i], 0);
24904 + hba[i]->access->set_intr_mask(hba[i], 0);
24905 if (request_irq(hba[i]->intr, do_ida_intr,
24906 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
24907 {
24908 @@ -459,7 +459,7 @@ static int __devinit cpqarray_register_c
24909 add_timer(&hba[i]->timer);
24910
24911 /* Enable IRQ now that spinlock and rate limit timer are set up */
24912 - hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
24913 + hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
24914
24915 for(j=0; j<NWD; j++) {
24916 struct gendisk *disk = ida_gendisk[i][j];
24917 @@ -694,7 +694,7 @@ DBGINFO(
24918 for(i=0; i<NR_PRODUCTS; i++) {
24919 if (board_id == products[i].board_id) {
24920 c->product_name = products[i].product_name;
24921 - c->access = *(products[i].access);
24922 + c->access = products[i].access;
24923 break;
24924 }
24925 }
24926 @@ -792,7 +792,7 @@ static int __devinit cpqarray_eisa_detec
24927 hba[ctlr]->intr = intr;
24928 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
24929 hba[ctlr]->product_name = products[j].product_name;
24930 - hba[ctlr]->access = *(products[j].access);
24931 + hba[ctlr]->access = products[j].access;
24932 hba[ctlr]->ctlr = ctlr;
24933 hba[ctlr]->board_id = board_id;
24934 hba[ctlr]->pci_dev = NULL; /* not PCI */
24935 @@ -911,6 +911,8 @@ static void do_ida_request(struct reques
24936 struct scatterlist tmp_sg[SG_MAX];
24937 int i, dir, seg;
24938
24939 + pax_track_stack();
24940 +
24941 queue_next:
24942 creq = blk_peek_request(q);
24943 if (!creq)
24944 @@ -980,7 +982,7 @@ static void start_io(ctlr_info_t *h)
24945
24946 while((c = h->reqQ) != NULL) {
24947 /* Can't do anything if we're busy */
24948 - if (h->access.fifo_full(h) == 0)
24949 + if (h->access->fifo_full(h) == 0)
24950 return;
24951
24952 /* Get the first entry from the request Q */
24953 @@ -988,7 +990,7 @@ static void start_io(ctlr_info_t *h)
24954 h->Qdepth--;
24955
24956 /* Tell the controller to do our bidding */
24957 - h->access.submit_command(h, c);
24958 + h->access->submit_command(h, c);
24959
24960 /* Get onto the completion Q */
24961 addQ(&h->cmpQ, c);
24962 @@ -1050,7 +1052,7 @@ static irqreturn_t do_ida_intr(int irq,
24963 unsigned long flags;
24964 __u32 a,a1;
24965
24966 - istat = h->access.intr_pending(h);
24967 + istat = h->access->intr_pending(h);
24968 /* Is this interrupt for us? */
24969 if (istat == 0)
24970 return IRQ_NONE;
24971 @@ -1061,7 +1063,7 @@ static irqreturn_t do_ida_intr(int irq,
24972 */
24973 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
24974 if (istat & FIFO_NOT_EMPTY) {
24975 - while((a = h->access.command_completed(h))) {
24976 + while((a = h->access->command_completed(h))) {
24977 a1 = a; a &= ~3;
24978 if ((c = h->cmpQ) == NULL)
24979 {
24980 @@ -1449,11 +1451,11 @@ static int sendcmd(
24981 /*
24982 * Disable interrupt
24983 */
24984 - info_p->access.set_intr_mask(info_p, 0);
24985 + info_p->access->set_intr_mask(info_p, 0);
24986 /* Make sure there is room in the command FIFO */
24987 /* Actually it should be completely empty at this time. */
24988 for (i = 200000; i > 0; i--) {
24989 - temp = info_p->access.fifo_full(info_p);
24990 + temp = info_p->access->fifo_full(info_p);
24991 if (temp != 0) {
24992 break;
24993 }
24994 @@ -1466,7 +1468,7 @@ DBG(
24995 /*
24996 * Send the cmd
24997 */
24998 - info_p->access.submit_command(info_p, c);
24999 + info_p->access->submit_command(info_p, c);
25000 complete = pollcomplete(ctlr);
25001
25002 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
25003 @@ -1549,9 +1551,9 @@ static int revalidate_allvol(ctlr_info_t
25004 * we check the new geometry. Then turn interrupts back on when
25005 * we're done.
25006 */
25007 - host->access.set_intr_mask(host, 0);
25008 + host->access->set_intr_mask(host, 0);
25009 getgeometry(ctlr);
25010 - host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
25011 + host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
25012
25013 for(i=0; i<NWD; i++) {
25014 struct gendisk *disk = ida_gendisk[ctlr][i];
25015 @@ -1591,7 +1593,7 @@ static int pollcomplete(int ctlr)
25016 /* Wait (up to 2 seconds) for a command to complete */
25017
25018 for (i = 200000; i > 0; i--) {
25019 - done = hba[ctlr]->access.command_completed(hba[ctlr]);
25020 + done = hba[ctlr]->access->command_completed(hba[ctlr]);
25021 if (done == 0) {
25022 udelay(10); /* a short fixed delay */
25023 } else
25024 diff -urNp linux-3.0.8/drivers/block/cpqarray.h linux-3.0.8/drivers/block/cpqarray.h
25025 --- linux-3.0.8/drivers/block/cpqarray.h 2011-07-21 22:17:23.000000000 -0400
25026 +++ linux-3.0.8/drivers/block/cpqarray.h 2011-08-23 21:47:55.000000000 -0400
25027 @@ -99,7 +99,7 @@ struct ctlr_info {
25028 drv_info_t drv[NWD];
25029 struct proc_dir_entry *proc;
25030
25031 - struct access_method access;
25032 + struct access_method *access;
25033
25034 cmdlist_t *reqQ;
25035 cmdlist_t *cmpQ;
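Throughout the cciss and cpqarray hunks above, the embedded struct access_method member of the controller struct becomes a pointer, so each controller references the driver's constant method table instead of holding a writable copy, and every call site moves from '.' to '->'. A minimal standalone sketch of the same shape (all names are hypothetical stand-ins for the real driver types):

#include <stdio.h>

struct ctlr_info_example;

struct access_method_example {
	unsigned long (*fifo_full)(struct ctlr_info_example *h);
};

static unsigned long stub_fifo_full(struct ctlr_info_example *h)
{
	(void)h;
	return 1;	/* pretend the command FIFO has room */
}

static const struct access_method_example SA5_access_example = {
	.fifo_full = stub_fifo_full,
};

struct ctlr_info_example {
	/* before the patch: struct access_method_example access;  (copy by value) */
	const struct access_method_example *access;	/* after: pointer to the const table */
};

int main(void)
{
	struct ctlr_info_example h;

	h.access = &SA5_access_example;		/* was: h->access = *(products[i].access); */
	printf("fifo_full = %lu\n", h.access->fifo_full(&h));	/* call sites now use '->' */
	return 0;
}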
25036 diff -urNp linux-3.0.8/drivers/block/DAC960.c linux-3.0.8/drivers/block/DAC960.c
25037 --- linux-3.0.8/drivers/block/DAC960.c 2011-07-21 22:17:23.000000000 -0400
25038 +++ linux-3.0.8/drivers/block/DAC960.c 2011-08-23 21:48:14.000000000 -0400
25039 @@ -1980,6 +1980,8 @@ static bool DAC960_V1_ReadDeviceConfigur
25040 unsigned long flags;
25041 int Channel, TargetID;
25042
25043 + pax_track_stack();
25044 +
25045 if (!init_dma_loaf(Controller->PCIDevice, &local_dma,
25046 DAC960_V1_MaxChannels*(sizeof(DAC960_V1_DCDB_T) +
25047 sizeof(DAC960_SCSI_Inquiry_T) +
25048 diff -urNp linux-3.0.8/drivers/block/drbd/drbd_int.h linux-3.0.8/drivers/block/drbd/drbd_int.h
25049 --- linux-3.0.8/drivers/block/drbd/drbd_int.h 2011-07-21 22:17:23.000000000 -0400
25050 +++ linux-3.0.8/drivers/block/drbd/drbd_int.h 2011-10-06 04:17:55.000000000 -0400
25051 @@ -737,7 +737,7 @@ struct drbd_request;
25052 struct drbd_epoch {
25053 struct list_head list;
25054 unsigned int barrier_nr;
25055 - atomic_t epoch_size; /* increased on every request added. */
25056 + atomic_unchecked_t epoch_size; /* increased on every request added. */
25057 atomic_t active; /* increased on every req. added, and dec on every finished. */
25058 unsigned long flags;
25059 };
25060 @@ -1109,7 +1109,7 @@ struct drbd_conf {
25061 void *int_dig_in;
25062 void *int_dig_vv;
25063 wait_queue_head_t seq_wait;
25064 - atomic_t packet_seq;
25065 + atomic_unchecked_t packet_seq;
25066 unsigned int peer_seq;
25067 spinlock_t peer_seq_lock;
25068 unsigned int minor;
25069 @@ -1618,30 +1618,30 @@ static inline int drbd_setsockopt(struct
25070
25071 static inline void drbd_tcp_cork(struct socket *sock)
25072 {
25073 - int __user val = 1;
25074 + int val = 1;
25075 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
25076 - (char __user *)&val, sizeof(val));
25077 + (char __force_user *)&val, sizeof(val));
25078 }
25079
25080 static inline void drbd_tcp_uncork(struct socket *sock)
25081 {
25082 - int __user val = 0;
25083 + int val = 0;
25084 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
25085 - (char __user *)&val, sizeof(val));
25086 + (char __force_user *)&val, sizeof(val));
25087 }
25088
25089 static inline void drbd_tcp_nodelay(struct socket *sock)
25090 {
25091 - int __user val = 1;
25092 + int val = 1;
25093 (void) drbd_setsockopt(sock, SOL_TCP, TCP_NODELAY,
25094 - (char __user *)&val, sizeof(val));
25095 + (char __force_user *)&val, sizeof(val));
25096 }
25097
25098 static inline void drbd_tcp_quickack(struct socket *sock)
25099 {
25100 - int __user val = 2;
25101 + int val = 2;
25102 (void) drbd_setsockopt(sock, SOL_TCP, TCP_QUICKACK,
25103 - (char __user *)&val, sizeof(val));
25104 + (char __force_user *)&val, sizeof(val));
25105 }
25106
25107 void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo);
25108 diff -urNp linux-3.0.8/drivers/block/drbd/drbd_main.c linux-3.0.8/drivers/block/drbd/drbd_main.c
25109 --- linux-3.0.8/drivers/block/drbd/drbd_main.c 2011-07-21 22:17:23.000000000 -0400
25110 +++ linux-3.0.8/drivers/block/drbd/drbd_main.c 2011-08-23 21:47:55.000000000 -0400
25111 @@ -2397,7 +2397,7 @@ static int _drbd_send_ack(struct drbd_co
25112 p.sector = sector;
25113 p.block_id = block_id;
25114 p.blksize = blksize;
25115 - p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
25116 + p.seq_num = cpu_to_be32(atomic_add_return_unchecked(1, &mdev->packet_seq));
25117
25118 if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
25119 return false;
25120 @@ -2696,7 +2696,7 @@ int drbd_send_dblock(struct drbd_conf *m
25121 p.sector = cpu_to_be64(req->sector);
25122 p.block_id = (unsigned long)req;
25123 p.seq_num = cpu_to_be32(req->seq_num =
25124 - atomic_add_return(1, &mdev->packet_seq));
25125 + atomic_add_return_unchecked(1, &mdev->packet_seq));
25126
25127 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
25128
25129 @@ -2981,7 +2981,7 @@ void drbd_init_set_defaults(struct drbd_
25130 atomic_set(&mdev->unacked_cnt, 0);
25131 atomic_set(&mdev->local_cnt, 0);
25132 atomic_set(&mdev->net_cnt, 0);
25133 - atomic_set(&mdev->packet_seq, 0);
25134 + atomic_set_unchecked(&mdev->packet_seq, 0);
25135 atomic_set(&mdev->pp_in_use, 0);
25136 atomic_set(&mdev->pp_in_use_by_net, 0);
25137 atomic_set(&mdev->rs_sect_in, 0);
25138 @@ -3063,8 +3063,8 @@ void drbd_mdev_cleanup(struct drbd_conf
25139 mdev->receiver.t_state);
25140
25141 /* no need to lock it, I'm the only thread alive */
25142 - if (atomic_read(&mdev->current_epoch->epoch_size) != 0)
25143 - dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
25144 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size) != 0)
25145 + dev_err(DEV, "epoch_size:%d\n", atomic_read_unchecked(&mdev->current_epoch->epoch_size));
25146 mdev->al_writ_cnt =
25147 mdev->bm_writ_cnt =
25148 mdev->read_cnt =
25149 diff -urNp linux-3.0.8/drivers/block/drbd/drbd_nl.c linux-3.0.8/drivers/block/drbd/drbd_nl.c
25150 --- linux-3.0.8/drivers/block/drbd/drbd_nl.c 2011-07-21 22:17:23.000000000 -0400
25151 +++ linux-3.0.8/drivers/block/drbd/drbd_nl.c 2011-08-23 21:47:55.000000000 -0400
25152 @@ -2359,7 +2359,7 @@ static void drbd_connector_callback(stru
25153 module_put(THIS_MODULE);
25154 }
25155
25156 -static atomic_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
25157 +static atomic_unchecked_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
25158
25159 static unsigned short *
25160 __tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data,
25161 @@ -2430,7 +2430,7 @@ void drbd_bcast_state(struct drbd_conf *
25162 cn_reply->id.idx = CN_IDX_DRBD;
25163 cn_reply->id.val = CN_VAL_DRBD;
25164
25165 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
25166 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
25167 cn_reply->ack = 0; /* not used here. */
25168 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
25169 (int)((char *)tl - (char *)reply->tag_list);
25170 @@ -2462,7 +2462,7 @@ void drbd_bcast_ev_helper(struct drbd_co
25171 cn_reply->id.idx = CN_IDX_DRBD;
25172 cn_reply->id.val = CN_VAL_DRBD;
25173
25174 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
25175 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
25176 cn_reply->ack = 0; /* not used here. */
25177 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
25178 (int)((char *)tl - (char *)reply->tag_list);
25179 @@ -2540,7 +2540,7 @@ void drbd_bcast_ee(struct drbd_conf *mde
25180 cn_reply->id.idx = CN_IDX_DRBD;
25181 cn_reply->id.val = CN_VAL_DRBD;
25182
25183 - cn_reply->seq = atomic_add_return(1,&drbd_nl_seq);
25184 + cn_reply->seq = atomic_add_return_unchecked(1,&drbd_nl_seq);
25185 cn_reply->ack = 0; // not used here.
25186 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
25187 (int)((char*)tl - (char*)reply->tag_list);
25188 @@ -2579,7 +2579,7 @@ void drbd_bcast_sync_progress(struct drb
25189 cn_reply->id.idx = CN_IDX_DRBD;
25190 cn_reply->id.val = CN_VAL_DRBD;
25191
25192 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
25193 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
25194 cn_reply->ack = 0; /* not used here. */
25195 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
25196 (int)((char *)tl - (char *)reply->tag_list);
25197 diff -urNp linux-3.0.8/drivers/block/drbd/drbd_receiver.c linux-3.0.8/drivers/block/drbd/drbd_receiver.c
25198 --- linux-3.0.8/drivers/block/drbd/drbd_receiver.c 2011-07-21 22:17:23.000000000 -0400
25199 +++ linux-3.0.8/drivers/block/drbd/drbd_receiver.c 2011-08-23 21:47:55.000000000 -0400
25200 @@ -894,7 +894,7 @@ retry:
25201 sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
25202 sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
25203
25204 - atomic_set(&mdev->packet_seq, 0);
25205 + atomic_set_unchecked(&mdev->packet_seq, 0);
25206 mdev->peer_seq = 0;
25207
25208 drbd_thread_start(&mdev->asender);
25209 @@ -985,7 +985,7 @@ static enum finish_epoch drbd_may_finish
25210 do {
25211 next_epoch = NULL;
25212
25213 - epoch_size = atomic_read(&epoch->epoch_size);
25214 + epoch_size = atomic_read_unchecked(&epoch->epoch_size);
25215
25216 switch (ev & ~EV_CLEANUP) {
25217 case EV_PUT:
25218 @@ -1020,7 +1020,7 @@ static enum finish_epoch drbd_may_finish
25219 rv = FE_DESTROYED;
25220 } else {
25221 epoch->flags = 0;
25222 - atomic_set(&epoch->epoch_size, 0);
25223 + atomic_set_unchecked(&epoch->epoch_size, 0);
25224 /* atomic_set(&epoch->active, 0); is already zero */
25225 if (rv == FE_STILL_LIVE)
25226 rv = FE_RECYCLED;
25227 @@ -1191,14 +1191,14 @@ static int receive_Barrier(struct drbd_c
25228 drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
25229 drbd_flush(mdev);
25230
25231 - if (atomic_read(&mdev->current_epoch->epoch_size)) {
25232 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
25233 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
25234 if (epoch)
25235 break;
25236 }
25237
25238 epoch = mdev->current_epoch;
25239 - wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);
25240 + wait_event(mdev->ee_wait, atomic_read_unchecked(&epoch->epoch_size) == 0);
25241
25242 D_ASSERT(atomic_read(&epoch->active) == 0);
25243 D_ASSERT(epoch->flags == 0);
25244 @@ -1210,11 +1210,11 @@ static int receive_Barrier(struct drbd_c
25245 }
25246
25247 epoch->flags = 0;
25248 - atomic_set(&epoch->epoch_size, 0);
25249 + atomic_set_unchecked(&epoch->epoch_size, 0);
25250 atomic_set(&epoch->active, 0);
25251
25252 spin_lock(&mdev->epoch_lock);
25253 - if (atomic_read(&mdev->current_epoch->epoch_size)) {
25254 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
25255 list_add(&epoch->list, &mdev->current_epoch->list);
25256 mdev->current_epoch = epoch;
25257 mdev->epochs++;
25258 @@ -1663,7 +1663,7 @@ static int receive_Data(struct drbd_conf
25259 spin_unlock(&mdev->peer_seq_lock);
25260
25261 drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
25262 - atomic_inc(&mdev->current_epoch->epoch_size);
25263 + atomic_inc_unchecked(&mdev->current_epoch->epoch_size);
25264 return drbd_drain_block(mdev, data_size);
25265 }
25266
25267 @@ -1689,7 +1689,7 @@ static int receive_Data(struct drbd_conf
25268
25269 spin_lock(&mdev->epoch_lock);
25270 e->epoch = mdev->current_epoch;
25271 - atomic_inc(&e->epoch->epoch_size);
25272 + atomic_inc_unchecked(&e->epoch->epoch_size);
25273 atomic_inc(&e->epoch->active);
25274 spin_unlock(&mdev->epoch_lock);
25275
25276 @@ -3885,7 +3885,7 @@ static void drbd_disconnect(struct drbd_
25277 D_ASSERT(list_empty(&mdev->done_ee));
25278
25279 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
25280 - atomic_set(&mdev->current_epoch->epoch_size, 0);
25281 + atomic_set_unchecked(&mdev->current_epoch->epoch_size, 0);
25282 D_ASSERT(list_empty(&mdev->current_epoch->list));
25283 }
25284
25285 diff -urNp linux-3.0.8/drivers/block/loop.c linux-3.0.8/drivers/block/loop.c
25286 --- linux-3.0.8/drivers/block/loop.c 2011-10-24 08:05:23.000000000 -0400
25287 +++ linux-3.0.8/drivers/block/loop.c 2011-10-06 04:17:55.000000000 -0400
25288 @@ -283,7 +283,7 @@ static int __do_lo_send_write(struct fil
25289 mm_segment_t old_fs = get_fs();
25290
25291 set_fs(get_ds());
25292 - bw = file->f_op->write(file, buf, len, &pos);
25293 + bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
25294 set_fs(old_fs);
25295 if (likely(bw == len))
25296 return 0;
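The __force_user cast added above, like the ones in the devtmpfs and drbd hunks earlier, only matters to the sparse static checker; in a normal build the annotation expands to nothing, so passing a kernel buffer to a user-pointer API under set_fs(get_ds()) generates the same code as before. A rough illustration of how such address-space annotations behave, using sparse's documented attributes (the _example names are made up for this sketch):

#ifdef __CHECKER__
# define __user_example		__attribute__((noderef, address_space(1)))
# define __force_example	__attribute__((force))
#else
# define __user_example
# define __force_example
#endif

/* an API that, like file->f_op->write(), is declared to take a user pointer */
static long fake_write(const char __user_example *buf, unsigned long len)
{
	(void)buf;
	return (long)len;
}

long demo_write_kernel_buffer(void)
{
	char kbuf[6] = "hello";

	/* a kernel buffer handed to the user-pointer API: the force cast tells
	 * the checker this is intentional, mirroring (const char __force_user *)buf
	 * in the loop.c hunk above */
	return fake_write((const char __force_example __user_example *)kbuf, 5);
}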
25297 diff -urNp linux-3.0.8/drivers/block/nbd.c linux-3.0.8/drivers/block/nbd.c
25298 --- linux-3.0.8/drivers/block/nbd.c 2011-07-21 22:17:23.000000000 -0400
25299 +++ linux-3.0.8/drivers/block/nbd.c 2011-08-23 21:48:14.000000000 -0400
25300 @@ -157,6 +157,8 @@ static int sock_xmit(struct nbd_device *
25301 struct kvec iov;
25302 sigset_t blocked, oldset;
25303
25304 + pax_track_stack();
25305 +
25306 if (unlikely(!sock)) {
25307 printk(KERN_ERR "%s: Attempted %s on closed socket in sock_xmit\n",
25308 lo->disk->disk_name, (send ? "send" : "recv"));
25309 @@ -572,6 +574,8 @@ static void do_nbd_request(struct reques
25310 static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
25311 unsigned int cmd, unsigned long arg)
25312 {
25313 + pax_track_stack();
25314 +
25315 switch (cmd) {
25316 case NBD_DISCONNECT: {
25317 struct request sreq;
25318 diff -urNp linux-3.0.8/drivers/char/agp/frontend.c linux-3.0.8/drivers/char/agp/frontend.c
25319 --- linux-3.0.8/drivers/char/agp/frontend.c 2011-07-21 22:17:23.000000000 -0400
25320 +++ linux-3.0.8/drivers/char/agp/frontend.c 2011-08-23 21:47:55.000000000 -0400
25321 @@ -817,7 +817,7 @@ static int agpioc_reserve_wrap(struct ag
25322 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
25323 return -EFAULT;
25324
25325 - if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
25326 + if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
25327 return -EFAULT;
25328
25329 client = agp_find_client_by_pid(reserve.pid);
25330 diff -urNp linux-3.0.8/drivers/char/briq_panel.c linux-3.0.8/drivers/char/briq_panel.c
25331 --- linux-3.0.8/drivers/char/briq_panel.c 2011-07-21 22:17:23.000000000 -0400
25332 +++ linux-3.0.8/drivers/char/briq_panel.c 2011-08-23 21:48:14.000000000 -0400
25333 @@ -9,6 +9,7 @@
25334 #include <linux/types.h>
25335 #include <linux/errno.h>
25336 #include <linux/tty.h>
25337 +#include <linux/mutex.h>
25338 #include <linux/timer.h>
25339 #include <linux/kernel.h>
25340 #include <linux/wait.h>
25341 @@ -34,6 +35,7 @@ static int vfd_is_open;
25342 static unsigned char vfd[40];
25343 static int vfd_cursor;
25344 static unsigned char ledpb, led;
25345 +static DEFINE_MUTEX(vfd_mutex);
25346
25347 static void update_vfd(void)
25348 {
25349 @@ -140,12 +142,15 @@ static ssize_t briq_panel_write(struct f
25350 if (!vfd_is_open)
25351 return -EBUSY;
25352
25353 + mutex_lock(&vfd_mutex);
25354 for (;;) {
25355 char c;
25356 if (!indx)
25357 break;
25358 - if (get_user(c, buf))
25359 + if (get_user(c, buf)) {
25360 + mutex_unlock(&vfd_mutex);
25361 return -EFAULT;
25362 + }
25363 if (esc) {
25364 set_led(c);
25365 esc = 0;
25366 @@ -175,6 +180,7 @@ static ssize_t briq_panel_write(struct f
25367 buf++;
25368 }
25369 update_vfd();
25370 + mutex_unlock(&vfd_mutex);
25371
25372 return len;
25373 }
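The briq_panel hunk above serializes the whole VFD write path behind a new vfd_mutex and takes care to drop the lock on the early get_user() failure return as well as on the normal exit. A standalone sketch of that lock-around-the-loop shape using pthreads (every name here is a hypothetical stand-in for the driver code):

#include <pthread.h>
#include <errno.h>

static pthread_mutex_t vfd_mutex_example = PTHREAD_MUTEX_INITIALIZER;

/* stand-in for get_user(): fetch one byte, fail on a missing source */
static int get_byte(char *c, const char *buf, int i)
{
	if (!buf)
		return -1;
	*c = buf[i];
	return 0;
}

int write_example(const char *buf, int len)
{
	pthread_mutex_lock(&vfd_mutex_example);
	for (int i = 0; i < len; i++) {
		char c;

		if (get_byte(&c, buf, i)) {
			pthread_mutex_unlock(&vfd_mutex_example);	/* error path unlocks too */
			return -EFAULT;
		}
		/* ... update the display buffer with c ... */
	}
	pthread_mutex_unlock(&vfd_mutex_example);	/* normal path */
	return len;
}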
25374 diff -urNp linux-3.0.8/drivers/char/genrtc.c linux-3.0.8/drivers/char/genrtc.c
25375 --- linux-3.0.8/drivers/char/genrtc.c 2011-07-21 22:17:23.000000000 -0400
25376 +++ linux-3.0.8/drivers/char/genrtc.c 2011-08-23 21:48:14.000000000 -0400
25377 @@ -273,6 +273,7 @@ static int gen_rtc_ioctl(struct file *fi
25378 switch (cmd) {
25379
25380 case RTC_PLL_GET:
25381 + memset(&pll, 0, sizeof(pll));
25382 if (get_rtc_pll(&pll))
25383 return -EINVAL;
25384 else
25385 diff -urNp linux-3.0.8/drivers/char/hpet.c linux-3.0.8/drivers/char/hpet.c
25386 --- linux-3.0.8/drivers/char/hpet.c 2011-07-21 22:17:23.000000000 -0400
25387 +++ linux-3.0.8/drivers/char/hpet.c 2011-08-23 21:47:55.000000000 -0400
25388 @@ -572,7 +572,7 @@ static inline unsigned long hpet_time_di
25389 }
25390
25391 static int
25392 -hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
25393 +hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
25394 struct hpet_info *info)
25395 {
25396 struct hpet_timer __iomem *timer;
25397 diff -urNp linux-3.0.8/drivers/char/ipmi/ipmi_msghandler.c linux-3.0.8/drivers/char/ipmi/ipmi_msghandler.c
25398 --- linux-3.0.8/drivers/char/ipmi/ipmi_msghandler.c 2011-07-21 22:17:23.000000000 -0400
25399 +++ linux-3.0.8/drivers/char/ipmi/ipmi_msghandler.c 2011-08-23 21:48:14.000000000 -0400
25400 @@ -415,7 +415,7 @@ struct ipmi_smi {
25401 struct proc_dir_entry *proc_dir;
25402 char proc_dir_name[10];
25403
25404 - atomic_t stats[IPMI_NUM_STATS];
25405 + atomic_unchecked_t stats[IPMI_NUM_STATS];
25406
25407 /*
25408 * run_to_completion duplicate of smb_info, smi_info
25409 @@ -448,9 +448,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
25410
25411
25412 #define ipmi_inc_stat(intf, stat) \
25413 - atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
25414 + atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
25415 #define ipmi_get_stat(intf, stat) \
25416 - ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
25417 + ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
25418
25419 static int is_lan_addr(struct ipmi_addr *addr)
25420 {
25421 @@ -2868,7 +2868,7 @@ int ipmi_register_smi(struct ipmi_smi_ha
25422 INIT_LIST_HEAD(&intf->cmd_rcvrs);
25423 init_waitqueue_head(&intf->waitq);
25424 for (i = 0; i < IPMI_NUM_STATS; i++)
25425 - atomic_set(&intf->stats[i], 0);
25426 + atomic_set_unchecked(&intf->stats[i], 0);
25427
25428 intf->proc_dir = NULL;
25429
25430 @@ -4220,6 +4220,8 @@ static void send_panic_events(char *str)
25431 struct ipmi_smi_msg smi_msg;
25432 struct ipmi_recv_msg recv_msg;
25433
25434 + pax_track_stack();
25435 +
25436 si = (struct ipmi_system_interface_addr *) &addr;
25437 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
25438 si->channel = IPMI_BMC_CHANNEL;
25439 diff -urNp linux-3.0.8/drivers/char/ipmi/ipmi_si_intf.c linux-3.0.8/drivers/char/ipmi/ipmi_si_intf.c
25440 --- linux-3.0.8/drivers/char/ipmi/ipmi_si_intf.c 2011-07-21 22:17:23.000000000 -0400
25441 +++ linux-3.0.8/drivers/char/ipmi/ipmi_si_intf.c 2011-08-23 21:47:55.000000000 -0400
25442 @@ -277,7 +277,7 @@ struct smi_info {
25443 unsigned char slave_addr;
25444
25445 /* Counters and things for the proc filesystem. */
25446 - atomic_t stats[SI_NUM_STATS];
25447 + atomic_unchecked_t stats[SI_NUM_STATS];
25448
25449 struct task_struct *thread;
25450
25451 @@ -286,9 +286,9 @@ struct smi_info {
25452 };
25453
25454 #define smi_inc_stat(smi, stat) \
25455 - atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
25456 + atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
25457 #define smi_get_stat(smi, stat) \
25458 - ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
25459 + ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
25460
25461 #define SI_MAX_PARMS 4
25462
25463 @@ -3230,7 +3230,7 @@ static int try_smi_init(struct smi_info
25464 atomic_set(&new_smi->req_events, 0);
25465 new_smi->run_to_completion = 0;
25466 for (i = 0; i < SI_NUM_STATS; i++)
25467 - atomic_set(&new_smi->stats[i], 0);
25468 + atomic_set_unchecked(&new_smi->stats[i], 0);
25469
25470 new_smi->interrupt_disabled = 1;
25471 atomic_set(&new_smi->stop_operation, 0);
25472 diff -urNp linux-3.0.8/drivers/char/Kconfig linux-3.0.8/drivers/char/Kconfig
25473 --- linux-3.0.8/drivers/char/Kconfig 2011-07-21 22:17:23.000000000 -0400
25474 +++ linux-3.0.8/drivers/char/Kconfig 2011-08-23 21:48:14.000000000 -0400
25475 @@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
25476
25477 config DEVKMEM
25478 bool "/dev/kmem virtual device support"
25479 - default y
25480 + default n
25481 + depends on !GRKERNSEC_KMEM
25482 help
25483 Say Y here if you want to support the /dev/kmem device. The
25484 /dev/kmem device is rarely used, but can be used for certain
25485 @@ -596,6 +597,7 @@ config DEVPORT
25486 bool
25487 depends on !M68K
25488 depends on ISA || PCI
25489 + depends on !GRKERNSEC_KMEM
25490 default y
25491
25492 source "drivers/s390/char/Kconfig"
25493 diff -urNp linux-3.0.8/drivers/char/mbcs.c linux-3.0.8/drivers/char/mbcs.c
25494 --- linux-3.0.8/drivers/char/mbcs.c 2011-07-21 22:17:23.000000000 -0400
25495 +++ linux-3.0.8/drivers/char/mbcs.c 2011-10-11 10:44:33.000000000 -0400
25496 @@ -800,7 +800,7 @@ static int mbcs_remove(struct cx_dev *de
25497 return 0;
25498 }
25499
25500 -static const struct cx_device_id __devinitdata mbcs_id_table[] = {
25501 +static const struct cx_device_id __devinitconst mbcs_id_table[] = {
25502 {
25503 .part_num = MBCS_PART_NUM,
25504 .mfg_num = MBCS_MFG_NUM,
25505 diff -urNp linux-3.0.8/drivers/char/mem.c linux-3.0.8/drivers/char/mem.c
25506 --- linux-3.0.8/drivers/char/mem.c 2011-07-21 22:17:23.000000000 -0400
25507 +++ linux-3.0.8/drivers/char/mem.c 2011-08-23 21:48:14.000000000 -0400
25508 @@ -18,6 +18,7 @@
25509 #include <linux/raw.h>
25510 #include <linux/tty.h>
25511 #include <linux/capability.h>
25512 +#include <linux/security.h>
25513 #include <linux/ptrace.h>
25514 #include <linux/device.h>
25515 #include <linux/highmem.h>
25516 @@ -34,6 +35,10 @@
25517 # include <linux/efi.h>
25518 #endif
25519
25520 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
25521 +extern struct file_operations grsec_fops;
25522 +#endif
25523 +
25524 static inline unsigned long size_inside_page(unsigned long start,
25525 unsigned long size)
25526 {
25527 @@ -65,9 +70,13 @@ static inline int range_is_allowed(unsig
25528
25529 while (cursor < to) {
25530 if (!devmem_is_allowed(pfn)) {
25531 +#ifdef CONFIG_GRKERNSEC_KMEM
25532 + gr_handle_mem_readwrite(from, to);
25533 +#else
25534 printk(KERN_INFO
25535 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
25536 current->comm, from, to);
25537 +#endif
25538 return 0;
25539 }
25540 cursor += PAGE_SIZE;
25541 @@ -75,6 +84,11 @@ static inline int range_is_allowed(unsig
25542 }
25543 return 1;
25544 }
25545 +#elif defined(CONFIG_GRKERNSEC_KMEM)
25546 +static inline int range_is_allowed(unsigned long pfn, unsigned long size)
25547 +{
25548 + return 0;
25549 +}
25550 #else
25551 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
25552 {
25553 @@ -117,6 +131,7 @@ static ssize_t read_mem(struct file *fil
25554
25555 while (count > 0) {
25556 unsigned long remaining;
25557 + char *temp;
25558
25559 sz = size_inside_page(p, count);
25560
25561 @@ -132,7 +147,23 @@ static ssize_t read_mem(struct file *fil
25562 if (!ptr)
25563 return -EFAULT;
25564
25565 - remaining = copy_to_user(buf, ptr, sz);
25566 +#ifdef CONFIG_PAX_USERCOPY
25567 + temp = kmalloc(sz, GFP_KERNEL);
25568 + if (!temp) {
25569 + unxlate_dev_mem_ptr(p, ptr);
25570 + return -ENOMEM;
25571 + }
25572 + memcpy(temp, ptr, sz);
25573 +#else
25574 + temp = ptr;
25575 +#endif
25576 +
25577 + remaining = copy_to_user(buf, temp, sz);
25578 +
25579 +#ifdef CONFIG_PAX_USERCOPY
25580 + kfree(temp);
25581 +#endif
25582 +
25583 unxlate_dev_mem_ptr(p, ptr);
25584 if (remaining)
25585 return -EFAULT;
25586 @@ -395,9 +426,8 @@ static ssize_t read_kmem(struct file *fi
25587 size_t count, loff_t *ppos)
25588 {
25589 unsigned long p = *ppos;
25590 - ssize_t low_count, read, sz;
25591 + ssize_t low_count, read, sz, err = 0;
25592 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
25593 - int err = 0;
25594
25595 read = 0;
25596 if (p < (unsigned long) high_memory) {
25597 @@ -419,6 +449,8 @@ static ssize_t read_kmem(struct file *fi
25598 }
25599 #endif
25600 while (low_count > 0) {
25601 + char *temp;
25602 +
25603 sz = size_inside_page(p, low_count);
25604
25605 /*
25606 @@ -428,7 +460,22 @@ static ssize_t read_kmem(struct file *fi
25607 */
25608 kbuf = xlate_dev_kmem_ptr((char *)p);
25609
25610 - if (copy_to_user(buf, kbuf, sz))
25611 +#ifdef CONFIG_PAX_USERCOPY
25612 + temp = kmalloc(sz, GFP_KERNEL);
25613 + if (!temp)
25614 + return -ENOMEM;
25615 + memcpy(temp, kbuf, sz);
25616 +#else
25617 + temp = kbuf;
25618 +#endif
25619 +
25620 + err = copy_to_user(buf, temp, sz);
25621 +
25622 +#ifdef CONFIG_PAX_USERCOPY
25623 + kfree(temp);
25624 +#endif
25625 +
25626 + if (err)
25627 return -EFAULT;
25628 buf += sz;
25629 p += sz;
25630 @@ -866,6 +913,9 @@ static const struct memdev {
25631 #ifdef CONFIG_CRASH_DUMP
25632 [12] = { "oldmem", 0, &oldmem_fops, NULL },
25633 #endif
25634 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
25635 + [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
25636 +#endif
25637 };
25638
25639 static int memory_open(struct inode *inode, struct file *filp)
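Under CONFIG_PAX_USERCOPY the mem.c hunks above no longer copy straight out of an arbitrary kernel mapping: the data is first staged in a freshly kmalloc'd buffer whose size the hardened usercopy checks can validate, copied to userspace from there, and the buffer freed again. A userspace transliteration of the same bounce-buffer pattern (copy_out() is a made-up stand-in for copy_to_user()):

#include <stdlib.h>
#include <string.h>
#include <errno.h>

/* stand-in for copy_to_user(): returns the number of bytes left uncopied */
static size_t copy_out(void *dst, const void *src, size_t n)
{
	memcpy(dst, src, n);
	return 0;
}

int read_through_bounce(void *user_buf, const void *kernel_ptr, size_t sz)
{
	void *temp = malloc(sz);
	size_t remaining;

	if (!temp)
		return -ENOMEM;
	memcpy(temp, kernel_ptr, sz);		/* stage into an allocation of known size */
	remaining = copy_out(user_buf, temp, sz);
	free(temp);
	return remaining ? -EFAULT : 0;
}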
25640 diff -urNp linux-3.0.8/drivers/char/nvram.c linux-3.0.8/drivers/char/nvram.c
25641 --- linux-3.0.8/drivers/char/nvram.c 2011-07-21 22:17:23.000000000 -0400
25642 +++ linux-3.0.8/drivers/char/nvram.c 2011-08-23 21:47:55.000000000 -0400
25643 @@ -246,7 +246,7 @@ static ssize_t nvram_read(struct file *f
25644
25645 spin_unlock_irq(&rtc_lock);
25646
25647 - if (copy_to_user(buf, contents, tmp - contents))
25648 + if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
25649 return -EFAULT;
25650
25651 *ppos = i;
25652 diff -urNp linux-3.0.8/drivers/char/random.c linux-3.0.8/drivers/char/random.c
25653 --- linux-3.0.8/drivers/char/random.c 2011-10-24 08:05:21.000000000 -0400
25654 +++ linux-3.0.8/drivers/char/random.c 2011-08-23 21:48:14.000000000 -0400
25655 @@ -261,8 +261,13 @@
25656 /*
25657 * Configuration information
25658 */
25659 +#ifdef CONFIG_GRKERNSEC_RANDNET
25660 +#define INPUT_POOL_WORDS 512
25661 +#define OUTPUT_POOL_WORDS 128
25662 +#else
25663 #define INPUT_POOL_WORDS 128
25664 #define OUTPUT_POOL_WORDS 32
25665 +#endif
25666 #define SEC_XFER_SIZE 512
25667 #define EXTRACT_SIZE 10
25668
25669 @@ -300,10 +305,17 @@ static struct poolinfo {
25670 int poolwords;
25671 int tap1, tap2, tap3, tap4, tap5;
25672 } poolinfo_table[] = {
25673 +#ifdef CONFIG_GRKERNSEC_RANDNET
25674 + /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
25675 + { 512, 411, 308, 208, 104, 1 },
25676 + /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
25677 + { 128, 103, 76, 51, 25, 1 },
25678 +#else
25679 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
25680 { 128, 103, 76, 51, 25, 1 },
25681 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
25682 { 32, 26, 20, 14, 7, 1 },
25683 +#endif
25684 #if 0
25685 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
25686 { 2048, 1638, 1231, 819, 411, 1 },
25687 @@ -909,7 +921,7 @@ static ssize_t extract_entropy_user(stru
25688
25689 extract_buf(r, tmp);
25690 i = min_t(int, nbytes, EXTRACT_SIZE);
25691 - if (copy_to_user(buf, tmp, i)) {
25692 + if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
25693 ret = -EFAULT;
25694 break;
25695 }
25696 @@ -1214,7 +1226,7 @@ EXPORT_SYMBOL(generate_random_uuid);
25697 #include <linux/sysctl.h>
25698
25699 static int min_read_thresh = 8, min_write_thresh;
25700 -static int max_read_thresh = INPUT_POOL_WORDS * 32;
25701 +static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
25702 static int max_write_thresh = INPUT_POOL_WORDS * 32;
25703 static char sysctl_bootid[16];
25704
25705 diff -urNp linux-3.0.8/drivers/char/sonypi.c linux-3.0.8/drivers/char/sonypi.c
25706 --- linux-3.0.8/drivers/char/sonypi.c 2011-07-21 22:17:23.000000000 -0400
25707 +++ linux-3.0.8/drivers/char/sonypi.c 2011-08-23 21:47:55.000000000 -0400
25708 @@ -55,6 +55,7 @@
25709 #include <asm/uaccess.h>
25710 #include <asm/io.h>
25711 #include <asm/system.h>
25712 +#include <asm/local.h>
25713
25714 #include <linux/sonypi.h>
25715
25716 @@ -491,7 +492,7 @@ static struct sonypi_device {
25717 spinlock_t fifo_lock;
25718 wait_queue_head_t fifo_proc_list;
25719 struct fasync_struct *fifo_async;
25720 - int open_count;
25721 + local_t open_count;
25722 int model;
25723 struct input_dev *input_jog_dev;
25724 struct input_dev *input_key_dev;
25725 @@ -898,7 +899,7 @@ static int sonypi_misc_fasync(int fd, st
25726 static int sonypi_misc_release(struct inode *inode, struct file *file)
25727 {
25728 mutex_lock(&sonypi_device.lock);
25729 - sonypi_device.open_count--;
25730 + local_dec(&sonypi_device.open_count);
25731 mutex_unlock(&sonypi_device.lock);
25732 return 0;
25733 }
25734 @@ -907,9 +908,9 @@ static int sonypi_misc_open(struct inode
25735 {
25736 mutex_lock(&sonypi_device.lock);
25737 /* Flush input queue on first open */
25738 - if (!sonypi_device.open_count)
25739 + if (!local_read(&sonypi_device.open_count))
25740 kfifo_reset(&sonypi_device.fifo);
25741 - sonypi_device.open_count++;
25742 + local_inc(&sonypi_device.open_count);
25743 mutex_unlock(&sonypi_device.lock);
25744
25745 return 0;
25746 diff -urNp linux-3.0.8/drivers/char/tpm/tpm_bios.c linux-3.0.8/drivers/char/tpm/tpm_bios.c
25747 --- linux-3.0.8/drivers/char/tpm/tpm_bios.c 2011-07-21 22:17:23.000000000 -0400
25748 +++ linux-3.0.8/drivers/char/tpm/tpm_bios.c 2011-10-06 04:17:55.000000000 -0400
25749 @@ -173,7 +173,7 @@ static void *tpm_bios_measurements_start
25750 event = addr;
25751
25752 if ((event->event_type == 0 && event->event_size == 0) ||
25753 - ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
25754 + (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
25755 return NULL;
25756
25757 return addr;
25758 @@ -198,7 +198,7 @@ static void *tpm_bios_measurements_next(
25759 return NULL;
25760
25761 if ((event->event_type == 0 && event->event_size == 0) ||
25762 - ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
25763 + (event->event_size >= limit - v - sizeof(struct tcpa_event)))
25764 return NULL;
25765
25766 (*pos)++;
25767 @@ -291,7 +291,8 @@ static int tpm_binary_bios_measurements_
25768 int i;
25769
25770 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
25771 - seq_putc(m, data[i]);
25772 + if (!seq_putc(m, data[i]))
25773 + return -EFAULT;
25774
25775 return 0;
25776 }
25777 @@ -410,8 +411,13 @@ static int read_log(struct tpm_bios_log
25778 log->bios_event_log_end = log->bios_event_log + len;
25779
25780 virt = acpi_os_map_memory(start, len);
25781 + if (!virt) {
25782 + kfree(log->bios_event_log);
25783 + log->bios_event_log = NULL;
25784 + return -EFAULT;
25785 + }
25786
25787 - memcpy(log->bios_event_log, virt, len);
25788 + memcpy(log->bios_event_log, (const char __force_kernel *)virt, len);
25789
25790 acpi_os_unmap_memory(virt, len);
25791 return 0;
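The tpm_bios.c hunks above rearrange the event-log bounds check: instead of testing addr + sizeof(struct tcpa_event) + event->event_size >= limit, which can wrap around when event_size is huge, the patched code tests event_size against limit - addr - sizeof(struct tcpa_event), a subtraction that cannot overflow once addr is known to lie below limit. A tiny worked example of the difference (plain uintptr_t arithmetic, illustration only):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uintptr_t base  = UINTPTR_MAX - 100;	/* address of the current event */
	uintptr_t limit = UINTPTR_MAX - 50;	/* end of the mapped event log */
	uintptr_t size  = 1000;			/* bogus, attacker-supplied event_size */

	/* old form: the addition wraps, so the oversized event is NOT rejected */
	printf("old check rejects: %d\n", base + size >= limit);	/* prints 0 */

	/* new form: the subtraction stays in range, so the event IS rejected */
	printf("new check rejects: %d\n", size >= limit - base);	/* prints 1 */
	return 0;
}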
25792 diff -urNp linux-3.0.8/drivers/char/tpm/tpm.c linux-3.0.8/drivers/char/tpm/tpm.c
25793 --- linux-3.0.8/drivers/char/tpm/tpm.c 2011-10-24 08:05:30.000000000 -0400
25794 +++ linux-3.0.8/drivers/char/tpm/tpm.c 2011-10-16 21:55:27.000000000 -0400
25795 @@ -414,7 +414,7 @@ static ssize_t tpm_transmit(struct tpm_c
25796 chip->vendor.req_complete_val)
25797 goto out_recv;
25798
25799 - if ((status == chip->vendor.req_canceled)) {
25800 + if (status == chip->vendor.req_canceled) {
25801 dev_err(chip->dev, "Operation Canceled\n");
25802 rc = -ECANCELED;
25803 goto out;
25804 @@ -847,6 +847,8 @@ ssize_t tpm_show_pubek(struct device *de
25805
25806 struct tpm_chip *chip = dev_get_drvdata(dev);
25807
25808 + pax_track_stack();
25809 +
25810 tpm_cmd.header.in = tpm_readpubek_header;
25811 err = transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE,
25812 "attempting to read the PUBEK");
25813 diff -urNp linux-3.0.8/drivers/char/virtio_console.c linux-3.0.8/drivers/char/virtio_console.c
25814 --- linux-3.0.8/drivers/char/virtio_console.c 2011-07-21 22:17:23.000000000 -0400
25815 +++ linux-3.0.8/drivers/char/virtio_console.c 2011-10-06 04:17:55.000000000 -0400
25816 @@ -555,7 +555,7 @@ static ssize_t fill_readbuf(struct port
25817 if (to_user) {
25818 ssize_t ret;
25819
25820 - ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
25821 + ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count);
25822 if (ret)
25823 return -EFAULT;
25824 } else {
25825 @@ -654,7 +654,7 @@ static ssize_t port_fops_read(struct fil
25826 if (!port_has_data(port) && !port->host_connected)
25827 return 0;
25828
25829 - return fill_readbuf(port, ubuf, count, true);
25830 + return fill_readbuf(port, (char __force_kernel *)ubuf, count, true);
25831 }
25832
25833 static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
25834 diff -urNp linux-3.0.8/drivers/crypto/hifn_795x.c linux-3.0.8/drivers/crypto/hifn_795x.c
25835 --- linux-3.0.8/drivers/crypto/hifn_795x.c 2011-07-21 22:17:23.000000000 -0400
25836 +++ linux-3.0.8/drivers/crypto/hifn_795x.c 2011-08-23 21:48:14.000000000 -0400
25837 @@ -1655,6 +1655,8 @@ static int hifn_test(struct hifn_device
25838 0xCA, 0x34, 0x2B, 0x2E};
25839 struct scatterlist sg;
25840
25841 + pax_track_stack();
25842 +
25843 memset(src, 0, sizeof(src));
25844 memset(ctx.key, 0, sizeof(ctx.key));
25845
25846 diff -urNp linux-3.0.8/drivers/crypto/padlock-aes.c linux-3.0.8/drivers/crypto/padlock-aes.c
25847 --- linux-3.0.8/drivers/crypto/padlock-aes.c 2011-07-21 22:17:23.000000000 -0400
25848 +++ linux-3.0.8/drivers/crypto/padlock-aes.c 2011-08-23 21:48:14.000000000 -0400
25849 @@ -109,6 +109,8 @@ static int aes_set_key(struct crypto_tfm
25850 struct crypto_aes_ctx gen_aes;
25851 int cpu;
25852
25853 + pax_track_stack();
25854 +
25855 if (key_len % 8) {
25856 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
25857 return -EINVAL;
25858 diff -urNp linux-3.0.8/drivers/dma/ioat/dma_v3.c linux-3.0.8/drivers/dma/ioat/dma_v3.c
25859 --- linux-3.0.8/drivers/dma/ioat/dma_v3.c 2011-07-21 22:17:23.000000000 -0400
25860 +++ linux-3.0.8/drivers/dma/ioat/dma_v3.c 2011-10-11 10:44:33.000000000 -0400
25861 @@ -73,10 +73,10 @@
25862 /* provide a lookup table for setting the source address in the base or
25863 * extended descriptor of an xor or pq descriptor
25864 */
25865 -static const u8 xor_idx_to_desc __read_mostly = 0xd0;
25866 -static const u8 xor_idx_to_field[] __read_mostly = { 1, 4, 5, 6, 7, 0, 1, 2 };
25867 -static const u8 pq_idx_to_desc __read_mostly = 0xf8;
25868 -static const u8 pq_idx_to_field[] __read_mostly = { 1, 4, 5, 0, 1, 2, 4, 5 };
25869 +static const u8 xor_idx_to_desc = 0xd0;
25870 +static const u8 xor_idx_to_field[] = { 1, 4, 5, 6, 7, 0, 1, 2 };
25871 +static const u8 pq_idx_to_desc = 0xf8;
25872 +static const u8 pq_idx_to_field[] = { 1, 4, 5, 0, 1, 2, 4, 5 };
25873
25874 static dma_addr_t xor_get_src(struct ioat_raw_descriptor *descs[2], int idx)
25875 {
25876 diff -urNp linux-3.0.8/drivers/edac/amd64_edac.c linux-3.0.8/drivers/edac/amd64_edac.c
25877 --- linux-3.0.8/drivers/edac/amd64_edac.c 2011-07-21 22:17:23.000000000 -0400
25878 +++ linux-3.0.8/drivers/edac/amd64_edac.c 2011-10-11 10:44:33.000000000 -0400
25879 @@ -2670,7 +2670,7 @@ static void __devexit amd64_remove_one_i
25880 * PCI core identifies what devices are on a system during boot, and then
25881 * inquiry this table to see if this driver is for a given device found.
25882 */
25883 -static const struct pci_device_id amd64_pci_table[] __devinitdata = {
25884 +static const struct pci_device_id amd64_pci_table[] __devinitconst = {
25885 {
25886 .vendor = PCI_VENDOR_ID_AMD,
25887 .device = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
25888 diff -urNp linux-3.0.8/drivers/edac/amd76x_edac.c linux-3.0.8/drivers/edac/amd76x_edac.c
25889 --- linux-3.0.8/drivers/edac/amd76x_edac.c 2011-07-21 22:17:23.000000000 -0400
25890 +++ linux-3.0.8/drivers/edac/amd76x_edac.c 2011-10-11 10:44:33.000000000 -0400
25891 @@ -321,7 +321,7 @@ static void __devexit amd76x_remove_one(
25892 edac_mc_free(mci);
25893 }
25894
25895 -static const struct pci_device_id amd76x_pci_tbl[] __devinitdata = {
25896 +static const struct pci_device_id amd76x_pci_tbl[] __devinitconst = {
25897 {
25898 PCI_VEND_DEV(AMD, FE_GATE_700C), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
25899 AMD762},
25900 diff -urNp linux-3.0.8/drivers/edac/e752x_edac.c linux-3.0.8/drivers/edac/e752x_edac.c
25901 --- linux-3.0.8/drivers/edac/e752x_edac.c 2011-07-21 22:17:23.000000000 -0400
25902 +++ linux-3.0.8/drivers/edac/e752x_edac.c 2011-10-11 10:44:33.000000000 -0400
25903 @@ -1380,7 +1380,7 @@ static void __devexit e752x_remove_one(s
25904 edac_mc_free(mci);
25905 }
25906
25907 -static const struct pci_device_id e752x_pci_tbl[] __devinitdata = {
25908 +static const struct pci_device_id e752x_pci_tbl[] __devinitconst = {
25909 {
25910 PCI_VEND_DEV(INTEL, 7520_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
25911 E7520},
25912 diff -urNp linux-3.0.8/drivers/edac/e7xxx_edac.c linux-3.0.8/drivers/edac/e7xxx_edac.c
25913 --- linux-3.0.8/drivers/edac/e7xxx_edac.c 2011-07-21 22:17:23.000000000 -0400
25914 +++ linux-3.0.8/drivers/edac/e7xxx_edac.c 2011-10-11 10:44:33.000000000 -0400
25915 @@ -525,7 +525,7 @@ static void __devexit e7xxx_remove_one(s
25916 edac_mc_free(mci);
25917 }
25918
25919 -static const struct pci_device_id e7xxx_pci_tbl[] __devinitdata = {
25920 +static const struct pci_device_id e7xxx_pci_tbl[] __devinitconst = {
25921 {
25922 PCI_VEND_DEV(INTEL, 7205_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
25923 E7205},
25924 diff -urNp linux-3.0.8/drivers/edac/edac_pci_sysfs.c linux-3.0.8/drivers/edac/edac_pci_sysfs.c
25925 --- linux-3.0.8/drivers/edac/edac_pci_sysfs.c 2011-07-21 22:17:23.000000000 -0400
25926 +++ linux-3.0.8/drivers/edac/edac_pci_sysfs.c 2011-08-23 21:47:55.000000000 -0400
25927 @@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log
25928 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
25929 static int edac_pci_poll_msec = 1000; /* one second workq period */
25930
25931 -static atomic_t pci_parity_count = ATOMIC_INIT(0);
25932 -static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
25933 +static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
25934 +static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
25935
25936 static struct kobject *edac_pci_top_main_kobj;
25937 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
25938 @@ -582,7 +582,7 @@ static void edac_pci_dev_parity_test(str
25939 edac_printk(KERN_CRIT, EDAC_PCI,
25940 "Signaled System Error on %s\n",
25941 pci_name(dev));
25942 - atomic_inc(&pci_nonparity_count);
25943 + atomic_inc_unchecked(&pci_nonparity_count);
25944 }
25945
25946 if (status & (PCI_STATUS_PARITY)) {
25947 @@ -590,7 +590,7 @@ static void edac_pci_dev_parity_test(str
25948 "Master Data Parity Error on %s\n",
25949 pci_name(dev));
25950
25951 - atomic_inc(&pci_parity_count);
25952 + atomic_inc_unchecked(&pci_parity_count);
25953 }
25954
25955 if (status & (PCI_STATUS_DETECTED_PARITY)) {
25956 @@ -598,7 +598,7 @@ static void edac_pci_dev_parity_test(str
25957 "Detected Parity Error on %s\n",
25958 pci_name(dev));
25959
25960 - atomic_inc(&pci_parity_count);
25961 + atomic_inc_unchecked(&pci_parity_count);
25962 }
25963 }
25964
25965 @@ -619,7 +619,7 @@ static void edac_pci_dev_parity_test(str
25966 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
25967 "Signaled System Error on %s\n",
25968 pci_name(dev));
25969 - atomic_inc(&pci_nonparity_count);
25970 + atomic_inc_unchecked(&pci_nonparity_count);
25971 }
25972
25973 if (status & (PCI_STATUS_PARITY)) {
25974 @@ -627,7 +627,7 @@ static void edac_pci_dev_parity_test(str
25975 "Master Data Parity Error on "
25976 "%s\n", pci_name(dev));
25977
25978 - atomic_inc(&pci_parity_count);
25979 + atomic_inc_unchecked(&pci_parity_count);
25980 }
25981
25982 if (status & (PCI_STATUS_DETECTED_PARITY)) {
25983 @@ -635,7 +635,7 @@ static void edac_pci_dev_parity_test(str
25984 "Detected Parity Error on %s\n",
25985 pci_name(dev));
25986
25987 - atomic_inc(&pci_parity_count);
25988 + atomic_inc_unchecked(&pci_parity_count);
25989 }
25990 }
25991 }
25992 @@ -677,7 +677,7 @@ void edac_pci_do_parity_check(void)
25993 if (!check_pci_errors)
25994 return;
25995
25996 - before_count = atomic_read(&pci_parity_count);
25997 + before_count = atomic_read_unchecked(&pci_parity_count);
25998
25999 /* scan all PCI devices looking for a Parity Error on devices and
26000 * bridges.
26001 @@ -689,7 +689,7 @@ void edac_pci_do_parity_check(void)
26002 /* Only if operator has selected panic on PCI Error */
26003 if (edac_pci_get_panic_on_pe()) {
26004 /* If the count is different 'after' from 'before' */
26005 - if (before_count != atomic_read(&pci_parity_count))
26006 + if (before_count != atomic_read_unchecked(&pci_parity_count))
26007 panic("EDAC: PCI Parity Error");
26008 }
26009 }
26010 diff -urNp linux-3.0.8/drivers/edac/i3000_edac.c linux-3.0.8/drivers/edac/i3000_edac.c
26011 --- linux-3.0.8/drivers/edac/i3000_edac.c 2011-07-21 22:17:23.000000000 -0400
26012 +++ linux-3.0.8/drivers/edac/i3000_edac.c 2011-10-11 10:44:33.000000000 -0400
26013 @@ -470,7 +470,7 @@ static void __devexit i3000_remove_one(s
26014 edac_mc_free(mci);
26015 }
26016
26017 -static const struct pci_device_id i3000_pci_tbl[] __devinitdata = {
26018 +static const struct pci_device_id i3000_pci_tbl[] __devinitconst = {
26019 {
26020 PCI_VEND_DEV(INTEL, 3000_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
26021 I3000},
26022 diff -urNp linux-3.0.8/drivers/edac/i3200_edac.c linux-3.0.8/drivers/edac/i3200_edac.c
26023 --- linux-3.0.8/drivers/edac/i3200_edac.c 2011-07-21 22:17:23.000000000 -0400
26024 +++ linux-3.0.8/drivers/edac/i3200_edac.c 2011-10-11 10:44:33.000000000 -0400
26025 @@ -456,7 +456,7 @@ static void __devexit i3200_remove_one(s
26026 edac_mc_free(mci);
26027 }
26028
26029 -static const struct pci_device_id i3200_pci_tbl[] __devinitdata = {
26030 +static const struct pci_device_id i3200_pci_tbl[] __devinitconst = {
26031 {
26032 PCI_VEND_DEV(INTEL, 3200_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
26033 I3200},
26034 diff -urNp linux-3.0.8/drivers/edac/i5000_edac.c linux-3.0.8/drivers/edac/i5000_edac.c
26035 --- linux-3.0.8/drivers/edac/i5000_edac.c 2011-07-21 22:17:23.000000000 -0400
26036 +++ linux-3.0.8/drivers/edac/i5000_edac.c 2011-10-11 10:44:33.000000000 -0400
26037 @@ -1516,7 +1516,7 @@ static void __devexit i5000_remove_one(s
26038 *
26039 * The "E500P" device is the first device supported.
26040 */
26041 -static const struct pci_device_id i5000_pci_tbl[] __devinitdata = {
26042 +static const struct pci_device_id i5000_pci_tbl[] __devinitconst = {
26043 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I5000_DEV16),
26044 .driver_data = I5000P},
26045
26046 diff -urNp linux-3.0.8/drivers/edac/i5100_edac.c linux-3.0.8/drivers/edac/i5100_edac.c
26047 --- linux-3.0.8/drivers/edac/i5100_edac.c 2011-07-21 22:17:23.000000000 -0400
26048 +++ linux-3.0.8/drivers/edac/i5100_edac.c 2011-10-11 10:44:33.000000000 -0400
26049 @@ -1051,7 +1051,7 @@ static void __devexit i5100_remove_one(s
26050 edac_mc_free(mci);
26051 }
26052
26053 -static const struct pci_device_id i5100_pci_tbl[] __devinitdata = {
26054 +static const struct pci_device_id i5100_pci_tbl[] __devinitconst = {
26055 /* Device 16, Function 0, Channel 0 Memory Map, Error Flag/Mask, ... */
26056 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5100_16) },
26057 { 0, }
26058 diff -urNp linux-3.0.8/drivers/edac/i5400_edac.c linux-3.0.8/drivers/edac/i5400_edac.c
26059 --- linux-3.0.8/drivers/edac/i5400_edac.c 2011-07-21 22:17:23.000000000 -0400
26060 +++ linux-3.0.8/drivers/edac/i5400_edac.c 2011-10-11 10:44:33.000000000 -0400
26061 @@ -1383,7 +1383,7 @@ static void __devexit i5400_remove_one(s
26062 *
26063 * The "E500P" device is the first device supported.
26064 */
26065 -static const struct pci_device_id i5400_pci_tbl[] __devinitdata = {
26066 +static const struct pci_device_id i5400_pci_tbl[] __devinitconst = {
26067 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_ERR)},
26068 {0,} /* 0 terminated list. */
26069 };
26070 diff -urNp linux-3.0.8/drivers/edac/i7300_edac.c linux-3.0.8/drivers/edac/i7300_edac.c
26071 --- linux-3.0.8/drivers/edac/i7300_edac.c 2011-07-21 22:17:23.000000000 -0400
26072 +++ linux-3.0.8/drivers/edac/i7300_edac.c 2011-10-11 10:44:33.000000000 -0400
26073 @@ -1191,7 +1191,7 @@ static void __devexit i7300_remove_one(s
26074 *
26075 * Has only 8086:360c PCI ID
26076 */
26077 -static const struct pci_device_id i7300_pci_tbl[] __devinitdata = {
26078 +static const struct pci_device_id i7300_pci_tbl[] __devinitconst = {
26079 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I7300_MCH_ERR)},
26080 {0,} /* 0 terminated list. */
26081 };
26082 diff -urNp linux-3.0.8/drivers/edac/i7core_edac.c linux-3.0.8/drivers/edac/i7core_edac.c
26083 --- linux-3.0.8/drivers/edac/i7core_edac.c 2011-10-24 08:05:23.000000000 -0400
26084 +++ linux-3.0.8/drivers/edac/i7core_edac.c 2011-10-11 10:44:33.000000000 -0400
26085 @@ -359,7 +359,7 @@ static const struct pci_id_table pci_dev
26086 /*
26087 * pci_device_id table for which devices we are looking for
26088 */
26089 -static const struct pci_device_id i7core_pci_tbl[] __devinitdata = {
26090 +static const struct pci_device_id i7core_pci_tbl[] __devinitconst = {
26091 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_X58_HUB_MGMT)},
26092 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LYNNFIELD_QPI_LINK0)},
26093 {0,} /* 0 terminated list. */
26094 diff -urNp linux-3.0.8/drivers/edac/i82443bxgx_edac.c linux-3.0.8/drivers/edac/i82443bxgx_edac.c
26095 --- linux-3.0.8/drivers/edac/i82443bxgx_edac.c 2011-07-21 22:17:23.000000000 -0400
26096 +++ linux-3.0.8/drivers/edac/i82443bxgx_edac.c 2011-10-11 10:44:33.000000000 -0400
26097 @@ -380,7 +380,7 @@ static void __devexit i82443bxgx_edacmc_
26098
26099 EXPORT_SYMBOL_GPL(i82443bxgx_edacmc_remove_one);
26100
26101 -static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitdata = {
26102 +static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitconst = {
26103 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_0)},
26104 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2)},
26105 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_0)},
26106 diff -urNp linux-3.0.8/drivers/edac/i82860_edac.c linux-3.0.8/drivers/edac/i82860_edac.c
26107 --- linux-3.0.8/drivers/edac/i82860_edac.c 2011-07-21 22:17:23.000000000 -0400
26108 +++ linux-3.0.8/drivers/edac/i82860_edac.c 2011-10-11 10:44:33.000000000 -0400
26109 @@ -270,7 +270,7 @@ static void __devexit i82860_remove_one(
26110 edac_mc_free(mci);
26111 }
26112
26113 -static const struct pci_device_id i82860_pci_tbl[] __devinitdata = {
26114 +static const struct pci_device_id i82860_pci_tbl[] __devinitconst = {
26115 {
26116 PCI_VEND_DEV(INTEL, 82860_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
26117 I82860},
26118 diff -urNp linux-3.0.8/drivers/edac/i82875p_edac.c linux-3.0.8/drivers/edac/i82875p_edac.c
26119 --- linux-3.0.8/drivers/edac/i82875p_edac.c 2011-07-21 22:17:23.000000000 -0400
26120 +++ linux-3.0.8/drivers/edac/i82875p_edac.c 2011-10-11 10:44:33.000000000 -0400
26121 @@ -511,7 +511,7 @@ static void __devexit i82875p_remove_one
26122 edac_mc_free(mci);
26123 }
26124
26125 -static const struct pci_device_id i82875p_pci_tbl[] __devinitdata = {
26126 +static const struct pci_device_id i82875p_pci_tbl[] __devinitconst = {
26127 {
26128 PCI_VEND_DEV(INTEL, 82875_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
26129 I82875P},
26130 diff -urNp linux-3.0.8/drivers/edac/i82975x_edac.c linux-3.0.8/drivers/edac/i82975x_edac.c
26131 --- linux-3.0.8/drivers/edac/i82975x_edac.c 2011-07-21 22:17:23.000000000 -0400
26132 +++ linux-3.0.8/drivers/edac/i82975x_edac.c 2011-10-11 10:44:33.000000000 -0400
26133 @@ -604,7 +604,7 @@ static void __devexit i82975x_remove_one
26134 edac_mc_free(mci);
26135 }
26136
26137 -static const struct pci_device_id i82975x_pci_tbl[] __devinitdata = {
26138 +static const struct pci_device_id i82975x_pci_tbl[] __devinitconst = {
26139 {
26140 PCI_VEND_DEV(INTEL, 82975_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
26141 I82975X
26142 diff -urNp linux-3.0.8/drivers/edac/mce_amd.h linux-3.0.8/drivers/edac/mce_amd.h
26143 --- linux-3.0.8/drivers/edac/mce_amd.h 2011-07-21 22:17:23.000000000 -0400
26144 +++ linux-3.0.8/drivers/edac/mce_amd.h 2011-08-23 21:47:55.000000000 -0400
26145 @@ -83,7 +83,7 @@ struct amd_decoder_ops {
26146 bool (*dc_mce)(u16, u8);
26147 bool (*ic_mce)(u16, u8);
26148 bool (*nb_mce)(u16, u8);
26149 -};
26150 +} __no_const;
26151
26152 void amd_report_gart_errors(bool);
26153 void amd_register_ecc_decoder(void (*f)(int, struct mce *, u32));
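
amd_decoder_ops holds only function pointers, which the constify plugin would normally force const; because the driver assigns these callbacks at runtime, __no_const opts the struct out. When the plugin is not in use the annotation presumably reduces to nothing, along the lines of:

#ifndef __no_const
# define __no_const	/* constify plugin inactive: annotation is a no-op */
#endif
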
26154 diff -urNp linux-3.0.8/drivers/edac/r82600_edac.c linux-3.0.8/drivers/edac/r82600_edac.c
26155 --- linux-3.0.8/drivers/edac/r82600_edac.c 2011-07-21 22:17:23.000000000 -0400
26156 +++ linux-3.0.8/drivers/edac/r82600_edac.c 2011-10-11 10:44:33.000000000 -0400
26157 @@ -373,7 +373,7 @@ static void __devexit r82600_remove_one(
26158 edac_mc_free(mci);
26159 }
26160
26161 -static const struct pci_device_id r82600_pci_tbl[] __devinitdata = {
26162 +static const struct pci_device_id r82600_pci_tbl[] __devinitconst = {
26163 {
26164 PCI_DEVICE(PCI_VENDOR_ID_RADISYS, R82600_BRIDGE_ID)
26165 },
26166 diff -urNp linux-3.0.8/drivers/edac/x38_edac.c linux-3.0.8/drivers/edac/x38_edac.c
26167 --- linux-3.0.8/drivers/edac/x38_edac.c 2011-07-21 22:17:23.000000000 -0400
26168 +++ linux-3.0.8/drivers/edac/x38_edac.c 2011-10-11 10:44:33.000000000 -0400
26169 @@ -440,7 +440,7 @@ static void __devexit x38_remove_one(str
26170 edac_mc_free(mci);
26171 }
26172
26173 -static const struct pci_device_id x38_pci_tbl[] __devinitdata = {
26174 +static const struct pci_device_id x38_pci_tbl[] __devinitconst = {
26175 {
26176 PCI_VEND_DEV(INTEL, X38_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
26177 X38},
26178 diff -urNp linux-3.0.8/drivers/firewire/core-card.c linux-3.0.8/drivers/firewire/core-card.c
26179 --- linux-3.0.8/drivers/firewire/core-card.c 2011-07-21 22:17:23.000000000 -0400
26180 +++ linux-3.0.8/drivers/firewire/core-card.c 2011-08-23 21:47:55.000000000 -0400
26181 @@ -657,7 +657,7 @@ void fw_card_release(struct kref *kref)
26182
26183 void fw_core_remove_card(struct fw_card *card)
26184 {
26185 - struct fw_card_driver dummy_driver = dummy_driver_template;
26186 + fw_card_driver_no_const dummy_driver = dummy_driver_template;
26187
26188 card->driver->update_phy_reg(card, 4,
26189 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
26190 diff -urNp linux-3.0.8/drivers/firewire/core-cdev.c linux-3.0.8/drivers/firewire/core-cdev.c
26191 --- linux-3.0.8/drivers/firewire/core-cdev.c 2011-10-24 08:05:21.000000000 -0400
26192 +++ linux-3.0.8/drivers/firewire/core-cdev.c 2011-08-23 21:47:55.000000000 -0400
26193 @@ -1313,8 +1313,7 @@ static int init_iso_resource(struct clie
26194 int ret;
26195
26196 if ((request->channels == 0 && request->bandwidth == 0) ||
26197 - request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
26198 - request->bandwidth < 0)
26199 + request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
26200 return -EINVAL;
26201
26202 r = kmalloc(sizeof(*r), GFP_KERNEL);
26203 diff -urNp linux-3.0.8/drivers/firewire/core.h linux-3.0.8/drivers/firewire/core.h
26204 --- linux-3.0.8/drivers/firewire/core.h 2011-07-21 22:17:23.000000000 -0400
26205 +++ linux-3.0.8/drivers/firewire/core.h 2011-08-23 21:47:55.000000000 -0400
26206 @@ -101,6 +101,7 @@ struct fw_card_driver {
26207
26208 int (*stop_iso)(struct fw_iso_context *ctx);
26209 };
26210 +typedef struct fw_card_driver __no_const fw_card_driver_no_const;
26211
26212 void fw_card_initialize(struct fw_card *card,
26213 const struct fw_card_driver *driver, struct device *device);
26214 diff -urNp linux-3.0.8/drivers/firewire/core-transaction.c linux-3.0.8/drivers/firewire/core-transaction.c
26215 --- linux-3.0.8/drivers/firewire/core-transaction.c 2011-07-21 22:17:23.000000000 -0400
26216 +++ linux-3.0.8/drivers/firewire/core-transaction.c 2011-08-23 21:48:14.000000000 -0400
26217 @@ -37,6 +37,7 @@
26218 #include <linux/timer.h>
26219 #include <linux/types.h>
26220 #include <linux/workqueue.h>
26221 +#include <linux/sched.h>
26222
26223 #include <asm/byteorder.h>
26224
26225 @@ -422,6 +423,8 @@ int fw_run_transaction(struct fw_card *c
26226 struct transaction_callback_data d;
26227 struct fw_transaction t;
26228
26229 + pax_track_stack();
26230 +
26231 init_timer_on_stack(&t.split_timeout_timer);
26232 init_completion(&d.done);
26233 d.payload = payload;
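
fw_run_transaction() keeps a callback-data struct and a struct fw_transaction on the stack, so the patch marks it with pax_track_stack() for the stack-clearing and stack-size-tracking feature. In configurations without that feature the call is presumably a no-op, e.g.:

#ifndef CONFIG_PAX_MEMORY_STACKLEAK
# define pax_track_stack()	do { } while (0)
#endif
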
26234 diff -urNp linux-3.0.8/drivers/firmware/dmi_scan.c linux-3.0.8/drivers/firmware/dmi_scan.c
26235 --- linux-3.0.8/drivers/firmware/dmi_scan.c 2011-07-21 22:17:23.000000000 -0400
26236 +++ linux-3.0.8/drivers/firmware/dmi_scan.c 2011-10-06 04:17:55.000000000 -0400
26237 @@ -449,11 +449,6 @@ void __init dmi_scan_machine(void)
26238 }
26239 }
26240 else {
26241 - /*
26242 - * no iounmap() for that ioremap(); it would be a no-op, but
26243 - * it's so early in setup that sucker gets confused into doing
26244 - * what it shouldn't if we actually call it.
26245 - */
26246 p = dmi_ioremap(0xF0000, 0x10000);
26247 if (p == NULL)
26248 goto error;
26249 @@ -725,7 +720,7 @@ int dmi_walk(void (*decode)(const struct
26250 if (buf == NULL)
26251 return -1;
26252
26253 - dmi_table(buf, dmi_len, dmi_num, decode, private_data);
26254 + dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
26255
26256 iounmap(buf);
26257 return 0;
26258 diff -urNp linux-3.0.8/drivers/gpio/vr41xx_giu.c linux-3.0.8/drivers/gpio/vr41xx_giu.c
26259 --- linux-3.0.8/drivers/gpio/vr41xx_giu.c 2011-07-21 22:17:23.000000000 -0400
26260 +++ linux-3.0.8/drivers/gpio/vr41xx_giu.c 2011-08-23 21:47:55.000000000 -0400
26261 @@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
26262 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
26263 maskl, pendl, maskh, pendh);
26264
26265 - atomic_inc(&irq_err_count);
26266 + atomic_inc_unchecked(&irq_err_count);
26267
26268 return -EINVAL;
26269 }
26270 diff -urNp linux-3.0.8/drivers/gpu/drm/drm_crtc.c linux-3.0.8/drivers/gpu/drm/drm_crtc.c
26271 --- linux-3.0.8/drivers/gpu/drm/drm_crtc.c 2011-07-21 22:17:23.000000000 -0400
26272 +++ linux-3.0.8/drivers/gpu/drm/drm_crtc.c 2011-10-06 04:17:55.000000000 -0400
26273 @@ -1372,7 +1372,7 @@ int drm_mode_getconnector(struct drm_dev
26274 */
26275 if ((out_resp->count_modes >= mode_count) && mode_count) {
26276 copied = 0;
26277 - mode_ptr = (struct drm_mode_modeinfo *)(unsigned long)out_resp->modes_ptr;
26278 + mode_ptr = (struct drm_mode_modeinfo __user *)(unsigned long)out_resp->modes_ptr;
26279 list_for_each_entry(mode, &connector->modes, head) {
26280 drm_crtc_convert_to_umode(&u_mode, mode);
26281 if (copy_to_user(mode_ptr + copied,
26282 @@ -1387,8 +1387,8 @@ int drm_mode_getconnector(struct drm_dev
26283
26284 if ((out_resp->count_props >= props_count) && props_count) {
26285 copied = 0;
26286 - prop_ptr = (uint32_t *)(unsigned long)(out_resp->props_ptr);
26287 - prop_values = (uint64_t *)(unsigned long)(out_resp->prop_values_ptr);
26288 + prop_ptr = (uint32_t __user *)(unsigned long)(out_resp->props_ptr);
26289 + prop_values = (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr);
26290 for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
26291 if (connector->property_ids[i] != 0) {
26292 if (put_user(connector->property_ids[i],
26293 @@ -1410,7 +1410,7 @@ int drm_mode_getconnector(struct drm_dev
26294
26295 if ((out_resp->count_encoders >= encoders_count) && encoders_count) {
26296 copied = 0;
26297 - encoder_ptr = (uint32_t *)(unsigned long)(out_resp->encoders_ptr);
26298 + encoder_ptr = (uint32_t __user *)(unsigned long)(out_resp->encoders_ptr);
26299 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
26300 if (connector->encoder_ids[i] != 0) {
26301 if (put_user(connector->encoder_ids[i],
26302 @@ -1569,7 +1569,7 @@ int drm_mode_setcrtc(struct drm_device *
26303 }
26304
26305 for (i = 0; i < crtc_req->count_connectors; i++) {
26306 - set_connectors_ptr = (uint32_t *)(unsigned long)crtc_req->set_connectors_ptr;
26307 + set_connectors_ptr = (uint32_t __user *)(unsigned long)crtc_req->set_connectors_ptr;
26308 if (get_user(out_id, &set_connectors_ptr[i])) {
26309 ret = -EFAULT;
26310 goto out;
26311 @@ -1850,7 +1850,7 @@ int drm_mode_dirtyfb_ioctl(struct drm_de
26312 fb = obj_to_fb(obj);
26313
26314 num_clips = r->num_clips;
26315 - clips_ptr = (struct drm_clip_rect *)(unsigned long)r->clips_ptr;
26316 + clips_ptr = (struct drm_clip_rect __user *)(unsigned long)r->clips_ptr;
26317
26318 if (!num_clips != !clips_ptr) {
26319 ret = -EINVAL;
26320 @@ -2270,7 +2270,7 @@ int drm_mode_getproperty_ioctl(struct dr
26321 out_resp->flags = property->flags;
26322
26323 if ((out_resp->count_values >= value_count) && value_count) {
26324 - values_ptr = (uint64_t *)(unsigned long)out_resp->values_ptr;
26325 + values_ptr = (uint64_t __user *)(unsigned long)out_resp->values_ptr;
26326 for (i = 0; i < value_count; i++) {
26327 if (copy_to_user(values_ptr + i, &property->values[i], sizeof(uint64_t))) {
26328 ret = -EFAULT;
26329 @@ -2283,7 +2283,7 @@ int drm_mode_getproperty_ioctl(struct dr
26330 if (property->flags & DRM_MODE_PROP_ENUM) {
26331 if ((out_resp->count_enum_blobs >= enum_count) && enum_count) {
26332 copied = 0;
26333 - enum_ptr = (struct drm_mode_property_enum *)(unsigned long)out_resp->enum_blob_ptr;
26334 + enum_ptr = (struct drm_mode_property_enum __user *)(unsigned long)out_resp->enum_blob_ptr;
26335 list_for_each_entry(prop_enum, &property->enum_blob_list, head) {
26336
26337 if (copy_to_user(&enum_ptr[copied].value, &prop_enum->value, sizeof(uint64_t))) {
26338 @@ -2306,7 +2306,7 @@ int drm_mode_getproperty_ioctl(struct dr
26339 if ((out_resp->count_enum_blobs >= blob_count) && blob_count) {
26340 copied = 0;
26341 blob_id_ptr = (uint32_t *)(unsigned long)out_resp->enum_blob_ptr;
26342 - blob_length_ptr = (uint32_t *)(unsigned long)out_resp->values_ptr;
26343 + blob_length_ptr = (uint32_t __user *)(unsigned long)out_resp->values_ptr;
26344
26345 list_for_each_entry(prop_blob, &property->enum_blob_list, head) {
26346 if (put_user(prop_blob->base.id, blob_id_ptr + copied)) {
26347 @@ -2367,7 +2367,7 @@ int drm_mode_getblob_ioctl(struct drm_de
26348 struct drm_mode_get_blob *out_resp = data;
26349 struct drm_property_blob *blob;
26350 int ret = 0;
26351 - void *blob_ptr;
26352 + void __user *blob_ptr;
26353
26354 if (!drm_core_check_feature(dev, DRIVER_MODESET))
26355 return -EINVAL;
26356 @@ -2381,7 +2381,7 @@ int drm_mode_getblob_ioctl(struct drm_de
26357 blob = obj_to_blob(obj);
26358
26359 if (out_resp->length == blob->length) {
26360 - blob_ptr = (void *)(unsigned long)out_resp->data;
26361 + blob_ptr = (void __user *)(unsigned long)out_resp->data;
26362 if (copy_to_user(blob_ptr, blob->data, blob->length)){
26363 ret = -EFAULT;
26364 goto done;
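
The drm_crtc.c hunks change no generated code: the u64 handles coming from user space are cast to __user pointers so that sparse keeps tracking the address space all the way into copy_to_user()/put_user(). The annotation itself is defined in include/linux/compiler.h essentially as:

#ifdef __CHECKER__
# define __user	__attribute__((noderef, address_space(1)))
#else
# define __user
#endif
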
26365 diff -urNp linux-3.0.8/drivers/gpu/drm/drm_crtc_helper.c linux-3.0.8/drivers/gpu/drm/drm_crtc_helper.c
26366 --- linux-3.0.8/drivers/gpu/drm/drm_crtc_helper.c 2011-07-21 22:17:23.000000000 -0400
26367 +++ linux-3.0.8/drivers/gpu/drm/drm_crtc_helper.c 2011-08-23 21:48:14.000000000 -0400
26368 @@ -276,7 +276,7 @@ static bool drm_encoder_crtc_ok(struct d
26369 struct drm_crtc *tmp;
26370 int crtc_mask = 1;
26371
26372 - WARN(!crtc, "checking null crtc?\n");
26373 + BUG_ON(!crtc);
26374
26375 dev = crtc->dev;
26376
26377 @@ -343,6 +343,8 @@ bool drm_crtc_helper_set_mode(struct drm
26378 struct drm_encoder *encoder;
26379 bool ret = true;
26380
26381 + pax_track_stack();
26382 +
26383 crtc->enabled = drm_helper_crtc_in_use(crtc);
26384 if (!crtc->enabled)
26385 return true;
26386 diff -urNp linux-3.0.8/drivers/gpu/drm/drm_drv.c linux-3.0.8/drivers/gpu/drm/drm_drv.c
26387 --- linux-3.0.8/drivers/gpu/drm/drm_drv.c 2011-07-21 22:17:23.000000000 -0400
26388 +++ linux-3.0.8/drivers/gpu/drm/drm_drv.c 2011-10-06 04:17:55.000000000 -0400
26389 @@ -307,7 +307,7 @@ module_exit(drm_core_exit);
26390 /**
26391 * Copy and IOCTL return string to user space
26392 */
26393 -static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
26394 +static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value)
26395 {
26396 int len;
26397
26398 @@ -386,7 +386,7 @@ long drm_ioctl(struct file *filp,
26399
26400 dev = file_priv->minor->dev;
26401 atomic_inc(&dev->ioctl_count);
26402 - atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
26403 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
26404 ++file_priv->ioctl_count;
26405
26406 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
26407 diff -urNp linux-3.0.8/drivers/gpu/drm/drm_fops.c linux-3.0.8/drivers/gpu/drm/drm_fops.c
26408 --- linux-3.0.8/drivers/gpu/drm/drm_fops.c 2011-07-21 22:17:23.000000000 -0400
26409 +++ linux-3.0.8/drivers/gpu/drm/drm_fops.c 2011-08-23 21:47:55.000000000 -0400
26410 @@ -70,7 +70,7 @@ static int drm_setup(struct drm_device *
26411 }
26412
26413 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
26414 - atomic_set(&dev->counts[i], 0);
26415 + atomic_set_unchecked(&dev->counts[i], 0);
26416
26417 dev->sigdata.lock = NULL;
26418
26419 @@ -134,8 +134,8 @@ int drm_open(struct inode *inode, struct
26420
26421 retcode = drm_open_helper(inode, filp, dev);
26422 if (!retcode) {
26423 - atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
26424 - if (!dev->open_count++)
26425 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
26426 + if (local_inc_return(&dev->open_count) == 1)
26427 retcode = drm_setup(dev);
26428 }
26429 if (!retcode) {
26430 @@ -472,7 +472,7 @@ int drm_release(struct inode *inode, str
26431
26432 mutex_lock(&drm_global_mutex);
26433
26434 - DRM_DEBUG("open_count = %d\n", dev->open_count);
26435 + DRM_DEBUG("open_count = %d\n", local_read(&dev->open_count));
26436
26437 if (dev->driver->preclose)
26438 dev->driver->preclose(dev, file_priv);
26439 @@ -484,7 +484,7 @@ int drm_release(struct inode *inode, str
26440 DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
26441 task_pid_nr(current),
26442 (long)old_encode_dev(file_priv->minor->device),
26443 - dev->open_count);
26444 + local_read(&dev->open_count));
26445
26446 /* if the master has gone away we can't do anything with the lock */
26447 if (file_priv->minor->master)
26448 @@ -565,8 +565,8 @@ int drm_release(struct inode *inode, str
26449 * End inline drm_release
26450 */
26451
26452 - atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
26453 - if (!--dev->open_count) {
26454 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
26455 + if (local_dec_and_test(&dev->open_count)) {
26456 if (atomic_read(&dev->ioctl_count)) {
26457 DRM_ERROR("Device busy: %d\n",
26458 atomic_read(&dev->ioctl_count));
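
drm_fops.c converts dev->open_count from a bare int into a counter driven through the local_t API, so the first-open and last-close transitions are decided by atomic read-modify-write operations instead of unlocked ++/--. The pattern in isolation (a sketch with a stand-in structure, not the DRM code itself):

#include <asm/local.h>

struct dev_stub {
	local_t open_count;		/* replaces "int open_count" */
};

static int stub_open(struct dev_stub *dev)
{
	/* first opener (0 -> 1) gets to run the one-time setup */
	return local_inc_return(&dev->open_count) == 1;
}

static int stub_release(struct dev_stub *dev)
{
	/* last closer (1 -> 0) gets to run the teardown */
	return local_dec_and_test(&dev->open_count);
}
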
26459 diff -urNp linux-3.0.8/drivers/gpu/drm/drm_global.c linux-3.0.8/drivers/gpu/drm/drm_global.c
26460 --- linux-3.0.8/drivers/gpu/drm/drm_global.c 2011-07-21 22:17:23.000000000 -0400
26461 +++ linux-3.0.8/drivers/gpu/drm/drm_global.c 2011-08-23 21:47:55.000000000 -0400
26462 @@ -36,7 +36,7 @@
26463 struct drm_global_item {
26464 struct mutex mutex;
26465 void *object;
26466 - int refcount;
26467 + atomic_t refcount;
26468 };
26469
26470 static struct drm_global_item glob[DRM_GLOBAL_NUM];
26471 @@ -49,7 +49,7 @@ void drm_global_init(void)
26472 struct drm_global_item *item = &glob[i];
26473 mutex_init(&item->mutex);
26474 item->object = NULL;
26475 - item->refcount = 0;
26476 + atomic_set(&item->refcount, 0);
26477 }
26478 }
26479
26480 @@ -59,7 +59,7 @@ void drm_global_release(void)
26481 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
26482 struct drm_global_item *item = &glob[i];
26483 BUG_ON(item->object != NULL);
26484 - BUG_ON(item->refcount != 0);
26485 + BUG_ON(atomic_read(&item->refcount) != 0);
26486 }
26487 }
26488
26489 @@ -70,7 +70,7 @@ int drm_global_item_ref(struct drm_globa
26490 void *object;
26491
26492 mutex_lock(&item->mutex);
26493 - if (item->refcount == 0) {
26494 + if (atomic_read(&item->refcount) == 0) {
26495 item->object = kzalloc(ref->size, GFP_KERNEL);
26496 if (unlikely(item->object == NULL)) {
26497 ret = -ENOMEM;
26498 @@ -83,7 +83,7 @@ int drm_global_item_ref(struct drm_globa
26499 goto out_err;
26500
26501 }
26502 - ++item->refcount;
26503 + atomic_inc(&item->refcount);
26504 ref->object = item->object;
26505 object = item->object;
26506 mutex_unlock(&item->mutex);
26507 @@ -100,9 +100,9 @@ void drm_global_item_unref(struct drm_gl
26508 struct drm_global_item *item = &glob[ref->global_type];
26509
26510 mutex_lock(&item->mutex);
26511 - BUG_ON(item->refcount == 0);
26512 + BUG_ON(atomic_read(&item->refcount) == 0);
26513 BUG_ON(ref->object != item->object);
26514 - if (--item->refcount == 0) {
26515 + if (atomic_dec_and_test(&item->refcount)) {
26516 ref->release(ref);
26517 item->object = NULL;
26518 }
26519 diff -urNp linux-3.0.8/drivers/gpu/drm/drm_info.c linux-3.0.8/drivers/gpu/drm/drm_info.c
26520 --- linux-3.0.8/drivers/gpu/drm/drm_info.c 2011-07-21 22:17:23.000000000 -0400
26521 +++ linux-3.0.8/drivers/gpu/drm/drm_info.c 2011-08-23 21:48:14.000000000 -0400
26522 @@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void
26523 struct drm_local_map *map;
26524 struct drm_map_list *r_list;
26525
26526 - /* Hardcoded from _DRM_FRAME_BUFFER,
26527 - _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
26528 - _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
26529 - const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
26530 + static const char * const types[] = {
26531 + [_DRM_FRAME_BUFFER] = "FB",
26532 + [_DRM_REGISTERS] = "REG",
26533 + [_DRM_SHM] = "SHM",
26534 + [_DRM_AGP] = "AGP",
26535 + [_DRM_SCATTER_GATHER] = "SG",
26536 + [_DRM_CONSISTENT] = "PCI",
26537 + [_DRM_GEM] = "GEM" };
26538 const char *type;
26539 int i;
26540
26541 @@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void
26542 map = r_list->map;
26543 if (!map)
26544 continue;
26545 - if (map->type < 0 || map->type > 5)
26546 + if (map->type >= ARRAY_SIZE(types))
26547 type = "??";
26548 else
26549 type = types[map->type];
26550 @@ -290,7 +294,11 @@ int drm_vma_info(struct seq_file *m, voi
26551 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
26552 vma->vm_flags & VM_LOCKED ? 'l' : '-',
26553 vma->vm_flags & VM_IO ? 'i' : '-',
26554 +#ifdef CONFIG_GRKERNSEC_HIDESYM
26555 + 0);
26556 +#else
26557 vma->vm_pgoff);
26558 +#endif
26559
26560 #if defined(__i386__)
26561 pgprot = pgprot_val(vma->vm_page_prot);
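
The drm_vm_info() hunk replaces a positional string array plus a hard-coded 0..5 range check with designated initializers bounded by ARRAY_SIZE(), so adding a map type (here _DRM_GEM) can no longer index past the table. A standalone, user-space sketch of the same idiom:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

enum map_type { MAP_FB, MAP_REG, MAP_SHM, MAP_AGP, MAP_SG, MAP_PCI, MAP_GEM };

static const char * const type_names[] = {
	[MAP_FB]  = "FB",  [MAP_REG] = "REG", [MAP_SHM] = "SHM",
	[MAP_AGP] = "AGP", [MAP_SG]  = "SG",  [MAP_PCI] = "PCI",
	[MAP_GEM] = "GEM",
};

static const char *type_name(unsigned int t)
{
	if (t >= ARRAY_SIZE(type_names) || !type_names[t])
		return "??";
	return type_names[t];
}

int main(void)
{
	printf("%s %s\n", type_name(MAP_GEM), type_name(42)); /* GEM ?? */
	return 0;
}
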
26562 diff -urNp linux-3.0.8/drivers/gpu/drm/drm_ioc32.c linux-3.0.8/drivers/gpu/drm/drm_ioc32.c
26563 --- linux-3.0.8/drivers/gpu/drm/drm_ioc32.c 2011-07-21 22:17:23.000000000 -0400
26564 +++ linux-3.0.8/drivers/gpu/drm/drm_ioc32.c 2011-10-06 04:17:55.000000000 -0400
26565 @@ -455,7 +455,7 @@ static int compat_drm_infobufs(struct fi
26566 request = compat_alloc_user_space(nbytes);
26567 if (!access_ok(VERIFY_WRITE, request, nbytes))
26568 return -EFAULT;
26569 - list = (struct drm_buf_desc *) (request + 1);
26570 + list = (struct drm_buf_desc __user *) (request + 1);
26571
26572 if (__put_user(count, &request->count)
26573 || __put_user(list, &request->list))
26574 @@ -516,7 +516,7 @@ static int compat_drm_mapbufs(struct fil
26575 request = compat_alloc_user_space(nbytes);
26576 if (!access_ok(VERIFY_WRITE, request, nbytes))
26577 return -EFAULT;
26578 - list = (struct drm_buf_pub *) (request + 1);
26579 + list = (struct drm_buf_pub __user *) (request + 1);
26580
26581 if (__put_user(count, &request->count)
26582 || __put_user(list, &request->list))
26583 diff -urNp linux-3.0.8/drivers/gpu/drm/drm_ioctl.c linux-3.0.8/drivers/gpu/drm/drm_ioctl.c
26584 --- linux-3.0.8/drivers/gpu/drm/drm_ioctl.c 2011-07-21 22:17:23.000000000 -0400
26585 +++ linux-3.0.8/drivers/gpu/drm/drm_ioctl.c 2011-08-23 21:47:55.000000000 -0400
26586 @@ -256,7 +256,7 @@ int drm_getstats(struct drm_device *dev,
26587 stats->data[i].value =
26588 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
26589 else
26590 - stats->data[i].value = atomic_read(&dev->counts[i]);
26591 + stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
26592 stats->data[i].type = dev->types[i];
26593 }
26594
26595 diff -urNp linux-3.0.8/drivers/gpu/drm/drm_lock.c linux-3.0.8/drivers/gpu/drm/drm_lock.c
26596 --- linux-3.0.8/drivers/gpu/drm/drm_lock.c 2011-07-21 22:17:23.000000000 -0400
26597 +++ linux-3.0.8/drivers/gpu/drm/drm_lock.c 2011-08-23 21:47:55.000000000 -0400
26598 @@ -89,7 +89,7 @@ int drm_lock(struct drm_device *dev, voi
26599 if (drm_lock_take(&master->lock, lock->context)) {
26600 master->lock.file_priv = file_priv;
26601 master->lock.lock_time = jiffies;
26602 - atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
26603 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
26604 break; /* Got lock */
26605 }
26606
26607 @@ -160,7 +160,7 @@ int drm_unlock(struct drm_device *dev, v
26608 return -EINVAL;
26609 }
26610
26611 - atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
26612 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
26613
26614 if (drm_lock_free(&master->lock, lock->context)) {
26615 /* FIXME: Should really bail out here. */
26616 diff -urNp linux-3.0.8/drivers/gpu/drm/i810/i810_dma.c linux-3.0.8/drivers/gpu/drm/i810/i810_dma.c
26617 --- linux-3.0.8/drivers/gpu/drm/i810/i810_dma.c 2011-07-21 22:17:23.000000000 -0400
26618 +++ linux-3.0.8/drivers/gpu/drm/i810/i810_dma.c 2011-08-23 21:47:55.000000000 -0400
26619 @@ -950,8 +950,8 @@ static int i810_dma_vertex(struct drm_de
26620 dma->buflist[vertex->idx],
26621 vertex->discard, vertex->used);
26622
26623 - atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
26624 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
26625 + atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
26626 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
26627 sarea_priv->last_enqueue = dev_priv->counter - 1;
26628 sarea_priv->last_dispatch = (int)hw_status[5];
26629
26630 @@ -1111,8 +1111,8 @@ static int i810_dma_mc(struct drm_device
26631 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
26632 mc->last_render);
26633
26634 - atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
26635 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
26636 + atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
26637 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
26638 sarea_priv->last_enqueue = dev_priv->counter - 1;
26639 sarea_priv->last_dispatch = (int)hw_status[5];
26640
26641 diff -urNp linux-3.0.8/drivers/gpu/drm/i810/i810_drv.h linux-3.0.8/drivers/gpu/drm/i810/i810_drv.h
26642 --- linux-3.0.8/drivers/gpu/drm/i810/i810_drv.h 2011-07-21 22:17:23.000000000 -0400
26643 +++ linux-3.0.8/drivers/gpu/drm/i810/i810_drv.h 2011-08-23 21:47:55.000000000 -0400
26644 @@ -108,8 +108,8 @@ typedef struct drm_i810_private {
26645 int page_flipping;
26646
26647 wait_queue_head_t irq_queue;
26648 - atomic_t irq_received;
26649 - atomic_t irq_emitted;
26650 + atomic_unchecked_t irq_received;
26651 + atomic_unchecked_t irq_emitted;
26652
26653 int front_offset;
26654 } drm_i810_private_t;
26655 diff -urNp linux-3.0.8/drivers/gpu/drm/i915/i915_debugfs.c linux-3.0.8/drivers/gpu/drm/i915/i915_debugfs.c
26656 --- linux-3.0.8/drivers/gpu/drm/i915/i915_debugfs.c 2011-07-21 22:17:23.000000000 -0400
26657 +++ linux-3.0.8/drivers/gpu/drm/i915/i915_debugfs.c 2011-10-06 04:17:55.000000000 -0400
26658 @@ -497,7 +497,7 @@ static int i915_interrupt_info(struct se
26659 I915_READ(GTIMR));
26660 }
26661 seq_printf(m, "Interrupts received: %d\n",
26662 - atomic_read(&dev_priv->irq_received));
26663 + atomic_read_unchecked(&dev_priv->irq_received));
26664 for (i = 0; i < I915_NUM_RINGS; i++) {
26665 if (IS_GEN6(dev)) {
26666 seq_printf(m, "Graphics Interrupt mask (%s): %08x\n",
26667 @@ -1147,7 +1147,7 @@ static int i915_opregion(struct seq_file
26668 return ret;
26669
26670 if (opregion->header)
26671 - seq_write(m, opregion->header, OPREGION_SIZE);
26672 + seq_write(m, (const void __force_kernel *)opregion->header, OPREGION_SIZE);
26673
26674 mutex_unlock(&dev->struct_mutex);
26675
26676 diff -urNp linux-3.0.8/drivers/gpu/drm/i915/i915_dma.c linux-3.0.8/drivers/gpu/drm/i915/i915_dma.c
26677 --- linux-3.0.8/drivers/gpu/drm/i915/i915_dma.c 2011-10-24 08:05:21.000000000 -0400
26678 +++ linux-3.0.8/drivers/gpu/drm/i915/i915_dma.c 2011-08-23 21:47:55.000000000 -0400
26679 @@ -1169,7 +1169,7 @@ static bool i915_switcheroo_can_switch(s
26680 bool can_switch;
26681
26682 spin_lock(&dev->count_lock);
26683 - can_switch = (dev->open_count == 0);
26684 + can_switch = (local_read(&dev->open_count) == 0);
26685 spin_unlock(&dev->count_lock);
26686 return can_switch;
26687 }
26688 diff -urNp linux-3.0.8/drivers/gpu/drm/i915/i915_drv.h linux-3.0.8/drivers/gpu/drm/i915/i915_drv.h
26689 --- linux-3.0.8/drivers/gpu/drm/i915/i915_drv.h 2011-07-21 22:17:23.000000000 -0400
26690 +++ linux-3.0.8/drivers/gpu/drm/i915/i915_drv.h 2011-08-23 21:47:55.000000000 -0400
26691 @@ -219,7 +219,7 @@ struct drm_i915_display_funcs {
26692 /* render clock increase/decrease */
26693 /* display clock increase/decrease */
26694 /* pll clock increase/decrease */
26695 -};
26696 +} __no_const;
26697
26698 struct intel_device_info {
26699 u8 gen;
26700 @@ -300,7 +300,7 @@ typedef struct drm_i915_private {
26701 int current_page;
26702 int page_flipping;
26703
26704 - atomic_t irq_received;
26705 + atomic_unchecked_t irq_received;
26706
26707 /* protects the irq masks */
26708 spinlock_t irq_lock;
26709 @@ -874,7 +874,7 @@ struct drm_i915_gem_object {
26710 * will be page flipped away on the next vblank. When it
26711 * reaches 0, dev_priv->pending_flip_queue will be woken up.
26712 */
26713 - atomic_t pending_flip;
26714 + atomic_unchecked_t pending_flip;
26715 };
26716
26717 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
26718 @@ -1247,7 +1247,7 @@ extern int intel_setup_gmbus(struct drm_
26719 extern void intel_teardown_gmbus(struct drm_device *dev);
26720 extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
26721 extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
26722 -extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
26723 +static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
26724 {
26725 return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
26726 }
26727 diff -urNp linux-3.0.8/drivers/gpu/drm/i915/i915_gem_execbuffer.c linux-3.0.8/drivers/gpu/drm/i915/i915_gem_execbuffer.c
26728 --- linux-3.0.8/drivers/gpu/drm/i915/i915_gem_execbuffer.c 2011-07-21 22:17:23.000000000 -0400
26729 +++ linux-3.0.8/drivers/gpu/drm/i915/i915_gem_execbuffer.c 2011-08-23 21:47:55.000000000 -0400
26730 @@ -188,7 +188,7 @@ i915_gem_object_set_to_gpu_domain(struct
26731 i915_gem_clflush_object(obj);
26732
26733 if (obj->base.pending_write_domain)
26734 - cd->flips |= atomic_read(&obj->pending_flip);
26735 + cd->flips |= atomic_read_unchecked(&obj->pending_flip);
26736
26737 /* The actual obj->write_domain will be updated with
26738 * pending_write_domain after we emit the accumulated flush for all
26739 diff -urNp linux-3.0.8/drivers/gpu/drm/i915/i915_irq.c linux-3.0.8/drivers/gpu/drm/i915/i915_irq.c
26740 --- linux-3.0.8/drivers/gpu/drm/i915/i915_irq.c 2011-10-24 08:05:21.000000000 -0400
26741 +++ linux-3.0.8/drivers/gpu/drm/i915/i915_irq.c 2011-08-23 21:47:55.000000000 -0400
26742 @@ -473,7 +473,7 @@ static irqreturn_t ivybridge_irq_handler
26743 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
26744 struct drm_i915_master_private *master_priv;
26745
26746 - atomic_inc(&dev_priv->irq_received);
26747 + atomic_inc_unchecked(&dev_priv->irq_received);
26748
26749 /* disable master interrupt before clearing iir */
26750 de_ier = I915_READ(DEIER);
26751 @@ -563,7 +563,7 @@ static irqreturn_t ironlake_irq_handler(
26752 struct drm_i915_master_private *master_priv;
26753 u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT;
26754
26755 - atomic_inc(&dev_priv->irq_received);
26756 + atomic_inc_unchecked(&dev_priv->irq_received);
26757
26758 if (IS_GEN6(dev))
26759 bsd_usr_interrupt = GT_GEN6_BSD_USER_INTERRUPT;
26760 @@ -1226,7 +1226,7 @@ static irqreturn_t i915_driver_irq_handl
26761 int ret = IRQ_NONE, pipe;
26762 bool blc_event = false;
26763
26764 - atomic_inc(&dev_priv->irq_received);
26765 + atomic_inc_unchecked(&dev_priv->irq_received);
26766
26767 iir = I915_READ(IIR);
26768
26769 @@ -1735,7 +1735,7 @@ static void ironlake_irq_preinstall(stru
26770 {
26771 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
26772
26773 - atomic_set(&dev_priv->irq_received, 0);
26774 + atomic_set_unchecked(&dev_priv->irq_received, 0);
26775
26776 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
26777 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
26778 @@ -1899,7 +1899,7 @@ static void i915_driver_irq_preinstall(s
26779 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
26780 int pipe;
26781
26782 - atomic_set(&dev_priv->irq_received, 0);
26783 + atomic_set_unchecked(&dev_priv->irq_received, 0);
26784
26785 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
26786 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
26787 diff -urNp linux-3.0.8/drivers/gpu/drm/i915/intel_display.c linux-3.0.8/drivers/gpu/drm/i915/intel_display.c
26788 --- linux-3.0.8/drivers/gpu/drm/i915/intel_display.c 2011-10-24 08:05:21.000000000 -0400
26789 +++ linux-3.0.8/drivers/gpu/drm/i915/intel_display.c 2011-08-23 21:47:55.000000000 -0400
26790 @@ -1961,7 +1961,7 @@ intel_pipe_set_base(struct drm_crtc *crt
26791
26792 wait_event(dev_priv->pending_flip_queue,
26793 atomic_read(&dev_priv->mm.wedged) ||
26794 - atomic_read(&obj->pending_flip) == 0);
26795 + atomic_read_unchecked(&obj->pending_flip) == 0);
26796
26797 /* Big Hammer, we also need to ensure that any pending
26798 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
26799 @@ -2548,7 +2548,7 @@ static void intel_crtc_wait_for_pending_
26800 obj = to_intel_framebuffer(crtc->fb)->obj;
26801 dev_priv = crtc->dev->dev_private;
26802 wait_event(dev_priv->pending_flip_queue,
26803 - atomic_read(&obj->pending_flip) == 0);
26804 + atomic_read_unchecked(&obj->pending_flip) == 0);
26805 }
26806
26807 static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
26808 @@ -6225,7 +6225,7 @@ static void do_intel_finish_page_flip(st
26809
26810 atomic_clear_mask(1 << intel_crtc->plane,
26811 &obj->pending_flip.counter);
26812 - if (atomic_read(&obj->pending_flip) == 0)
26813 + if (atomic_read_unchecked(&obj->pending_flip) == 0)
26814 wake_up(&dev_priv->pending_flip_queue);
26815
26816 schedule_work(&work->work);
26817 @@ -6514,7 +6514,7 @@ static int intel_crtc_page_flip(struct d
26818 /* Block clients from rendering to the new back buffer until
26819 * the flip occurs and the object is no longer visible.
26820 */
26821 - atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
26822 + atomic_add_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
26823
26824 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
26825 if (ret)
26826 @@ -6527,7 +6527,7 @@ static int intel_crtc_page_flip(struct d
26827 return 0;
26828
26829 cleanup_pending:
26830 - atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
26831 + atomic_sub_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
26832 cleanup_objs:
26833 drm_gem_object_unreference(&work->old_fb_obj->base);
26834 drm_gem_object_unreference(&obj->base);
26835 diff -urNp linux-3.0.8/drivers/gpu/drm/mga/mga_drv.h linux-3.0.8/drivers/gpu/drm/mga/mga_drv.h
26836 --- linux-3.0.8/drivers/gpu/drm/mga/mga_drv.h 2011-07-21 22:17:23.000000000 -0400
26837 +++ linux-3.0.8/drivers/gpu/drm/mga/mga_drv.h 2011-08-23 21:47:55.000000000 -0400
26838 @@ -120,9 +120,9 @@ typedef struct drm_mga_private {
26839 u32 clear_cmd;
26840 u32 maccess;
26841
26842 - atomic_t vbl_received; /**< Number of vblanks received. */
26843 + atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
26844 wait_queue_head_t fence_queue;
26845 - atomic_t last_fence_retired;
26846 + atomic_unchecked_t last_fence_retired;
26847 u32 next_fence_to_post;
26848
26849 unsigned int fb_cpp;
26850 diff -urNp linux-3.0.8/drivers/gpu/drm/mga/mga_irq.c linux-3.0.8/drivers/gpu/drm/mga/mga_irq.c
26851 --- linux-3.0.8/drivers/gpu/drm/mga/mga_irq.c 2011-07-21 22:17:23.000000000 -0400
26852 +++ linux-3.0.8/drivers/gpu/drm/mga/mga_irq.c 2011-08-23 21:47:55.000000000 -0400
26853 @@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_de
26854 if (crtc != 0)
26855 return 0;
26856
26857 - return atomic_read(&dev_priv->vbl_received);
26858 + return atomic_read_unchecked(&dev_priv->vbl_received);
26859 }
26860
26861
26862 @@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
26863 /* VBLANK interrupt */
26864 if (status & MGA_VLINEPEN) {
26865 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
26866 - atomic_inc(&dev_priv->vbl_received);
26867 + atomic_inc_unchecked(&dev_priv->vbl_received);
26868 drm_handle_vblank(dev, 0);
26869 handled = 1;
26870 }
26871 @@ -79,7 +79,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
26872 if ((prim_start & ~0x03) != (prim_end & ~0x03))
26873 MGA_WRITE(MGA_PRIMEND, prim_end);
26874
26875 - atomic_inc(&dev_priv->last_fence_retired);
26876 + atomic_inc_unchecked(&dev_priv->last_fence_retired);
26877 DRM_WAKEUP(&dev_priv->fence_queue);
26878 handled = 1;
26879 }
26880 @@ -130,7 +130,7 @@ int mga_driver_fence_wait(struct drm_dev
26881 * using fences.
26882 */
26883 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
26884 - (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
26885 + (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
26886 - *sequence) <= (1 << 23)));
26887
26888 *sequence = cur_fence;
26889 diff -urNp linux-3.0.8/drivers/gpu/drm/nouveau/nouveau_bios.c linux-3.0.8/drivers/gpu/drm/nouveau/nouveau_bios.c
26890 --- linux-3.0.8/drivers/gpu/drm/nouveau/nouveau_bios.c 2011-07-21 22:17:23.000000000 -0400
26891 +++ linux-3.0.8/drivers/gpu/drm/nouveau/nouveau_bios.c 2011-08-26 19:49:56.000000000 -0400
26892 @@ -200,7 +200,7 @@ struct methods {
26893 const char desc[8];
26894 void (*loadbios)(struct drm_device *, uint8_t *);
26895 const bool rw;
26896 -};
26897 +} __do_const;
26898
26899 static struct methods shadow_methods[] = {
26900 { "PRAMIN", load_vbios_pramin, true },
26901 @@ -5488,7 +5488,7 @@ parse_bit_displayport_tbl_entry(struct d
26902 struct bit_table {
26903 const char id;
26904 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
26905 -};
26906 +} __no_const;
26907
26908 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
26909
26910 diff -urNp linux-3.0.8/drivers/gpu/drm/nouveau/nouveau_drv.h linux-3.0.8/drivers/gpu/drm/nouveau/nouveau_drv.h
26911 --- linux-3.0.8/drivers/gpu/drm/nouveau/nouveau_drv.h 2011-07-21 22:17:23.000000000 -0400
26912 +++ linux-3.0.8/drivers/gpu/drm/nouveau/nouveau_drv.h 2011-08-23 21:47:55.000000000 -0400
26913 @@ -227,7 +227,7 @@ struct nouveau_channel {
26914 struct list_head pending;
26915 uint32_t sequence;
26916 uint32_t sequence_ack;
26917 - atomic_t last_sequence_irq;
26918 + atomic_unchecked_t last_sequence_irq;
26919 } fence;
26920
26921 /* DMA push buffer */
26922 @@ -304,7 +304,7 @@ struct nouveau_exec_engine {
26923 u32 handle, u16 class);
26924 void (*set_tile_region)(struct drm_device *dev, int i);
26925 void (*tlb_flush)(struct drm_device *, int engine);
26926 -};
26927 +} __no_const;
26928
26929 struct nouveau_instmem_engine {
26930 void *priv;
26931 @@ -325,13 +325,13 @@ struct nouveau_instmem_engine {
26932 struct nouveau_mc_engine {
26933 int (*init)(struct drm_device *dev);
26934 void (*takedown)(struct drm_device *dev);
26935 -};
26936 +} __no_const;
26937
26938 struct nouveau_timer_engine {
26939 int (*init)(struct drm_device *dev);
26940 void (*takedown)(struct drm_device *dev);
26941 uint64_t (*read)(struct drm_device *dev);
26942 -};
26943 +} __no_const;
26944
26945 struct nouveau_fb_engine {
26946 int num_tiles;
26947 @@ -494,7 +494,7 @@ struct nouveau_vram_engine {
26948 void (*put)(struct drm_device *, struct nouveau_mem **);
26949
26950 bool (*flags_valid)(struct drm_device *, u32 tile_flags);
26951 -};
26952 +} __no_const;
26953
26954 struct nouveau_engine {
26955 struct nouveau_instmem_engine instmem;
26956 @@ -640,7 +640,7 @@ struct drm_nouveau_private {
26957 struct drm_global_reference mem_global_ref;
26958 struct ttm_bo_global_ref bo_global_ref;
26959 struct ttm_bo_device bdev;
26960 - atomic_t validate_sequence;
26961 + atomic_unchecked_t validate_sequence;
26962 } ttm;
26963
26964 struct {
26965 diff -urNp linux-3.0.8/drivers/gpu/drm/nouveau/nouveau_fence.c linux-3.0.8/drivers/gpu/drm/nouveau/nouveau_fence.c
26966 --- linux-3.0.8/drivers/gpu/drm/nouveau/nouveau_fence.c 2011-07-21 22:17:23.000000000 -0400
26967 +++ linux-3.0.8/drivers/gpu/drm/nouveau/nouveau_fence.c 2011-08-23 21:47:55.000000000 -0400
26968 @@ -85,7 +85,7 @@ nouveau_fence_update(struct nouveau_chan
26969 if (USE_REFCNT(dev))
26970 sequence = nvchan_rd32(chan, 0x48);
26971 else
26972 - sequence = atomic_read(&chan->fence.last_sequence_irq);
26973 + sequence = atomic_read_unchecked(&chan->fence.last_sequence_irq);
26974
26975 if (chan->fence.sequence_ack == sequence)
26976 goto out;
26977 @@ -544,7 +544,7 @@ nouveau_fence_channel_init(struct nouvea
26978
26979 INIT_LIST_HEAD(&chan->fence.pending);
26980 spin_lock_init(&chan->fence.lock);
26981 - atomic_set(&chan->fence.last_sequence_irq, 0);
26982 + atomic_set_unchecked(&chan->fence.last_sequence_irq, 0);
26983 return 0;
26984 }
26985
26986 diff -urNp linux-3.0.8/drivers/gpu/drm/nouveau/nouveau_gem.c linux-3.0.8/drivers/gpu/drm/nouveau/nouveau_gem.c
26987 --- linux-3.0.8/drivers/gpu/drm/nouveau/nouveau_gem.c 2011-07-21 22:17:23.000000000 -0400
26988 +++ linux-3.0.8/drivers/gpu/drm/nouveau/nouveau_gem.c 2011-08-23 21:47:55.000000000 -0400
26989 @@ -249,7 +249,7 @@ validate_init(struct nouveau_channel *ch
26990 int trycnt = 0;
26991 int ret, i;
26992
26993 - sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence);
26994 + sequence = atomic_add_return_unchecked(1, &dev_priv->ttm.validate_sequence);
26995 retry:
26996 if (++trycnt > 100000) {
26997 NV_ERROR(dev, "%s failed and gave up.\n", __func__);
26998 diff -urNp linux-3.0.8/drivers/gpu/drm/nouveau/nouveau_state.c linux-3.0.8/drivers/gpu/drm/nouveau/nouveau_state.c
26999 --- linux-3.0.8/drivers/gpu/drm/nouveau/nouveau_state.c 2011-07-21 22:17:23.000000000 -0400
27000 +++ linux-3.0.8/drivers/gpu/drm/nouveau/nouveau_state.c 2011-08-23 21:47:55.000000000 -0400
27001 @@ -488,7 +488,7 @@ static bool nouveau_switcheroo_can_switc
27002 bool can_switch;
27003
27004 spin_lock(&dev->count_lock);
27005 - can_switch = (dev->open_count == 0);
27006 + can_switch = (local_read(&dev->open_count) == 0);
27007 spin_unlock(&dev->count_lock);
27008 return can_switch;
27009 }
27010 diff -urNp linux-3.0.8/drivers/gpu/drm/nouveau/nv04_graph.c linux-3.0.8/drivers/gpu/drm/nouveau/nv04_graph.c
27011 --- linux-3.0.8/drivers/gpu/drm/nouveau/nv04_graph.c 2011-07-21 22:17:23.000000000 -0400
27012 +++ linux-3.0.8/drivers/gpu/drm/nouveau/nv04_graph.c 2011-08-23 21:47:55.000000000 -0400
27013 @@ -560,7 +560,7 @@ static int
27014 nv04_graph_mthd_set_ref(struct nouveau_channel *chan,
27015 u32 class, u32 mthd, u32 data)
27016 {
27017 - atomic_set(&chan->fence.last_sequence_irq, data);
27018 + atomic_set_unchecked(&chan->fence.last_sequence_irq, data);
27019 return 0;
27020 }
27021
27022 diff -urNp linux-3.0.8/drivers/gpu/drm/r128/r128_cce.c linux-3.0.8/drivers/gpu/drm/r128/r128_cce.c
27023 --- linux-3.0.8/drivers/gpu/drm/r128/r128_cce.c 2011-07-21 22:17:23.000000000 -0400
27024 +++ linux-3.0.8/drivers/gpu/drm/r128/r128_cce.c 2011-08-23 21:47:55.000000000 -0400
27025 @@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_d
27026
27027 /* GH: Simple idle check.
27028 */
27029 - atomic_set(&dev_priv->idle_count, 0);
27030 + atomic_set_unchecked(&dev_priv->idle_count, 0);
27031
27032 /* We don't support anything other than bus-mastering ring mode,
27033 * but the ring can be in either AGP or PCI space for the ring
27034 diff -urNp linux-3.0.8/drivers/gpu/drm/r128/r128_drv.h linux-3.0.8/drivers/gpu/drm/r128/r128_drv.h
27035 --- linux-3.0.8/drivers/gpu/drm/r128/r128_drv.h 2011-07-21 22:17:23.000000000 -0400
27036 +++ linux-3.0.8/drivers/gpu/drm/r128/r128_drv.h 2011-08-23 21:47:55.000000000 -0400
27037 @@ -90,14 +90,14 @@ typedef struct drm_r128_private {
27038 int is_pci;
27039 unsigned long cce_buffers_offset;
27040
27041 - atomic_t idle_count;
27042 + atomic_unchecked_t idle_count;
27043
27044 int page_flipping;
27045 int current_page;
27046 u32 crtc_offset;
27047 u32 crtc_offset_cntl;
27048
27049 - atomic_t vbl_received;
27050 + atomic_unchecked_t vbl_received;
27051
27052 u32 color_fmt;
27053 unsigned int front_offset;
27054 diff -urNp linux-3.0.8/drivers/gpu/drm/r128/r128_irq.c linux-3.0.8/drivers/gpu/drm/r128/r128_irq.c
27055 --- linux-3.0.8/drivers/gpu/drm/r128/r128_irq.c 2011-07-21 22:17:23.000000000 -0400
27056 +++ linux-3.0.8/drivers/gpu/drm/r128/r128_irq.c 2011-08-23 21:47:55.000000000 -0400
27057 @@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_d
27058 if (crtc != 0)
27059 return 0;
27060
27061 - return atomic_read(&dev_priv->vbl_received);
27062 + return atomic_read_unchecked(&dev_priv->vbl_received);
27063 }
27064
27065 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
27066 @@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_
27067 /* VBLANK interrupt */
27068 if (status & R128_CRTC_VBLANK_INT) {
27069 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
27070 - atomic_inc(&dev_priv->vbl_received);
27071 + atomic_inc_unchecked(&dev_priv->vbl_received);
27072 drm_handle_vblank(dev, 0);
27073 return IRQ_HANDLED;
27074 }
27075 diff -urNp linux-3.0.8/drivers/gpu/drm/r128/r128_state.c linux-3.0.8/drivers/gpu/drm/r128/r128_state.c
27076 --- linux-3.0.8/drivers/gpu/drm/r128/r128_state.c 2011-07-21 22:17:23.000000000 -0400
27077 +++ linux-3.0.8/drivers/gpu/drm/r128/r128_state.c 2011-08-23 21:47:55.000000000 -0400
27078 @@ -321,10 +321,10 @@ static void r128_clear_box(drm_r128_priv
27079
27080 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
27081 {
27082 - if (atomic_read(&dev_priv->idle_count) == 0)
27083 + if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
27084 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
27085 else
27086 - atomic_set(&dev_priv->idle_count, 0);
27087 + atomic_set_unchecked(&dev_priv->idle_count, 0);
27088 }
27089
27090 #endif
27091 diff -urNp linux-3.0.8/drivers/gpu/drm/radeon/atom.c linux-3.0.8/drivers/gpu/drm/radeon/atom.c
27092 --- linux-3.0.8/drivers/gpu/drm/radeon/atom.c 2011-10-25 09:10:33.000000000 -0400
27093 +++ linux-3.0.8/drivers/gpu/drm/radeon/atom.c 2011-10-25 09:10:41.000000000 -0400
27094 @@ -1254,6 +1254,8 @@ struct atom_context *atom_parse(struct c
27095 char name[512];
27096 int i;
27097
27098 + pax_track_stack();
27099 +
27100 ctx->card = card;
27101 ctx->bios = bios;
27102
27103 diff -urNp linux-3.0.8/drivers/gpu/drm/radeon/mkregtable.c linux-3.0.8/drivers/gpu/drm/radeon/mkregtable.c
27104 --- linux-3.0.8/drivers/gpu/drm/radeon/mkregtable.c 2011-07-21 22:17:23.000000000 -0400
27105 +++ linux-3.0.8/drivers/gpu/drm/radeon/mkregtable.c 2011-08-23 21:47:55.000000000 -0400
27106 @@ -637,14 +637,14 @@ static int parser_auth(struct table *t,
27107 regex_t mask_rex;
27108 regmatch_t match[4];
27109 char buf[1024];
27110 - size_t end;
27111 + long end;
27112 int len;
27113 int done = 0;
27114 int r;
27115 unsigned o;
27116 struct offset *offset;
27117 char last_reg_s[10];
27118 - int last_reg;
27119 + unsigned long last_reg;
27120
27121 if (regcomp
27122 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
27123 diff -urNp linux-3.0.8/drivers/gpu/drm/radeon/radeon_atombios.c linux-3.0.8/drivers/gpu/drm/radeon/radeon_atombios.c
27124 --- linux-3.0.8/drivers/gpu/drm/radeon/radeon_atombios.c 2011-07-21 22:17:23.000000000 -0400
27125 +++ linux-3.0.8/drivers/gpu/drm/radeon/radeon_atombios.c 2011-08-23 21:48:14.000000000 -0400
27126 @@ -545,6 +545,8 @@ bool radeon_get_atom_connector_info_from
27127 struct radeon_gpio_rec gpio;
27128 struct radeon_hpd hpd;
27129
27130 + pax_track_stack();
27131 +
27132 if (!atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset))
27133 return false;
27134
27135 diff -urNp linux-3.0.8/drivers/gpu/drm/radeon/radeon_device.c linux-3.0.8/drivers/gpu/drm/radeon/radeon_device.c
27136 --- linux-3.0.8/drivers/gpu/drm/radeon/radeon_device.c 2011-10-24 08:05:21.000000000 -0400
27137 +++ linux-3.0.8/drivers/gpu/drm/radeon/radeon_device.c 2011-08-23 21:47:55.000000000 -0400
27138 @@ -678,7 +678,7 @@ static bool radeon_switcheroo_can_switch
27139 bool can_switch;
27140
27141 spin_lock(&dev->count_lock);
27142 - can_switch = (dev->open_count == 0);
27143 + can_switch = (local_read(&dev->open_count) == 0);
27144 spin_unlock(&dev->count_lock);
27145 return can_switch;
27146 }
27147 diff -urNp linux-3.0.8/drivers/gpu/drm/radeon/radeon_display.c linux-3.0.8/drivers/gpu/drm/radeon/radeon_display.c
27148 --- linux-3.0.8/drivers/gpu/drm/radeon/radeon_display.c 2011-10-24 08:05:21.000000000 -0400
27149 +++ linux-3.0.8/drivers/gpu/drm/radeon/radeon_display.c 2011-08-23 21:48:14.000000000 -0400
27150 @@ -946,6 +946,8 @@ void radeon_compute_pll_legacy(struct ra
27151 uint32_t post_div;
27152 u32 pll_out_min, pll_out_max;
27153
27154 + pax_track_stack();
27155 +
27156 DRM_DEBUG_KMS("PLL freq %llu %u %u\n", freq, pll->min_ref_div, pll->max_ref_div);
27157 freq = freq * 1000;
27158
27159 diff -urNp linux-3.0.8/drivers/gpu/drm/radeon/radeon_drv.h linux-3.0.8/drivers/gpu/drm/radeon/radeon_drv.h
27160 --- linux-3.0.8/drivers/gpu/drm/radeon/radeon_drv.h 2011-07-21 22:17:23.000000000 -0400
27161 +++ linux-3.0.8/drivers/gpu/drm/radeon/radeon_drv.h 2011-08-23 21:47:55.000000000 -0400
27162 @@ -255,7 +255,7 @@ typedef struct drm_radeon_private {
27163
27164 /* SW interrupt */
27165 wait_queue_head_t swi_queue;
27166 - atomic_t swi_emitted;
27167 + atomic_unchecked_t swi_emitted;
27168 int vblank_crtc;
27169 uint32_t irq_enable_reg;
27170 uint32_t r500_disp_irq_reg;
27171 diff -urNp linux-3.0.8/drivers/gpu/drm/radeon/radeon_fence.c linux-3.0.8/drivers/gpu/drm/radeon/radeon_fence.c
27172 --- linux-3.0.8/drivers/gpu/drm/radeon/radeon_fence.c 2011-07-21 22:17:23.000000000 -0400
27173 +++ linux-3.0.8/drivers/gpu/drm/radeon/radeon_fence.c 2011-08-23 21:47:55.000000000 -0400
27174 @@ -78,7 +78,7 @@ int radeon_fence_emit(struct radeon_devi
27175 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
27176 return 0;
27177 }
27178 - fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
27179 + fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv.seq);
27180 if (!rdev->cp.ready)
27181 /* FIXME: cp is not running assume everythings is done right
27182 * away
27183 @@ -373,7 +373,7 @@ int radeon_fence_driver_init(struct rade
27184 return r;
27185 }
27186 radeon_fence_write(rdev, 0);
27187 - atomic_set(&rdev->fence_drv.seq, 0);
27188 + atomic_set_unchecked(&rdev->fence_drv.seq, 0);
27189 INIT_LIST_HEAD(&rdev->fence_drv.created);
27190 INIT_LIST_HEAD(&rdev->fence_drv.emited);
27191 INIT_LIST_HEAD(&rdev->fence_drv.signaled);
27192 diff -urNp linux-3.0.8/drivers/gpu/drm/radeon/radeon.h linux-3.0.8/drivers/gpu/drm/radeon/radeon.h
27193 --- linux-3.0.8/drivers/gpu/drm/radeon/radeon.h 2011-10-24 08:05:30.000000000 -0400
27194 +++ linux-3.0.8/drivers/gpu/drm/radeon/radeon.h 2011-10-16 21:55:27.000000000 -0400
27195 @@ -191,7 +191,7 @@ extern int sumo_get_temp(struct radeon_d
27196 */
27197 struct radeon_fence_driver {
27198 uint32_t scratch_reg;
27199 - atomic_t seq;
27200 + atomic_unchecked_t seq;
27201 uint32_t last_seq;
27202 unsigned long last_jiffies;
27203 unsigned long last_timeout;
27204 @@ -961,7 +961,7 @@ struct radeon_asic {
27205 void (*pre_page_flip)(struct radeon_device *rdev, int crtc);
27206 u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
27207 void (*post_page_flip)(struct radeon_device *rdev, int crtc);
27208 -};
27209 +} __no_const;
27210
27211 /*
27212 * Asic structures
27213 diff -urNp linux-3.0.8/drivers/gpu/drm/radeon/radeon_ioc32.c linux-3.0.8/drivers/gpu/drm/radeon/radeon_ioc32.c
27214 --- linux-3.0.8/drivers/gpu/drm/radeon/radeon_ioc32.c 2011-07-21 22:17:23.000000000 -0400
27215 +++ linux-3.0.8/drivers/gpu/drm/radeon/radeon_ioc32.c 2011-08-23 21:47:55.000000000 -0400
27216 @@ -359,7 +359,7 @@ static int compat_radeon_cp_setparam(str
27217 request = compat_alloc_user_space(sizeof(*request));
27218 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
27219 || __put_user(req32.param, &request->param)
27220 - || __put_user((void __user *)(unsigned long)req32.value,
27221 + || __put_user((unsigned long)req32.value,
27222 &request->value))
27223 return -EFAULT;
27224
27225 diff -urNp linux-3.0.8/drivers/gpu/drm/radeon/radeon_irq.c linux-3.0.8/drivers/gpu/drm/radeon/radeon_irq.c
27226 --- linux-3.0.8/drivers/gpu/drm/radeon/radeon_irq.c 2011-07-21 22:17:23.000000000 -0400
27227 +++ linux-3.0.8/drivers/gpu/drm/radeon/radeon_irq.c 2011-08-23 21:47:55.000000000 -0400
27228 @@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_de
27229 unsigned int ret;
27230 RING_LOCALS;
27231
27232 - atomic_inc(&dev_priv->swi_emitted);
27233 - ret = atomic_read(&dev_priv->swi_emitted);
27234 + atomic_inc_unchecked(&dev_priv->swi_emitted);
27235 + ret = atomic_read_unchecked(&dev_priv->swi_emitted);
27236
27237 BEGIN_RING(4);
27238 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
27239 @@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct
27240 drm_radeon_private_t *dev_priv =
27241 (drm_radeon_private_t *) dev->dev_private;
27242
27243 - atomic_set(&dev_priv->swi_emitted, 0);
27244 + atomic_set_unchecked(&dev_priv->swi_emitted, 0);
27245 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
27246
27247 dev->max_vblank_count = 0x001fffff;
27248 diff -urNp linux-3.0.8/drivers/gpu/drm/radeon/radeon_state.c linux-3.0.8/drivers/gpu/drm/radeon/radeon_state.c
27249 --- linux-3.0.8/drivers/gpu/drm/radeon/radeon_state.c 2011-07-21 22:17:23.000000000 -0400
27250 +++ linux-3.0.8/drivers/gpu/drm/radeon/radeon_state.c 2011-08-23 21:47:55.000000000 -0400
27251 @@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_de
27252 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
27253 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
27254
27255 - if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
27256 + if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
27257 sarea_priv->nbox * sizeof(depth_boxes[0])))
27258 return -EFAULT;
27259
27260 @@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm
27261 {
27262 drm_radeon_private_t *dev_priv = dev->dev_private;
27263 drm_radeon_getparam_t *param = data;
27264 - int value;
27265 + int value = 0;
27266
27267 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
27268
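
Two radeon_state.c fixes ride along here: the clear ioctl re-validates sarea_priv->nbox at the point of the depth_boxes copy from user space, and radeon_cp_getparam() zero-initializes value so an unrecognized parameter can no longer copy uninitialized stack memory back to the caller. A trivial user-space sketch of the infoleak shape being closed (hypothetical names, illustration only):

#include <stdio.h>
#include <string.h>

static int getparam(int param, int *out)
{
	int value = 0;		/* without "= 0", an unknown param would
				   hand back whatever was on the stack */
	switch (param) {
	case 1:
		value = 42;
		break;
	}
	memcpy(out, &value, sizeof(value));	/* stands in for copy_to_user() */
	return 0;
}

int main(void)
{
	int v;
	getparam(7, &v);
	printf("%d\n", v);	/* prints 0, not stack garbage */
	return 0;
}
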
27269 diff -urNp linux-3.0.8/drivers/gpu/drm/radeon/radeon_ttm.c linux-3.0.8/drivers/gpu/drm/radeon/radeon_ttm.c
27270 --- linux-3.0.8/drivers/gpu/drm/radeon/radeon_ttm.c 2011-10-24 08:05:30.000000000 -0400
27271 +++ linux-3.0.8/drivers/gpu/drm/radeon/radeon_ttm.c 2011-10-16 21:55:27.000000000 -0400
27272 @@ -649,8 +649,10 @@ int radeon_mmap(struct file *filp, struc
27273 }
27274 if (unlikely(ttm_vm_ops == NULL)) {
27275 ttm_vm_ops = vma->vm_ops;
27276 - radeon_ttm_vm_ops = *ttm_vm_ops;
27277 - radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
27278 + pax_open_kernel();
27279 + memcpy((void *)&radeon_ttm_vm_ops, ttm_vm_ops, sizeof(radeon_ttm_vm_ops));
27280 + *(void **)&radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
27281 + pax_close_kernel();
27282 }
27283 vma->vm_ops = &radeon_ttm_vm_ops;
27284 return 0;
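
Because the constify plugin leaves radeon_ttm_vm_ops read-only, the one-time override of its fault handler is done between pax_open_kernel() and pax_close_kernel(), which briefly permit the write (on x86 this is typically implemented by toggling CR0.WP). A sketch of the assumed no-op fallback when the feature is absent:

#ifndef CONFIG_PAX_KERNEXEC
# define pax_open_kernel()	do { } while (0)
# define pax_close_kernel()	do { } while (0)
#endif
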
27285 diff -urNp linux-3.0.8/drivers/gpu/drm/radeon/rs690.c linux-3.0.8/drivers/gpu/drm/radeon/rs690.c
27286 --- linux-3.0.8/drivers/gpu/drm/radeon/rs690.c 2011-07-21 22:17:23.000000000 -0400
27287 +++ linux-3.0.8/drivers/gpu/drm/radeon/rs690.c 2011-08-23 21:47:55.000000000 -0400
27288 @@ -304,9 +304,11 @@ void rs690_crtc_bandwidth_compute(struct
27289 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
27290 rdev->pm.sideport_bandwidth.full)
27291 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
27292 - read_delay_latency.full = dfixed_const(370 * 800 * 1000);
27293 + read_delay_latency.full = dfixed_const(800 * 1000);
27294 read_delay_latency.full = dfixed_div(read_delay_latency,
27295 rdev->pm.igp_sideport_mclk);
27296 + a.full = dfixed_const(370);
27297 + read_delay_latency.full = dfixed_mul(read_delay_latency, a);
27298 } else {
27299 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
27300 rdev->pm.k8_bandwidth.full)
27301 diff -urNp linux-3.0.8/drivers/gpu/drm/ttm/ttm_page_alloc.c linux-3.0.8/drivers/gpu/drm/ttm/ttm_page_alloc.c
27302 --- linux-3.0.8/drivers/gpu/drm/ttm/ttm_page_alloc.c 2011-07-21 22:17:23.000000000 -0400
27303 +++ linux-3.0.8/drivers/gpu/drm/ttm/ttm_page_alloc.c 2011-08-23 21:47:55.000000000 -0400
27304 @@ -398,9 +398,9 @@ static int ttm_pool_get_num_unused_pages
27305 static int ttm_pool_mm_shrink(struct shrinker *shrink,
27306 struct shrink_control *sc)
27307 {
27308 - static atomic_t start_pool = ATOMIC_INIT(0);
27309 + static atomic_unchecked_t start_pool = ATOMIC_INIT(0);
27310 unsigned i;
27311 - unsigned pool_offset = atomic_add_return(1, &start_pool);
27312 + unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool);
27313 struct ttm_page_pool *pool;
27314 int shrink_pages = sc->nr_to_scan;
27315
27316 diff -urNp linux-3.0.8/drivers/gpu/drm/via/via_drv.h linux-3.0.8/drivers/gpu/drm/via/via_drv.h
27317 --- linux-3.0.8/drivers/gpu/drm/via/via_drv.h 2011-07-21 22:17:23.000000000 -0400
27318 +++ linux-3.0.8/drivers/gpu/drm/via/via_drv.h 2011-08-23 21:47:55.000000000 -0400
27319 @@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
27320 typedef uint32_t maskarray_t[5];
27321
27322 typedef struct drm_via_irq {
27323 - atomic_t irq_received;
27324 + atomic_unchecked_t irq_received;
27325 uint32_t pending_mask;
27326 uint32_t enable_mask;
27327 wait_queue_head_t irq_queue;
27328 @@ -75,7 +75,7 @@ typedef struct drm_via_private {
27329 struct timeval last_vblank;
27330 int last_vblank_valid;
27331 unsigned usec_per_vblank;
27332 - atomic_t vbl_received;
27333 + atomic_unchecked_t vbl_received;
27334 drm_via_state_t hc_state;
27335 char pci_buf[VIA_PCI_BUF_SIZE];
27336 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
27337 diff -urNp linux-3.0.8/drivers/gpu/drm/via/via_irq.c linux-3.0.8/drivers/gpu/drm/via/via_irq.c
27338 --- linux-3.0.8/drivers/gpu/drm/via/via_irq.c 2011-07-21 22:17:23.000000000 -0400
27339 +++ linux-3.0.8/drivers/gpu/drm/via/via_irq.c 2011-08-23 21:47:55.000000000 -0400
27340 @@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_de
27341 if (crtc != 0)
27342 return 0;
27343
27344 - return atomic_read(&dev_priv->vbl_received);
27345 + return atomic_read_unchecked(&dev_priv->vbl_received);
27346 }
27347
27348 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
27349 @@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_I
27350
27351 status = VIA_READ(VIA_REG_INTERRUPT);
27352 if (status & VIA_IRQ_VBLANK_PENDING) {
27353 - atomic_inc(&dev_priv->vbl_received);
27354 - if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
27355 + atomic_inc_unchecked(&dev_priv->vbl_received);
27356 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
27357 do_gettimeofday(&cur_vblank);
27358 if (dev_priv->last_vblank_valid) {
27359 dev_priv->usec_per_vblank =
27360 @@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
27361 dev_priv->last_vblank = cur_vblank;
27362 dev_priv->last_vblank_valid = 1;
27363 }
27364 - if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
27365 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
27366 DRM_DEBUG("US per vblank is: %u\n",
27367 dev_priv->usec_per_vblank);
27368 }
27369 @@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
27370
27371 for (i = 0; i < dev_priv->num_irqs; ++i) {
27372 if (status & cur_irq->pending_mask) {
27373 - atomic_inc(&cur_irq->irq_received);
27374 + atomic_inc_unchecked(&cur_irq->irq_received);
27375 DRM_WAKEUP(&cur_irq->irq_queue);
27376 handled = 1;
27377 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
27378 @@ -243,11 +243,11 @@ via_driver_irq_wait(struct drm_device *d
27379 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
27380 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
27381 masks[irq][4]));
27382 - cur_irq_sequence = atomic_read(&cur_irq->irq_received);
27383 + cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
27384 } else {
27385 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
27386 (((cur_irq_sequence =
27387 - atomic_read(&cur_irq->irq_received)) -
27388 + atomic_read_unchecked(&cur_irq->irq_received)) -
27389 *sequence) <= (1 << 23)));
27390 }
27391 *sequence = cur_irq_sequence;
27392 @@ -285,7 +285,7 @@ void via_driver_irq_preinstall(struct dr
27393 }
27394
27395 for (i = 0; i < dev_priv->num_irqs; ++i) {
27396 - atomic_set(&cur_irq->irq_received, 0);
27397 + atomic_set_unchecked(&cur_irq->irq_received, 0);
27398 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
27399 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
27400 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
27401 @@ -367,7 +367,7 @@ int via_wait_irq(struct drm_device *dev,
27402 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
27403 case VIA_IRQ_RELATIVE:
27404 irqwait->request.sequence +=
27405 - atomic_read(&cur_irq->irq_received);
27406 + atomic_read_unchecked(&cur_irq->irq_received);
27407 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
27408 case VIA_IRQ_ABSOLUTE:
27409 break;
27410 diff -urNp linux-3.0.8/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h linux-3.0.8/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
27411 --- linux-3.0.8/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h 2011-07-21 22:17:23.000000000 -0400
27412 +++ linux-3.0.8/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h 2011-08-23 21:47:55.000000000 -0400
27413 @@ -240,7 +240,7 @@ struct vmw_private {
27414 * Fencing and IRQs.
27415 */
27416
27417 - atomic_t fence_seq;
27418 + atomic_unchecked_t fence_seq;
27419 wait_queue_head_t fence_queue;
27420 wait_queue_head_t fifo_queue;
27421 atomic_t fence_queue_waiters;
27422 diff -urNp linux-3.0.8/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c linux-3.0.8/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
27423 --- linux-3.0.8/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c 2011-07-21 22:17:23.000000000 -0400
27424 +++ linux-3.0.8/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c 2011-10-06 04:17:55.000000000 -0400
27425 @@ -610,7 +610,7 @@ int vmw_execbuf_ioctl(struct drm_device
27426 struct drm_vmw_fence_rep fence_rep;
27427 struct drm_vmw_fence_rep __user *user_fence_rep;
27428 int ret;
27429 - void *user_cmd;
27430 + void __user *user_cmd;
27431 void *cmd;
27432 uint32_t sequence;
27433 struct vmw_sw_context *sw_context = &dev_priv->ctx;
27434 diff -urNp linux-3.0.8/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c linux-3.0.8/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
27435 --- linux-3.0.8/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c 2011-07-21 22:17:23.000000000 -0400
27436 +++ linux-3.0.8/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c 2011-08-23 21:47:55.000000000 -0400
27437 @@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev
27438 while (!vmw_lag_lt(queue, us)) {
27439 spin_lock(&queue->lock);
27440 if (list_empty(&queue->head))
27441 - sequence = atomic_read(&dev_priv->fence_seq);
27442 + sequence = atomic_read_unchecked(&dev_priv->fence_seq);
27443 else {
27444 fence = list_first_entry(&queue->head,
27445 struct vmw_fence, head);
27446 diff -urNp linux-3.0.8/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c linux-3.0.8/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
27447 --- linux-3.0.8/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c 2011-07-21 22:17:23.000000000 -0400
27448 +++ linux-3.0.8/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c 2011-10-06 04:17:55.000000000 -0400
27449 @@ -137,7 +137,7 @@ int vmw_fifo_init(struct vmw_private *de
27450 (unsigned int) min,
27451 (unsigned int) fifo->capabilities);
27452
27453 - atomic_set(&dev_priv->fence_seq, dev_priv->last_read_sequence);
27454 + atomic_set_unchecked(&dev_priv->fence_seq, dev_priv->last_read_sequence);
27455 iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE);
27456 vmw_fence_queue_init(&fifo->fence_queue);
27457 return vmw_fifo_send_fence(dev_priv, &dummy);
27458 @@ -356,7 +356,7 @@ void *vmw_fifo_reserve(struct vmw_privat
27459 if (reserveable)
27460 iowrite32(bytes, fifo_mem +
27461 SVGA_FIFO_RESERVED);
27462 - return fifo_mem + (next_cmd >> 2);
27463 + return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2);
27464 } else {
27465 need_bounce = true;
27466 }
27467 @@ -476,7 +476,7 @@ int vmw_fifo_send_fence(struct vmw_priva
27468
27469 fm = vmw_fifo_reserve(dev_priv, bytes);
27470 if (unlikely(fm == NULL)) {
27471 - *sequence = atomic_read(&dev_priv->fence_seq);
27472 + *sequence = atomic_read_unchecked(&dev_priv->fence_seq);
27473 ret = -ENOMEM;
27474 (void)vmw_fallback_wait(dev_priv, false, true, *sequence,
27475 false, 3*HZ);
27476 @@ -484,7 +484,7 @@ int vmw_fifo_send_fence(struct vmw_priva
27477 }
27478
27479 do {
27480 - *sequence = atomic_add_return(1, &dev_priv->fence_seq);
27481 + *sequence = atomic_add_return_unchecked(1, &dev_priv->fence_seq);
27482 } while (*sequence == 0);
27483
27484 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
27485 diff -urNp linux-3.0.8/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c linux-3.0.8/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
27486 --- linux-3.0.8/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c 2011-07-21 22:17:23.000000000 -0400
27487 +++ linux-3.0.8/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c 2011-08-23 21:47:55.000000000 -0400
27488 @@ -100,7 +100,7 @@ bool vmw_fence_signaled(struct vmw_priva
27489 * emitted. Then the fence is stale and signaled.
27490 */
27491
27492 - ret = ((atomic_read(&dev_priv->fence_seq) - sequence)
27493 + ret = ((atomic_read_unchecked(&dev_priv->fence_seq) - sequence)
27494 > VMW_FENCE_WRAP);
27495
27496 return ret;
27497 @@ -131,7 +131,7 @@ int vmw_fallback_wait(struct vmw_private
27498
27499 if (fifo_idle)
27500 down_read(&fifo_state->rwsem);
27501 - signal_seq = atomic_read(&dev_priv->fence_seq);
27502 + signal_seq = atomic_read_unchecked(&dev_priv->fence_seq);
27503 ret = 0;
27504
27505 for (;;) {
27506 diff -urNp linux-3.0.8/drivers/hid/hid-core.c linux-3.0.8/drivers/hid/hid-core.c
27507 --- linux-3.0.8/drivers/hid/hid-core.c 2011-07-21 22:17:23.000000000 -0400
27508 +++ linux-3.0.8/drivers/hid/hid-core.c 2011-08-23 21:47:55.000000000 -0400
27509 @@ -1923,7 +1923,7 @@ static bool hid_ignore(struct hid_device
27510
27511 int hid_add_device(struct hid_device *hdev)
27512 {
27513 - static atomic_t id = ATOMIC_INIT(0);
27514 + static atomic_unchecked_t id = ATOMIC_INIT(0);
27515 int ret;
27516
27517 if (WARN_ON(hdev->status & HID_STAT_ADDED))
27518 @@ -1938,7 +1938,7 @@ int hid_add_device(struct hid_device *hd
27519 /* XXX hack, any other cleaner solution after the driver core
27520 * is converted to allow more than 20 bytes as the device name? */
27521 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
27522 - hdev->vendor, hdev->product, atomic_inc_return(&id));
27523 + hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
27524
27525 hid_debug_register(hdev, dev_name(&hdev->dev));
27526 ret = device_add(&hdev->dev);
27527 diff -urNp linux-3.0.8/drivers/hid/usbhid/hiddev.c linux-3.0.8/drivers/hid/usbhid/hiddev.c
27528 --- linux-3.0.8/drivers/hid/usbhid/hiddev.c 2011-07-21 22:17:23.000000000 -0400
27529 +++ linux-3.0.8/drivers/hid/usbhid/hiddev.c 2011-08-23 21:47:55.000000000 -0400
27530 @@ -624,7 +624,7 @@ static long hiddev_ioctl(struct file *fi
27531 break;
27532
27533 case HIDIOCAPPLICATION:
27534 - if (arg < 0 || arg >= hid->maxapplication)
27535 + if (arg >= hid->maxapplication)
27536 break;
27537
27538 for (i = 0; i < hid->maxcollection; i++)
27539 diff -urNp linux-3.0.8/drivers/hwmon/acpi_power_meter.c linux-3.0.8/drivers/hwmon/acpi_power_meter.c
27540 --- linux-3.0.8/drivers/hwmon/acpi_power_meter.c 2011-07-21 22:17:23.000000000 -0400
27541 +++ linux-3.0.8/drivers/hwmon/acpi_power_meter.c 2011-08-23 21:47:55.000000000 -0400
27542 @@ -316,8 +316,6 @@ static ssize_t set_trip(struct device *d
27543 return res;
27544
27545 temp /= 1000;
27546 - if (temp < 0)
27547 - return -EINVAL;
27548
27549 mutex_lock(&resource->lock);
27550 resource->trip[attr->index - 7] = temp;
27551 diff -urNp linux-3.0.8/drivers/hwmon/sht15.c linux-3.0.8/drivers/hwmon/sht15.c
27552 --- linux-3.0.8/drivers/hwmon/sht15.c 2011-07-21 22:17:23.000000000 -0400
27553 +++ linux-3.0.8/drivers/hwmon/sht15.c 2011-08-23 21:47:55.000000000 -0400
27554 @@ -166,7 +166,7 @@ struct sht15_data {
27555 int supply_uV;
27556 bool supply_uV_valid;
27557 struct work_struct update_supply_work;
27558 - atomic_t interrupt_handled;
27559 + atomic_unchecked_t interrupt_handled;
27560 };
27561
27562 /**
27563 @@ -509,13 +509,13 @@ static int sht15_measurement(struct sht1
27564 return ret;
27565
27566 gpio_direction_input(data->pdata->gpio_data);
27567 - atomic_set(&data->interrupt_handled, 0);
27568 + atomic_set_unchecked(&data->interrupt_handled, 0);
27569
27570 enable_irq(gpio_to_irq(data->pdata->gpio_data));
27571 if (gpio_get_value(data->pdata->gpio_data) == 0) {
27572 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
27573 /* Only relevant if the interrupt hasn't occurred. */
27574 - if (!atomic_read(&data->interrupt_handled))
27575 + if (!atomic_read_unchecked(&data->interrupt_handled))
27576 schedule_work(&data->read_work);
27577 }
27578 ret = wait_event_timeout(data->wait_queue,
27579 @@ -782,7 +782,7 @@ static irqreturn_t sht15_interrupt_fired
27580
27581 /* First disable the interrupt */
27582 disable_irq_nosync(irq);
27583 - atomic_inc(&data->interrupt_handled);
27584 + atomic_inc_unchecked(&data->interrupt_handled);
27585 /* Then schedule a reading work struct */
27586 if (data->state != SHT15_READING_NOTHING)
27587 schedule_work(&data->read_work);
27588 @@ -804,11 +804,11 @@ static void sht15_bh_read_data(struct wo
27589 * If not, then start the interrupt again - care here as could
27590 * have gone low in meantime so verify it hasn't!
27591 */
27592 - atomic_set(&data->interrupt_handled, 0);
27593 + atomic_set_unchecked(&data->interrupt_handled, 0);
27594 enable_irq(gpio_to_irq(data->pdata->gpio_data));
27595 /* If still not occurred or another handler has been scheduled */
27596 if (gpio_get_value(data->pdata->gpio_data)
27597 - || atomic_read(&data->interrupt_handled))
27598 + || atomic_read_unchecked(&data->interrupt_handled))
27599 return;
27600 }
27601
27602 diff -urNp linux-3.0.8/drivers/hwmon/w83791d.c linux-3.0.8/drivers/hwmon/w83791d.c
27603 --- linux-3.0.8/drivers/hwmon/w83791d.c 2011-07-21 22:17:23.000000000 -0400
27604 +++ linux-3.0.8/drivers/hwmon/w83791d.c 2011-08-23 21:47:55.000000000 -0400
27605 @@ -329,8 +329,8 @@ static int w83791d_detect(struct i2c_cli
27606 struct i2c_board_info *info);
27607 static int w83791d_remove(struct i2c_client *client);
27608
27609 -static int w83791d_read(struct i2c_client *client, u8 register);
27610 -static int w83791d_write(struct i2c_client *client, u8 register, u8 value);
27611 +static int w83791d_read(struct i2c_client *client, u8 reg);
27612 +static int w83791d_write(struct i2c_client *client, u8 reg, u8 value);
27613 static struct w83791d_data *w83791d_update_device(struct device *dev);
27614
27615 #ifdef DEBUG
27616 diff -urNp linux-3.0.8/drivers/i2c/busses/i2c-amd756-s4882.c linux-3.0.8/drivers/i2c/busses/i2c-amd756-s4882.c
27617 --- linux-3.0.8/drivers/i2c/busses/i2c-amd756-s4882.c 2011-07-21 22:17:23.000000000 -0400
27618 +++ linux-3.0.8/drivers/i2c/busses/i2c-amd756-s4882.c 2011-08-23 21:47:55.000000000 -0400
27619 @@ -43,7 +43,7 @@
27620 extern struct i2c_adapter amd756_smbus;
27621
27622 static struct i2c_adapter *s4882_adapter;
27623 -static struct i2c_algorithm *s4882_algo;
27624 +static i2c_algorithm_no_const *s4882_algo;
27625
27626 /* Wrapper access functions for multiplexed SMBus */
27627 static DEFINE_MUTEX(amd756_lock);
27628 diff -urNp linux-3.0.8/drivers/i2c/busses/i2c-nforce2-s4985.c linux-3.0.8/drivers/i2c/busses/i2c-nforce2-s4985.c
27629 --- linux-3.0.8/drivers/i2c/busses/i2c-nforce2-s4985.c 2011-07-21 22:17:23.000000000 -0400
27630 +++ linux-3.0.8/drivers/i2c/busses/i2c-nforce2-s4985.c 2011-08-23 21:47:55.000000000 -0400
27631 @@ -41,7 +41,7 @@
27632 extern struct i2c_adapter *nforce2_smbus;
27633
27634 static struct i2c_adapter *s4985_adapter;
27635 -static struct i2c_algorithm *s4985_algo;
27636 +static i2c_algorithm_no_const *s4985_algo;
27637
27638 /* Wrapper access functions for multiplexed SMBus */
27639 static DEFINE_MUTEX(nforce2_lock);
27640 diff -urNp linux-3.0.8/drivers/i2c/i2c-mux.c linux-3.0.8/drivers/i2c/i2c-mux.c
27641 --- linux-3.0.8/drivers/i2c/i2c-mux.c 2011-07-21 22:17:23.000000000 -0400
27642 +++ linux-3.0.8/drivers/i2c/i2c-mux.c 2011-08-23 21:47:55.000000000 -0400
27643 @@ -28,7 +28,7 @@
27644 /* multiplexer per channel data */
27645 struct i2c_mux_priv {
27646 struct i2c_adapter adap;
27647 - struct i2c_algorithm algo;
27648 + i2c_algorithm_no_const algo;
27649
27650 struct i2c_adapter *parent;
27651 void *mux_dev; /* the mux chip/device */
27652 diff -urNp linux-3.0.8/drivers/ide/aec62xx.c linux-3.0.8/drivers/ide/aec62xx.c
27653 --- linux-3.0.8/drivers/ide/aec62xx.c 2011-07-21 22:17:23.000000000 -0400
27654 +++ linux-3.0.8/drivers/ide/aec62xx.c 2011-10-11 10:44:33.000000000 -0400
27655 @@ -181,7 +181,7 @@ static const struct ide_port_ops atp86x_
27656 .cable_detect = atp86x_cable_detect,
27657 };
27658
27659 -static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
27660 +static const struct ide_port_info aec62xx_chipsets[] __devinitconst = {
27661 { /* 0: AEC6210 */
27662 .name = DRV_NAME,
27663 .init_chipset = init_chipset_aec62xx,
27664 diff -urNp linux-3.0.8/drivers/ide/alim15x3.c linux-3.0.8/drivers/ide/alim15x3.c
27665 --- linux-3.0.8/drivers/ide/alim15x3.c 2011-07-21 22:17:23.000000000 -0400
27666 +++ linux-3.0.8/drivers/ide/alim15x3.c 2011-10-11 10:44:33.000000000 -0400
27667 @@ -512,7 +512,7 @@ static const struct ide_dma_ops ali_dma_
27668 .dma_sff_read_status = ide_dma_sff_read_status,
27669 };
27670
27671 -static const struct ide_port_info ali15x3_chipset __devinitdata = {
27672 +static const struct ide_port_info ali15x3_chipset __devinitconst = {
27673 .name = DRV_NAME,
27674 .init_chipset = init_chipset_ali15x3,
27675 .init_hwif = init_hwif_ali15x3,
27676 diff -urNp linux-3.0.8/drivers/ide/amd74xx.c linux-3.0.8/drivers/ide/amd74xx.c
27677 --- linux-3.0.8/drivers/ide/amd74xx.c 2011-07-21 22:17:23.000000000 -0400
27678 +++ linux-3.0.8/drivers/ide/amd74xx.c 2011-10-11 10:44:33.000000000 -0400
27679 @@ -223,7 +223,7 @@ static const struct ide_port_ops amd_por
27680 .udma_mask = udma, \
27681 }
27682
27683 -static const struct ide_port_info amd74xx_chipsets[] __devinitdata = {
27684 +static const struct ide_port_info amd74xx_chipsets[] __devinitconst = {
27685 /* 0: AMD7401 */ DECLARE_AMD_DEV(0x00, ATA_UDMA2),
27686 /* 1: AMD7409 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA4),
27687 /* 2: AMD7411/7441 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA5),
27688 diff -urNp linux-3.0.8/drivers/ide/atiixp.c linux-3.0.8/drivers/ide/atiixp.c
27689 --- linux-3.0.8/drivers/ide/atiixp.c 2011-07-21 22:17:23.000000000 -0400
27690 +++ linux-3.0.8/drivers/ide/atiixp.c 2011-10-11 10:44:33.000000000 -0400
27691 @@ -139,7 +139,7 @@ static const struct ide_port_ops atiixp_
27692 .cable_detect = atiixp_cable_detect,
27693 };
27694
27695 -static const struct ide_port_info atiixp_pci_info[] __devinitdata = {
27696 +static const struct ide_port_info atiixp_pci_info[] __devinitconst = {
27697 { /* 0: IXP200/300/400/700 */
27698 .name = DRV_NAME,
27699 .enablebits = {{0x48,0x01,0x00}, {0x48,0x08,0x00}},
27700 diff -urNp linux-3.0.8/drivers/ide/cmd64x.c linux-3.0.8/drivers/ide/cmd64x.c
27701 --- linux-3.0.8/drivers/ide/cmd64x.c 2011-07-21 22:17:23.000000000 -0400
27702 +++ linux-3.0.8/drivers/ide/cmd64x.c 2011-10-11 10:44:33.000000000 -0400
27703 @@ -327,7 +327,7 @@ static const struct ide_dma_ops cmd646_r
27704 .dma_sff_read_status = ide_dma_sff_read_status,
27705 };
27706
27707 -static const struct ide_port_info cmd64x_chipsets[] __devinitdata = {
27708 +static const struct ide_port_info cmd64x_chipsets[] __devinitconst = {
27709 { /* 0: CMD643 */
27710 .name = DRV_NAME,
27711 .init_chipset = init_chipset_cmd64x,
27712 diff -urNp linux-3.0.8/drivers/ide/cs5520.c linux-3.0.8/drivers/ide/cs5520.c
27713 --- linux-3.0.8/drivers/ide/cs5520.c 2011-07-21 22:17:23.000000000 -0400
27714 +++ linux-3.0.8/drivers/ide/cs5520.c 2011-10-11 10:44:33.000000000 -0400
27715 @@ -94,7 +94,7 @@ static const struct ide_port_ops cs5520_
27716 .set_dma_mode = cs5520_set_dma_mode,
27717 };
27718
27719 -static const struct ide_port_info cyrix_chipset __devinitdata = {
27720 +static const struct ide_port_info cyrix_chipset __devinitconst = {
27721 .name = DRV_NAME,
27722 .enablebits = { { 0x60, 0x01, 0x01 }, { 0x60, 0x02, 0x02 } },
27723 .port_ops = &cs5520_port_ops,
27724 diff -urNp linux-3.0.8/drivers/ide/cs5530.c linux-3.0.8/drivers/ide/cs5530.c
27725 --- linux-3.0.8/drivers/ide/cs5530.c 2011-07-21 22:17:23.000000000 -0400
27726 +++ linux-3.0.8/drivers/ide/cs5530.c 2011-10-11 10:44:33.000000000 -0400
27727 @@ -245,7 +245,7 @@ static const struct ide_port_ops cs5530_
27728 .udma_filter = cs5530_udma_filter,
27729 };
27730
27731 -static const struct ide_port_info cs5530_chipset __devinitdata = {
27732 +static const struct ide_port_info cs5530_chipset __devinitconst = {
27733 .name = DRV_NAME,
27734 .init_chipset = init_chipset_cs5530,
27735 .init_hwif = init_hwif_cs5530,
27736 diff -urNp linux-3.0.8/drivers/ide/cs5535.c linux-3.0.8/drivers/ide/cs5535.c
27737 --- linux-3.0.8/drivers/ide/cs5535.c 2011-07-21 22:17:23.000000000 -0400
27738 +++ linux-3.0.8/drivers/ide/cs5535.c 2011-10-11 10:44:33.000000000 -0400
27739 @@ -170,7 +170,7 @@ static const struct ide_port_ops cs5535_
27740 .cable_detect = cs5535_cable_detect,
27741 };
27742
27743 -static const struct ide_port_info cs5535_chipset __devinitdata = {
27744 +static const struct ide_port_info cs5535_chipset __devinitconst = {
27745 .name = DRV_NAME,
27746 .port_ops = &cs5535_port_ops,
27747 .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_POST_SET_MODE,
27748 diff -urNp linux-3.0.8/drivers/ide/cy82c693.c linux-3.0.8/drivers/ide/cy82c693.c
27749 --- linux-3.0.8/drivers/ide/cy82c693.c 2011-07-21 22:17:23.000000000 -0400
27750 +++ linux-3.0.8/drivers/ide/cy82c693.c 2011-10-11 10:44:33.000000000 -0400
27751 @@ -161,7 +161,7 @@ static const struct ide_port_ops cy82c69
27752 .set_dma_mode = cy82c693_set_dma_mode,
27753 };
27754
27755 -static const struct ide_port_info cy82c693_chipset __devinitdata = {
27756 +static const struct ide_port_info cy82c693_chipset __devinitconst = {
27757 .name = DRV_NAME,
27758 .init_iops = init_iops_cy82c693,
27759 .port_ops = &cy82c693_port_ops,
27760 diff -urNp linux-3.0.8/drivers/ide/hpt366.c linux-3.0.8/drivers/ide/hpt366.c
27761 --- linux-3.0.8/drivers/ide/hpt366.c 2011-07-21 22:17:23.000000000 -0400
27762 +++ linux-3.0.8/drivers/ide/hpt366.c 2011-10-11 10:44:33.000000000 -0400
27763 @@ -443,7 +443,7 @@ static struct hpt_timings hpt37x_timings
27764 }
27765 };
27766
27767 -static const struct hpt_info hpt36x __devinitdata = {
27768 +static const struct hpt_info hpt36x __devinitconst = {
27769 .chip_name = "HPT36x",
27770 .chip_type = HPT36x,
27771 .udma_mask = HPT366_ALLOW_ATA66_3 ? (HPT366_ALLOW_ATA66_4 ? ATA_UDMA4 : ATA_UDMA3) : ATA_UDMA2,
27772 @@ -451,7 +451,7 @@ static const struct hpt_info hpt36x __de
27773 .timings = &hpt36x_timings
27774 };
27775
27776 -static const struct hpt_info hpt370 __devinitdata = {
27777 +static const struct hpt_info hpt370 __devinitconst = {
27778 .chip_name = "HPT370",
27779 .chip_type = HPT370,
27780 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
27781 @@ -459,7 +459,7 @@ static const struct hpt_info hpt370 __de
27782 .timings = &hpt37x_timings
27783 };
27784
27785 -static const struct hpt_info hpt370a __devinitdata = {
27786 +static const struct hpt_info hpt370a __devinitconst = {
27787 .chip_name = "HPT370A",
27788 .chip_type = HPT370A,
27789 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
27790 @@ -467,7 +467,7 @@ static const struct hpt_info hpt370a __d
27791 .timings = &hpt37x_timings
27792 };
27793
27794 -static const struct hpt_info hpt374 __devinitdata = {
27795 +static const struct hpt_info hpt374 __devinitconst = {
27796 .chip_name = "HPT374",
27797 .chip_type = HPT374,
27798 .udma_mask = ATA_UDMA5,
27799 @@ -475,7 +475,7 @@ static const struct hpt_info hpt374 __de
27800 .timings = &hpt37x_timings
27801 };
27802
27803 -static const struct hpt_info hpt372 __devinitdata = {
27804 +static const struct hpt_info hpt372 __devinitconst = {
27805 .chip_name = "HPT372",
27806 .chip_type = HPT372,
27807 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
27808 @@ -483,7 +483,7 @@ static const struct hpt_info hpt372 __de
27809 .timings = &hpt37x_timings
27810 };
27811
27812 -static const struct hpt_info hpt372a __devinitdata = {
27813 +static const struct hpt_info hpt372a __devinitconst = {
27814 .chip_name = "HPT372A",
27815 .chip_type = HPT372A,
27816 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
27817 @@ -491,7 +491,7 @@ static const struct hpt_info hpt372a __d
27818 .timings = &hpt37x_timings
27819 };
27820
27821 -static const struct hpt_info hpt302 __devinitdata = {
27822 +static const struct hpt_info hpt302 __devinitconst = {
27823 .chip_name = "HPT302",
27824 .chip_type = HPT302,
27825 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
27826 @@ -499,7 +499,7 @@ static const struct hpt_info hpt302 __de
27827 .timings = &hpt37x_timings
27828 };
27829
27830 -static const struct hpt_info hpt371 __devinitdata = {
27831 +static const struct hpt_info hpt371 __devinitconst = {
27832 .chip_name = "HPT371",
27833 .chip_type = HPT371,
27834 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
27835 @@ -507,7 +507,7 @@ static const struct hpt_info hpt371 __de
27836 .timings = &hpt37x_timings
27837 };
27838
27839 -static const struct hpt_info hpt372n __devinitdata = {
27840 +static const struct hpt_info hpt372n __devinitconst = {
27841 .chip_name = "HPT372N",
27842 .chip_type = HPT372N,
27843 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
27844 @@ -515,7 +515,7 @@ static const struct hpt_info hpt372n __d
27845 .timings = &hpt37x_timings
27846 };
27847
27848 -static const struct hpt_info hpt302n __devinitdata = {
27849 +static const struct hpt_info hpt302n __devinitconst = {
27850 .chip_name = "HPT302N",
27851 .chip_type = HPT302N,
27852 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
27853 @@ -523,7 +523,7 @@ static const struct hpt_info hpt302n __d
27854 .timings = &hpt37x_timings
27855 };
27856
27857 -static const struct hpt_info hpt371n __devinitdata = {
27858 +static const struct hpt_info hpt371n __devinitconst = {
27859 .chip_name = "HPT371N",
27860 .chip_type = HPT371N,
27861 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
27862 @@ -1361,7 +1361,7 @@ static const struct ide_dma_ops hpt36x_d
27863 .dma_sff_read_status = ide_dma_sff_read_status,
27864 };
27865
27866 -static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
27867 +static const struct ide_port_info hpt366_chipsets[] __devinitconst = {
27868 { /* 0: HPT36x */
27869 .name = DRV_NAME,
27870 .init_chipset = init_chipset_hpt366,
27871 diff -urNp linux-3.0.8/drivers/ide/ide-cd.c linux-3.0.8/drivers/ide/ide-cd.c
27872 --- linux-3.0.8/drivers/ide/ide-cd.c 2011-07-21 22:17:23.000000000 -0400
27873 +++ linux-3.0.8/drivers/ide/ide-cd.c 2011-08-23 21:47:55.000000000 -0400
27874 @@ -769,7 +769,7 @@ static void cdrom_do_block_pc(ide_drive_
27875 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
27876 if ((unsigned long)buf & alignment
27877 || blk_rq_bytes(rq) & q->dma_pad_mask
27878 - || object_is_on_stack(buf))
27879 + || object_starts_on_stack(buf))
27880 drive->dma = 0;
27881 }
27882 }
27883 diff -urNp linux-3.0.8/drivers/ide/ide-floppy.c linux-3.0.8/drivers/ide/ide-floppy.c
27884 --- linux-3.0.8/drivers/ide/ide-floppy.c 2011-07-21 22:17:23.000000000 -0400
27885 +++ linux-3.0.8/drivers/ide/ide-floppy.c 2011-08-23 21:48:14.000000000 -0400
27886 @@ -379,6 +379,8 @@ static int ide_floppy_get_capacity(ide_d
27887 u8 pc_buf[256], header_len, desc_cnt;
27888 int i, rc = 1, blocks, length;
27889
27890 + pax_track_stack();
27891 +
27892 ide_debug_log(IDE_DBG_FUNC, "enter");
27893
27894 drive->bios_cyl = 0;
27895 diff -urNp linux-3.0.8/drivers/ide/ide-pci-generic.c linux-3.0.8/drivers/ide/ide-pci-generic.c
27896 --- linux-3.0.8/drivers/ide/ide-pci-generic.c 2011-07-21 22:17:23.000000000 -0400
27897 +++ linux-3.0.8/drivers/ide/ide-pci-generic.c 2011-10-11 10:44:33.000000000 -0400
27898 @@ -53,7 +53,7 @@ static const struct ide_port_ops netcell
27899 .udma_mask = ATA_UDMA6, \
27900 }
27901
27902 -static const struct ide_port_info generic_chipsets[] __devinitdata = {
27903 +static const struct ide_port_info generic_chipsets[] __devinitconst = {
27904 /* 0: Unknown */
27905 DECLARE_GENERIC_PCI_DEV(0),
27906
27907 diff -urNp linux-3.0.8/drivers/ide/it8172.c linux-3.0.8/drivers/ide/it8172.c
27908 --- linux-3.0.8/drivers/ide/it8172.c 2011-07-21 22:17:23.000000000 -0400
27909 +++ linux-3.0.8/drivers/ide/it8172.c 2011-10-11 10:44:33.000000000 -0400
27910 @@ -115,7 +115,7 @@ static const struct ide_port_ops it8172_
27911 .set_dma_mode = it8172_set_dma_mode,
27912 };
27913
27914 -static const struct ide_port_info it8172_port_info __devinitdata = {
27915 +static const struct ide_port_info it8172_port_info __devinitconst = {
27916 .name = DRV_NAME,
27917 .port_ops = &it8172_port_ops,
27918 .enablebits = { {0x41, 0x80, 0x80}, {0x00, 0x00, 0x00} },
27919 diff -urNp linux-3.0.8/drivers/ide/it8213.c linux-3.0.8/drivers/ide/it8213.c
27920 --- linux-3.0.8/drivers/ide/it8213.c 2011-07-21 22:17:23.000000000 -0400
27921 +++ linux-3.0.8/drivers/ide/it8213.c 2011-10-11 10:44:33.000000000 -0400
27922 @@ -156,7 +156,7 @@ static const struct ide_port_ops it8213_
27923 .cable_detect = it8213_cable_detect,
27924 };
27925
27926 -static const struct ide_port_info it8213_chipset __devinitdata = {
27927 +static const struct ide_port_info it8213_chipset __devinitconst = {
27928 .name = DRV_NAME,
27929 .enablebits = { {0x41, 0x80, 0x80} },
27930 .port_ops = &it8213_port_ops,
27931 diff -urNp linux-3.0.8/drivers/ide/it821x.c linux-3.0.8/drivers/ide/it821x.c
27932 --- linux-3.0.8/drivers/ide/it821x.c 2011-07-21 22:17:23.000000000 -0400
27933 +++ linux-3.0.8/drivers/ide/it821x.c 2011-10-11 10:44:33.000000000 -0400
27934 @@ -630,7 +630,7 @@ static const struct ide_port_ops it821x_
27935 .cable_detect = it821x_cable_detect,
27936 };
27937
27938 -static const struct ide_port_info it821x_chipset __devinitdata = {
27939 +static const struct ide_port_info it821x_chipset __devinitconst = {
27940 .name = DRV_NAME,
27941 .init_chipset = init_chipset_it821x,
27942 .init_hwif = init_hwif_it821x,
27943 diff -urNp linux-3.0.8/drivers/ide/jmicron.c linux-3.0.8/drivers/ide/jmicron.c
27944 --- linux-3.0.8/drivers/ide/jmicron.c 2011-07-21 22:17:23.000000000 -0400
27945 +++ linux-3.0.8/drivers/ide/jmicron.c 2011-10-11 10:44:33.000000000 -0400
27946 @@ -102,7 +102,7 @@ static const struct ide_port_ops jmicron
27947 .cable_detect = jmicron_cable_detect,
27948 };
27949
27950 -static const struct ide_port_info jmicron_chipset __devinitdata = {
27951 +static const struct ide_port_info jmicron_chipset __devinitconst = {
27952 .name = DRV_NAME,
27953 .enablebits = { { 0x40, 0x01, 0x01 }, { 0x40, 0x10, 0x10 } },
27954 .port_ops = &jmicron_port_ops,
27955 diff -urNp linux-3.0.8/drivers/ide/ns87415.c linux-3.0.8/drivers/ide/ns87415.c
27956 --- linux-3.0.8/drivers/ide/ns87415.c 2011-07-21 22:17:23.000000000 -0400
27957 +++ linux-3.0.8/drivers/ide/ns87415.c 2011-10-11 10:44:33.000000000 -0400
27958 @@ -293,7 +293,7 @@ static const struct ide_dma_ops ns87415_
27959 .dma_sff_read_status = superio_dma_sff_read_status,
27960 };
27961
27962 -static const struct ide_port_info ns87415_chipset __devinitdata = {
27963 +static const struct ide_port_info ns87415_chipset __devinitconst = {
27964 .name = DRV_NAME,
27965 .init_hwif = init_hwif_ns87415,
27966 .tp_ops = &ns87415_tp_ops,
27967 diff -urNp linux-3.0.8/drivers/ide/opti621.c linux-3.0.8/drivers/ide/opti621.c
27968 --- linux-3.0.8/drivers/ide/opti621.c 2011-07-21 22:17:23.000000000 -0400
27969 +++ linux-3.0.8/drivers/ide/opti621.c 2011-10-11 10:44:33.000000000 -0400
27970 @@ -131,7 +131,7 @@ static const struct ide_port_ops opti621
27971 .set_pio_mode = opti621_set_pio_mode,
27972 };
27973
27974 -static const struct ide_port_info opti621_chipset __devinitdata = {
27975 +static const struct ide_port_info opti621_chipset __devinitconst = {
27976 .name = DRV_NAME,
27977 .enablebits = { {0x45, 0x80, 0x00}, {0x40, 0x08, 0x00} },
27978 .port_ops = &opti621_port_ops,
27979 diff -urNp linux-3.0.8/drivers/ide/pdc202xx_new.c linux-3.0.8/drivers/ide/pdc202xx_new.c
27980 --- linux-3.0.8/drivers/ide/pdc202xx_new.c 2011-07-21 22:17:23.000000000 -0400
27981 +++ linux-3.0.8/drivers/ide/pdc202xx_new.c 2011-10-11 10:44:33.000000000 -0400
27982 @@ -465,7 +465,7 @@ static const struct ide_port_ops pdcnew_
27983 .udma_mask = udma, \
27984 }
27985
27986 -static const struct ide_port_info pdcnew_chipsets[] __devinitdata = {
27987 +static const struct ide_port_info pdcnew_chipsets[] __devinitconst = {
27988 /* 0: PDC202{68,70} */ DECLARE_PDCNEW_DEV(ATA_UDMA5),
27989 /* 1: PDC202{69,71,75,76,77} */ DECLARE_PDCNEW_DEV(ATA_UDMA6),
27990 };
27991 diff -urNp linux-3.0.8/drivers/ide/pdc202xx_old.c linux-3.0.8/drivers/ide/pdc202xx_old.c
27992 --- linux-3.0.8/drivers/ide/pdc202xx_old.c 2011-07-21 22:17:23.000000000 -0400
27993 +++ linux-3.0.8/drivers/ide/pdc202xx_old.c 2011-10-11 10:44:33.000000000 -0400
27994 @@ -270,7 +270,7 @@ static const struct ide_dma_ops pdc2026x
27995 .max_sectors = sectors, \
27996 }
27997
27998 -static const struct ide_port_info pdc202xx_chipsets[] __devinitdata = {
27999 +static const struct ide_port_info pdc202xx_chipsets[] __devinitconst = {
28000 { /* 0: PDC20246 */
28001 .name = DRV_NAME,
28002 .init_chipset = init_chipset_pdc202xx,
28003 diff -urNp linux-3.0.8/drivers/ide/piix.c linux-3.0.8/drivers/ide/piix.c
28004 --- linux-3.0.8/drivers/ide/piix.c 2011-07-21 22:17:23.000000000 -0400
28005 +++ linux-3.0.8/drivers/ide/piix.c 2011-10-11 10:44:33.000000000 -0400
28006 @@ -344,7 +344,7 @@ static const struct ide_port_ops ich_por
28007 .udma_mask = udma, \
28008 }
28009
28010 -static const struct ide_port_info piix_pci_info[] __devinitdata = {
28011 +static const struct ide_port_info piix_pci_info[] __devinitconst = {
28012 /* 0: MPIIX */
28013 { /*
28014 * MPIIX actually has only a single IDE channel mapped to
28015 diff -urNp linux-3.0.8/drivers/ide/rz1000.c linux-3.0.8/drivers/ide/rz1000.c
28016 --- linux-3.0.8/drivers/ide/rz1000.c 2011-07-21 22:17:23.000000000 -0400
28017 +++ linux-3.0.8/drivers/ide/rz1000.c 2011-10-11 10:44:33.000000000 -0400
28018 @@ -38,7 +38,7 @@ static int __devinit rz1000_disable_read
28019 }
28020 }
28021
28022 -static const struct ide_port_info rz1000_chipset __devinitdata = {
28023 +static const struct ide_port_info rz1000_chipset __devinitconst = {
28024 .name = DRV_NAME,
28025 .host_flags = IDE_HFLAG_NO_DMA,
28026 };
28027 diff -urNp linux-3.0.8/drivers/ide/sc1200.c linux-3.0.8/drivers/ide/sc1200.c
28028 --- linux-3.0.8/drivers/ide/sc1200.c 2011-07-21 22:17:23.000000000 -0400
28029 +++ linux-3.0.8/drivers/ide/sc1200.c 2011-10-11 10:44:33.000000000 -0400
28030 @@ -291,7 +291,7 @@ static const struct ide_dma_ops sc1200_d
28031 .dma_sff_read_status = ide_dma_sff_read_status,
28032 };
28033
28034 -static const struct ide_port_info sc1200_chipset __devinitdata = {
28035 +static const struct ide_port_info sc1200_chipset __devinitconst = {
28036 .name = DRV_NAME,
28037 .port_ops = &sc1200_port_ops,
28038 .dma_ops = &sc1200_dma_ops,
28039 diff -urNp linux-3.0.8/drivers/ide/scc_pata.c linux-3.0.8/drivers/ide/scc_pata.c
28040 --- linux-3.0.8/drivers/ide/scc_pata.c 2011-07-21 22:17:23.000000000 -0400
28041 +++ linux-3.0.8/drivers/ide/scc_pata.c 2011-10-11 10:44:33.000000000 -0400
28042 @@ -811,7 +811,7 @@ static const struct ide_dma_ops scc_dma_
28043 .dma_sff_read_status = scc_dma_sff_read_status,
28044 };
28045
28046 -static const struct ide_port_info scc_chipset __devinitdata = {
28047 +static const struct ide_port_info scc_chipset __devinitconst = {
28048 .name = "sccIDE",
28049 .init_iops = init_iops_scc,
28050 .init_dma = scc_init_dma,
28051 diff -urNp linux-3.0.8/drivers/ide/serverworks.c linux-3.0.8/drivers/ide/serverworks.c
28052 --- linux-3.0.8/drivers/ide/serverworks.c 2011-07-21 22:17:23.000000000 -0400
28053 +++ linux-3.0.8/drivers/ide/serverworks.c 2011-10-11 10:44:33.000000000 -0400
28054 @@ -337,7 +337,7 @@ static const struct ide_port_ops svwks_p
28055 .cable_detect = svwks_cable_detect,
28056 };
28057
28058 -static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
28059 +static const struct ide_port_info serverworks_chipsets[] __devinitconst = {
28060 { /* 0: OSB4 */
28061 .name = DRV_NAME,
28062 .init_chipset = init_chipset_svwks,
28063 diff -urNp linux-3.0.8/drivers/ide/setup-pci.c linux-3.0.8/drivers/ide/setup-pci.c
28064 --- linux-3.0.8/drivers/ide/setup-pci.c 2011-07-21 22:17:23.000000000 -0400
28065 +++ linux-3.0.8/drivers/ide/setup-pci.c 2011-08-23 21:48:14.000000000 -0400
28066 @@ -542,6 +542,8 @@ int ide_pci_init_two(struct pci_dev *dev
28067 int ret, i, n_ports = dev2 ? 4 : 2;
28068 struct ide_hw hw[4], *hws[] = { NULL, NULL, NULL, NULL };
28069
28070 + pax_track_stack();
28071 +
28072 for (i = 0; i < n_ports / 2; i++) {
28073 ret = ide_setup_pci_controller(pdev[i], d, !i);
28074 if (ret < 0)
28075 diff -urNp linux-3.0.8/drivers/ide/siimage.c linux-3.0.8/drivers/ide/siimage.c
28076 --- linux-3.0.8/drivers/ide/siimage.c 2011-07-21 22:17:23.000000000 -0400
28077 +++ linux-3.0.8/drivers/ide/siimage.c 2011-10-11 10:44:33.000000000 -0400
28078 @@ -719,7 +719,7 @@ static const struct ide_dma_ops sil_dma_
28079 .udma_mask = ATA_UDMA6, \
28080 }
28081
28082 -static const struct ide_port_info siimage_chipsets[] __devinitdata = {
28083 +static const struct ide_port_info siimage_chipsets[] __devinitconst = {
28084 /* 0: SiI680 */ DECLARE_SII_DEV(&sil_pata_port_ops),
28085 /* 1: SiI3112 */ DECLARE_SII_DEV(&sil_sata_port_ops)
28086 };
28087 diff -urNp linux-3.0.8/drivers/ide/sis5513.c linux-3.0.8/drivers/ide/sis5513.c
28088 --- linux-3.0.8/drivers/ide/sis5513.c 2011-07-21 22:17:23.000000000 -0400
28089 +++ linux-3.0.8/drivers/ide/sis5513.c 2011-10-11 10:44:33.000000000 -0400
28090 @@ -563,7 +563,7 @@ static const struct ide_port_ops sis_ata
28091 .cable_detect = sis_cable_detect,
28092 };
28093
28094 -static const struct ide_port_info sis5513_chipset __devinitdata = {
28095 +static const struct ide_port_info sis5513_chipset __devinitconst = {
28096 .name = DRV_NAME,
28097 .init_chipset = init_chipset_sis5513,
28098 .enablebits = { {0x4a, 0x02, 0x02}, {0x4a, 0x04, 0x04} },
28099 diff -urNp linux-3.0.8/drivers/ide/sl82c105.c linux-3.0.8/drivers/ide/sl82c105.c
28100 --- linux-3.0.8/drivers/ide/sl82c105.c 2011-07-21 22:17:23.000000000 -0400
28101 +++ linux-3.0.8/drivers/ide/sl82c105.c 2011-10-11 10:44:33.000000000 -0400
28102 @@ -299,7 +299,7 @@ static const struct ide_dma_ops sl82c105
28103 .dma_sff_read_status = ide_dma_sff_read_status,
28104 };
28105
28106 -static const struct ide_port_info sl82c105_chipset __devinitdata = {
28107 +static const struct ide_port_info sl82c105_chipset __devinitconst = {
28108 .name = DRV_NAME,
28109 .init_chipset = init_chipset_sl82c105,
28110 .enablebits = {{0x40,0x01,0x01}, {0x40,0x10,0x10}},
28111 diff -urNp linux-3.0.8/drivers/ide/slc90e66.c linux-3.0.8/drivers/ide/slc90e66.c
28112 --- linux-3.0.8/drivers/ide/slc90e66.c 2011-07-21 22:17:23.000000000 -0400
28113 +++ linux-3.0.8/drivers/ide/slc90e66.c 2011-10-11 10:44:33.000000000 -0400
28114 @@ -132,7 +132,7 @@ static const struct ide_port_ops slc90e6
28115 .cable_detect = slc90e66_cable_detect,
28116 };
28117
28118 -static const struct ide_port_info slc90e66_chipset __devinitdata = {
28119 +static const struct ide_port_info slc90e66_chipset __devinitconst = {
28120 .name = DRV_NAME,
28121 .enablebits = { {0x41, 0x80, 0x80}, {0x43, 0x80, 0x80} },
28122 .port_ops = &slc90e66_port_ops,
28123 diff -urNp linux-3.0.8/drivers/ide/tc86c001.c linux-3.0.8/drivers/ide/tc86c001.c
28124 --- linux-3.0.8/drivers/ide/tc86c001.c 2011-07-21 22:17:23.000000000 -0400
28125 +++ linux-3.0.8/drivers/ide/tc86c001.c 2011-10-11 10:44:33.000000000 -0400
28126 @@ -191,7 +191,7 @@ static const struct ide_dma_ops tc86c001
28127 .dma_sff_read_status = ide_dma_sff_read_status,
28128 };
28129
28130 -static const struct ide_port_info tc86c001_chipset __devinitdata = {
28131 +static const struct ide_port_info tc86c001_chipset __devinitconst = {
28132 .name = DRV_NAME,
28133 .init_hwif = init_hwif_tc86c001,
28134 .port_ops = &tc86c001_port_ops,
28135 diff -urNp linux-3.0.8/drivers/ide/triflex.c linux-3.0.8/drivers/ide/triflex.c
28136 --- linux-3.0.8/drivers/ide/triflex.c 2011-07-21 22:17:23.000000000 -0400
28137 +++ linux-3.0.8/drivers/ide/triflex.c 2011-10-11 10:44:33.000000000 -0400
28138 @@ -92,7 +92,7 @@ static const struct ide_port_ops triflex
28139 .set_dma_mode = triflex_set_mode,
28140 };
28141
28142 -static const struct ide_port_info triflex_device __devinitdata = {
28143 +static const struct ide_port_info triflex_device __devinitconst = {
28144 .name = DRV_NAME,
28145 .enablebits = {{0x80, 0x01, 0x01}, {0x80, 0x02, 0x02}},
28146 .port_ops = &triflex_port_ops,
28147 diff -urNp linux-3.0.8/drivers/ide/trm290.c linux-3.0.8/drivers/ide/trm290.c
28148 --- linux-3.0.8/drivers/ide/trm290.c 2011-07-21 22:17:23.000000000 -0400
28149 +++ linux-3.0.8/drivers/ide/trm290.c 2011-10-11 10:44:33.000000000 -0400
28150 @@ -324,7 +324,7 @@ static struct ide_dma_ops trm290_dma_ops
28151 .dma_check = trm290_dma_check,
28152 };
28153
28154 -static const struct ide_port_info trm290_chipset __devinitdata = {
28155 +static const struct ide_port_info trm290_chipset __devinitconst = {
28156 .name = DRV_NAME,
28157 .init_hwif = init_hwif_trm290,
28158 .tp_ops = &trm290_tp_ops,
28159 diff -urNp linux-3.0.8/drivers/ide/via82cxxx.c linux-3.0.8/drivers/ide/via82cxxx.c
28160 --- linux-3.0.8/drivers/ide/via82cxxx.c 2011-07-21 22:17:23.000000000 -0400
28161 +++ linux-3.0.8/drivers/ide/via82cxxx.c 2011-10-11 10:44:33.000000000 -0400
28162 @@ -403,7 +403,7 @@ static const struct ide_port_ops via_por
28163 .cable_detect = via82cxxx_cable_detect,
28164 };
28165
28166 -static const struct ide_port_info via82cxxx_chipset __devinitdata = {
28167 +static const struct ide_port_info via82cxxx_chipset __devinitconst = {
28168 .name = DRV_NAME,
28169 .init_chipset = init_chipset_via82cxxx,
28170 .enablebits = { { 0x40, 0x02, 0x02 }, { 0x40, 0x01, 0x01 } },
28171 diff -urNp linux-3.0.8/drivers/infiniband/core/cm.c linux-3.0.8/drivers/infiniband/core/cm.c
28172 --- linux-3.0.8/drivers/infiniband/core/cm.c 2011-07-21 22:17:23.000000000 -0400
28173 +++ linux-3.0.8/drivers/infiniband/core/cm.c 2011-08-23 21:47:55.000000000 -0400
28174 @@ -113,7 +113,7 @@ static char const counter_group_names[CM
28175
28176 struct cm_counter_group {
28177 struct kobject obj;
28178 - atomic_long_t counter[CM_ATTR_COUNT];
28179 + atomic_long_unchecked_t counter[CM_ATTR_COUNT];
28180 };
28181
28182 struct cm_counter_attribute {
28183 @@ -1387,7 +1387,7 @@ static void cm_dup_req_handler(struct cm
28184 struct ib_mad_send_buf *msg = NULL;
28185 int ret;
28186
28187 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
28188 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
28189 counter[CM_REQ_COUNTER]);
28190
28191 /* Quick state check to discard duplicate REQs. */
28192 @@ -1765,7 +1765,7 @@ static void cm_dup_rep_handler(struct cm
28193 if (!cm_id_priv)
28194 return;
28195
28196 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
28197 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
28198 counter[CM_REP_COUNTER]);
28199 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
28200 if (ret)
28201 @@ -1932,7 +1932,7 @@ static int cm_rtu_handler(struct cm_work
28202 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
28203 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
28204 spin_unlock_irq(&cm_id_priv->lock);
28205 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
28206 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
28207 counter[CM_RTU_COUNTER]);
28208 goto out;
28209 }
28210 @@ -2115,7 +2115,7 @@ static int cm_dreq_handler(struct cm_wor
28211 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
28212 dreq_msg->local_comm_id);
28213 if (!cm_id_priv) {
28214 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
28215 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
28216 counter[CM_DREQ_COUNTER]);
28217 cm_issue_drep(work->port, work->mad_recv_wc);
28218 return -EINVAL;
28219 @@ -2140,7 +2140,7 @@ static int cm_dreq_handler(struct cm_wor
28220 case IB_CM_MRA_REP_RCVD:
28221 break;
28222 case IB_CM_TIMEWAIT:
28223 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
28224 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
28225 counter[CM_DREQ_COUNTER]);
28226 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
28227 goto unlock;
28228 @@ -2154,7 +2154,7 @@ static int cm_dreq_handler(struct cm_wor
28229 cm_free_msg(msg);
28230 goto deref;
28231 case IB_CM_DREQ_RCVD:
28232 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
28233 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
28234 counter[CM_DREQ_COUNTER]);
28235 goto unlock;
28236 default:
28237 @@ -2521,7 +2521,7 @@ static int cm_mra_handler(struct cm_work
28238 ib_modify_mad(cm_id_priv->av.port->mad_agent,
28239 cm_id_priv->msg, timeout)) {
28240 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
28241 - atomic_long_inc(&work->port->
28242 + atomic_long_inc_unchecked(&work->port->
28243 counter_group[CM_RECV_DUPLICATES].
28244 counter[CM_MRA_COUNTER]);
28245 goto out;
28246 @@ -2530,7 +2530,7 @@ static int cm_mra_handler(struct cm_work
28247 break;
28248 case IB_CM_MRA_REQ_RCVD:
28249 case IB_CM_MRA_REP_RCVD:
28250 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
28251 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
28252 counter[CM_MRA_COUNTER]);
28253 /* fall through */
28254 default:
28255 @@ -2692,7 +2692,7 @@ static int cm_lap_handler(struct cm_work
28256 case IB_CM_LAP_IDLE:
28257 break;
28258 case IB_CM_MRA_LAP_SENT:
28259 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
28260 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
28261 counter[CM_LAP_COUNTER]);
28262 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
28263 goto unlock;
28264 @@ -2708,7 +2708,7 @@ static int cm_lap_handler(struct cm_work
28265 cm_free_msg(msg);
28266 goto deref;
28267 case IB_CM_LAP_RCVD:
28268 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
28269 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
28270 counter[CM_LAP_COUNTER]);
28271 goto unlock;
28272 default:
28273 @@ -2992,7 +2992,7 @@ static int cm_sidr_req_handler(struct cm
28274 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
28275 if (cur_cm_id_priv) {
28276 spin_unlock_irq(&cm.lock);
28277 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
28278 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
28279 counter[CM_SIDR_REQ_COUNTER]);
28280 goto out; /* Duplicate message. */
28281 }
28282 @@ -3204,10 +3204,10 @@ static void cm_send_handler(struct ib_ma
28283 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
28284 msg->retries = 1;
28285
28286 - atomic_long_add(1 + msg->retries,
28287 + atomic_long_add_unchecked(1 + msg->retries,
28288 &port->counter_group[CM_XMIT].counter[attr_index]);
28289 if (msg->retries)
28290 - atomic_long_add(msg->retries,
28291 + atomic_long_add_unchecked(msg->retries,
28292 &port->counter_group[CM_XMIT_RETRIES].
28293 counter[attr_index]);
28294
28295 @@ -3417,7 +3417,7 @@ static void cm_recv_handler(struct ib_ma
28296 }
28297
28298 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
28299 - atomic_long_inc(&port->counter_group[CM_RECV].
28300 + atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
28301 counter[attr_id - CM_ATTR_ID_OFFSET]);
28302
28303 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
28304 @@ -3615,7 +3615,7 @@ static ssize_t cm_show_counter(struct ko
28305 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
28306
28307 return sprintf(buf, "%ld\n",
28308 - atomic_long_read(&group->counter[cm_attr->index]));
28309 + atomic_long_read_unchecked(&group->counter[cm_attr->index]));
28310 }
28311
28312 static const struct sysfs_ops cm_counter_ops = {
28313 diff -urNp linux-3.0.8/drivers/infiniband/core/fmr_pool.c linux-3.0.8/drivers/infiniband/core/fmr_pool.c
28314 --- linux-3.0.8/drivers/infiniband/core/fmr_pool.c 2011-07-21 22:17:23.000000000 -0400
28315 +++ linux-3.0.8/drivers/infiniband/core/fmr_pool.c 2011-08-23 21:47:55.000000000 -0400
28316 @@ -97,8 +97,8 @@ struct ib_fmr_pool {
28317
28318 struct task_struct *thread;
28319
28320 - atomic_t req_ser;
28321 - atomic_t flush_ser;
28322 + atomic_unchecked_t req_ser;
28323 + atomic_unchecked_t flush_ser;
28324
28325 wait_queue_head_t force_wait;
28326 };
28327 @@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *p
28328 struct ib_fmr_pool *pool = pool_ptr;
28329
28330 do {
28331 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
28332 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
28333 ib_fmr_batch_release(pool);
28334
28335 - atomic_inc(&pool->flush_ser);
28336 + atomic_inc_unchecked(&pool->flush_ser);
28337 wake_up_interruptible(&pool->force_wait);
28338
28339 if (pool->flush_function)
28340 @@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *p
28341 }
28342
28343 set_current_state(TASK_INTERRUPTIBLE);
28344 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
28345 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
28346 !kthread_should_stop())
28347 schedule();
28348 __set_current_state(TASK_RUNNING);
28349 @@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(s
28350 pool->dirty_watermark = params->dirty_watermark;
28351 pool->dirty_len = 0;
28352 spin_lock_init(&pool->pool_lock);
28353 - atomic_set(&pool->req_ser, 0);
28354 - atomic_set(&pool->flush_ser, 0);
28355 + atomic_set_unchecked(&pool->req_ser, 0);
28356 + atomic_set_unchecked(&pool->flush_ser, 0);
28357 init_waitqueue_head(&pool->force_wait);
28358
28359 pool->thread = kthread_run(ib_fmr_cleanup_thread,
28360 @@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool
28361 }
28362 spin_unlock_irq(&pool->pool_lock);
28363
28364 - serial = atomic_inc_return(&pool->req_ser);
28365 + serial = atomic_inc_return_unchecked(&pool->req_ser);
28366 wake_up_process(pool->thread);
28367
28368 if (wait_event_interruptible(pool->force_wait,
28369 - atomic_read(&pool->flush_ser) - serial >= 0))
28370 + atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
28371 return -EINTR;
28372
28373 return 0;
28374 @@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr
28375 } else {
28376 list_add_tail(&fmr->list, &pool->dirty_list);
28377 if (++pool->dirty_len >= pool->dirty_watermark) {
28378 - atomic_inc(&pool->req_ser);
28379 + atomic_inc_unchecked(&pool->req_ser);
28380 wake_up_process(pool->thread);
28381 }
28382 }
28383 diff -urNp linux-3.0.8/drivers/infiniband/hw/cxgb4/mem.c linux-3.0.8/drivers/infiniband/hw/cxgb4/mem.c
28384 --- linux-3.0.8/drivers/infiniband/hw/cxgb4/mem.c 2011-07-21 22:17:23.000000000 -0400
28385 +++ linux-3.0.8/drivers/infiniband/hw/cxgb4/mem.c 2011-08-23 21:47:55.000000000 -0400
28386 @@ -122,7 +122,7 @@ static int write_tpt_entry(struct c4iw_r
28387 int err;
28388 struct fw_ri_tpte tpt;
28389 u32 stag_idx;
28390 - static atomic_t key;
28391 + static atomic_unchecked_t key;
28392
28393 if (c4iw_fatal_error(rdev))
28394 return -EIO;
28395 @@ -135,7 +135,7 @@ static int write_tpt_entry(struct c4iw_r
28396 &rdev->resource.tpt_fifo_lock);
28397 if (!stag_idx)
28398 return -ENOMEM;
28399 - *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
28400 + *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
28401 }
28402 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
28403 __func__, stag_state, type, pdid, stag_idx);
28404 diff -urNp linux-3.0.8/drivers/infiniband/hw/ipath/ipath_fs.c linux-3.0.8/drivers/infiniband/hw/ipath/ipath_fs.c
28405 --- linux-3.0.8/drivers/infiniband/hw/ipath/ipath_fs.c 2011-07-21 22:17:23.000000000 -0400
28406 +++ linux-3.0.8/drivers/infiniband/hw/ipath/ipath_fs.c 2011-08-23 21:48:14.000000000 -0400
28407 @@ -113,6 +113,8 @@ static ssize_t atomic_counters_read(stru
28408 struct infinipath_counters counters;
28409 struct ipath_devdata *dd;
28410
28411 + pax_track_stack();
28412 +
28413 dd = file->f_path.dentry->d_inode->i_private;
28414 dd->ipath_f_read_counters(dd, &counters);
28415
28416 diff -urNp linux-3.0.8/drivers/infiniband/hw/ipath/ipath_rc.c linux-3.0.8/drivers/infiniband/hw/ipath/ipath_rc.c
28417 --- linux-3.0.8/drivers/infiniband/hw/ipath/ipath_rc.c 2011-07-21 22:17:23.000000000 -0400
28418 +++ linux-3.0.8/drivers/infiniband/hw/ipath/ipath_rc.c 2011-08-23 21:47:55.000000000 -0400
28419 @@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *de
28420 struct ib_atomic_eth *ateth;
28421 struct ipath_ack_entry *e;
28422 u64 vaddr;
28423 - atomic64_t *maddr;
28424 + atomic64_unchecked_t *maddr;
28425 u64 sdata;
28426 u32 rkey;
28427 u8 next;
28428 @@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *de
28429 IB_ACCESS_REMOTE_ATOMIC)))
28430 goto nack_acc_unlck;
28431 /* Perform atomic OP and save result. */
28432 - maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
28433 + maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
28434 sdata = be64_to_cpu(ateth->swap_data);
28435 e = &qp->s_ack_queue[qp->r_head_ack_queue];
28436 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
28437 - (u64) atomic64_add_return(sdata, maddr) - sdata :
28438 + (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
28439 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
28440 be64_to_cpu(ateth->compare_data),
28441 sdata);
28442 diff -urNp linux-3.0.8/drivers/infiniband/hw/ipath/ipath_ruc.c linux-3.0.8/drivers/infiniband/hw/ipath/ipath_ruc.c
28443 --- linux-3.0.8/drivers/infiniband/hw/ipath/ipath_ruc.c 2011-07-21 22:17:23.000000000 -0400
28444 +++ linux-3.0.8/drivers/infiniband/hw/ipath/ipath_ruc.c 2011-08-23 21:47:55.000000000 -0400
28445 @@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ip
28446 unsigned long flags;
28447 struct ib_wc wc;
28448 u64 sdata;
28449 - atomic64_t *maddr;
28450 + atomic64_unchecked_t *maddr;
28451 enum ib_wc_status send_status;
28452
28453 /*
28454 @@ -382,11 +382,11 @@ again:
28455 IB_ACCESS_REMOTE_ATOMIC)))
28456 goto acc_err;
28457 /* Perform atomic OP and save result. */
28458 - maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
28459 + maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
28460 sdata = wqe->wr.wr.atomic.compare_add;
28461 *(u64 *) sqp->s_sge.sge.vaddr =
28462 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
28463 - (u64) atomic64_add_return(sdata, maddr) - sdata :
28464 + (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
28465 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
28466 sdata, wqe->wr.wr.atomic.swap);
28467 goto send_comp;
28468 diff -urNp linux-3.0.8/drivers/infiniband/hw/nes/nes.c linux-3.0.8/drivers/infiniband/hw/nes/nes.c
28469 --- linux-3.0.8/drivers/infiniband/hw/nes/nes.c 2011-07-21 22:17:23.000000000 -0400
28470 +++ linux-3.0.8/drivers/infiniband/hw/nes/nes.c 2011-08-23 21:47:55.000000000 -0400
28471 @@ -103,7 +103,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limi
28472 LIST_HEAD(nes_adapter_list);
28473 static LIST_HEAD(nes_dev_list);
28474
28475 -atomic_t qps_destroyed;
28476 +atomic_unchecked_t qps_destroyed;
28477
28478 static unsigned int ee_flsh_adapter;
28479 static unsigned int sysfs_nonidx_addr;
28480 @@ -275,7 +275,7 @@ static void nes_cqp_rem_ref_callback(str
28481 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
28482 struct nes_adapter *nesadapter = nesdev->nesadapter;
28483
28484 - atomic_inc(&qps_destroyed);
28485 + atomic_inc_unchecked(&qps_destroyed);
28486
28487 /* Free the control structures */
28488
28489 diff -urNp linux-3.0.8/drivers/infiniband/hw/nes/nes_cm.c linux-3.0.8/drivers/infiniband/hw/nes/nes_cm.c
28490 --- linux-3.0.8/drivers/infiniband/hw/nes/nes_cm.c 2011-07-21 22:17:23.000000000 -0400
28491 +++ linux-3.0.8/drivers/infiniband/hw/nes/nes_cm.c 2011-08-23 21:47:55.000000000 -0400
28492 @@ -68,14 +68,14 @@ u32 cm_packets_dropped;
28493 u32 cm_packets_retrans;
28494 u32 cm_packets_created;
28495 u32 cm_packets_received;
28496 -atomic_t cm_listens_created;
28497 -atomic_t cm_listens_destroyed;
28498 +atomic_unchecked_t cm_listens_created;
28499 +atomic_unchecked_t cm_listens_destroyed;
28500 u32 cm_backlog_drops;
28501 -atomic_t cm_loopbacks;
28502 -atomic_t cm_nodes_created;
28503 -atomic_t cm_nodes_destroyed;
28504 -atomic_t cm_accel_dropped_pkts;
28505 -atomic_t cm_resets_recvd;
28506 +atomic_unchecked_t cm_loopbacks;
28507 +atomic_unchecked_t cm_nodes_created;
28508 +atomic_unchecked_t cm_nodes_destroyed;
28509 +atomic_unchecked_t cm_accel_dropped_pkts;
28510 +atomic_unchecked_t cm_resets_recvd;
28511
28512 static inline int mini_cm_accelerated(struct nes_cm_core *,
28513 struct nes_cm_node *);
28514 @@ -151,13 +151,13 @@ static struct nes_cm_ops nes_cm_api = {
28515
28516 static struct nes_cm_core *g_cm_core;
28517
28518 -atomic_t cm_connects;
28519 -atomic_t cm_accepts;
28520 -atomic_t cm_disconnects;
28521 -atomic_t cm_closes;
28522 -atomic_t cm_connecteds;
28523 -atomic_t cm_connect_reqs;
28524 -atomic_t cm_rejects;
28525 +atomic_unchecked_t cm_connects;
28526 +atomic_unchecked_t cm_accepts;
28527 +atomic_unchecked_t cm_disconnects;
28528 +atomic_unchecked_t cm_closes;
28529 +atomic_unchecked_t cm_connecteds;
28530 +atomic_unchecked_t cm_connect_reqs;
28531 +atomic_unchecked_t cm_rejects;
28532
28533
28534 /**
28535 @@ -1045,7 +1045,7 @@ static int mini_cm_dec_refcnt_listen(str
28536 kfree(listener);
28537 listener = NULL;
28538 ret = 0;
28539 - atomic_inc(&cm_listens_destroyed);
28540 + atomic_inc_unchecked(&cm_listens_destroyed);
28541 } else {
28542 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
28543 }
28544 @@ -1240,7 +1240,7 @@ static struct nes_cm_node *make_cm_node(
28545 cm_node->rem_mac);
28546
28547 add_hte_node(cm_core, cm_node);
28548 - atomic_inc(&cm_nodes_created);
28549 + atomic_inc_unchecked(&cm_nodes_created);
28550
28551 return cm_node;
28552 }
28553 @@ -1298,7 +1298,7 @@ static int rem_ref_cm_node(struct nes_cm
28554 }
28555
28556 atomic_dec(&cm_core->node_cnt);
28557 - atomic_inc(&cm_nodes_destroyed);
28558 + atomic_inc_unchecked(&cm_nodes_destroyed);
28559 nesqp = cm_node->nesqp;
28560 if (nesqp) {
28561 nesqp->cm_node = NULL;
28562 @@ -1365,7 +1365,7 @@ static int process_options(struct nes_cm
28563
28564 static void drop_packet(struct sk_buff *skb)
28565 {
28566 - atomic_inc(&cm_accel_dropped_pkts);
28567 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
28568 dev_kfree_skb_any(skb);
28569 }
28570
28571 @@ -1428,7 +1428,7 @@ static void handle_rst_pkt(struct nes_cm
28572 {
28573
28574 int reset = 0; /* whether to send reset in case of err.. */
28575 - atomic_inc(&cm_resets_recvd);
28576 + atomic_inc_unchecked(&cm_resets_recvd);
28577 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
28578 " refcnt=%d\n", cm_node, cm_node->state,
28579 atomic_read(&cm_node->ref_count));
28580 @@ -2057,7 +2057,7 @@ static struct nes_cm_node *mini_cm_conne
28581 rem_ref_cm_node(cm_node->cm_core, cm_node);
28582 return NULL;
28583 }
28584 - atomic_inc(&cm_loopbacks);
28585 + atomic_inc_unchecked(&cm_loopbacks);
28586 loopbackremotenode->loopbackpartner = cm_node;
28587 loopbackremotenode->tcp_cntxt.rcv_wscale =
28588 NES_CM_DEFAULT_RCV_WND_SCALE;
28589 @@ -2332,7 +2332,7 @@ static int mini_cm_recv_pkt(struct nes_c
28590 add_ref_cm_node(cm_node);
28591 } else if (cm_node->state == NES_CM_STATE_TSA) {
28592 rem_ref_cm_node(cm_core, cm_node);
28593 - atomic_inc(&cm_accel_dropped_pkts);
28594 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
28595 dev_kfree_skb_any(skb);
28596 break;
28597 }
28598 @@ -2638,7 +2638,7 @@ static int nes_cm_disconn_true(struct ne
28599
28600 if ((cm_id) && (cm_id->event_handler)) {
28601 if (issue_disconn) {
28602 - atomic_inc(&cm_disconnects);
28603 + atomic_inc_unchecked(&cm_disconnects);
28604 cm_event.event = IW_CM_EVENT_DISCONNECT;
28605 cm_event.status = disconn_status;
28606 cm_event.local_addr = cm_id->local_addr;
28607 @@ -2660,7 +2660,7 @@ static int nes_cm_disconn_true(struct ne
28608 }
28609
28610 if (issue_close) {
28611 - atomic_inc(&cm_closes);
28612 + atomic_inc_unchecked(&cm_closes);
28613 nes_disconnect(nesqp, 1);
28614
28615 cm_id->provider_data = nesqp;
28616 @@ -2791,7 +2791,7 @@ int nes_accept(struct iw_cm_id *cm_id, s
28617
28618 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
28619 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
28620 - atomic_inc(&cm_accepts);
28621 + atomic_inc_unchecked(&cm_accepts);
28622
28623 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
28624 netdev_refcnt_read(nesvnic->netdev));
28625 @@ -3001,7 +3001,7 @@ int nes_reject(struct iw_cm_id *cm_id, c
28626
28627 struct nes_cm_core *cm_core;
28628
28629 - atomic_inc(&cm_rejects);
28630 + atomic_inc_unchecked(&cm_rejects);
28631 cm_node = (struct nes_cm_node *) cm_id->provider_data;
28632 loopback = cm_node->loopbackpartner;
28633 cm_core = cm_node->cm_core;
28634 @@ -3067,7 +3067,7 @@ int nes_connect(struct iw_cm_id *cm_id,
28635 ntohl(cm_id->local_addr.sin_addr.s_addr),
28636 ntohs(cm_id->local_addr.sin_port));
28637
28638 - atomic_inc(&cm_connects);
28639 + atomic_inc_unchecked(&cm_connects);
28640 nesqp->active_conn = 1;
28641
28642 /* cache the cm_id in the qp */
28643 @@ -3173,7 +3173,7 @@ int nes_create_listen(struct iw_cm_id *c
28644 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
28645 return err;
28646 }
28647 - atomic_inc(&cm_listens_created);
28648 + atomic_inc_unchecked(&cm_listens_created);
28649 }
28650
28651 cm_id->add_ref(cm_id);
28652 @@ -3278,7 +3278,7 @@ static void cm_event_connected(struct ne
28653 if (nesqp->destroyed) {
28654 return;
28655 }
28656 - atomic_inc(&cm_connecteds);
28657 + atomic_inc_unchecked(&cm_connecteds);
28658 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
28659 " local port 0x%04X. jiffies = %lu.\n",
28660 nesqp->hwqp.qp_id,
28661 @@ -3493,7 +3493,7 @@ static void cm_event_reset(struct nes_cm
28662
28663 cm_id->add_ref(cm_id);
28664 ret = cm_id->event_handler(cm_id, &cm_event);
28665 - atomic_inc(&cm_closes);
28666 + atomic_inc_unchecked(&cm_closes);
28667 cm_event.event = IW_CM_EVENT_CLOSE;
28668 cm_event.status = 0;
28669 cm_event.provider_data = cm_id->provider_data;
28670 @@ -3529,7 +3529,7 @@ static void cm_event_mpa_req(struct nes_
28671 return;
28672 cm_id = cm_node->cm_id;
28673
28674 - atomic_inc(&cm_connect_reqs);
28675 + atomic_inc_unchecked(&cm_connect_reqs);
28676 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
28677 cm_node, cm_id, jiffies);
28678
28679 @@ -3567,7 +3567,7 @@ static void cm_event_mpa_reject(struct n
28680 return;
28681 cm_id = cm_node->cm_id;
28682
28683 - atomic_inc(&cm_connect_reqs);
28684 + atomic_inc_unchecked(&cm_connect_reqs);
28685 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
28686 cm_node, cm_id, jiffies);
28687
28688 diff -urNp linux-3.0.8/drivers/infiniband/hw/nes/nes.h linux-3.0.8/drivers/infiniband/hw/nes/nes.h
28689 --- linux-3.0.8/drivers/infiniband/hw/nes/nes.h 2011-07-21 22:17:23.000000000 -0400
28690 +++ linux-3.0.8/drivers/infiniband/hw/nes/nes.h 2011-08-23 21:47:55.000000000 -0400
28691 @@ -175,17 +175,17 @@ extern unsigned int nes_debug_level;
28692 extern unsigned int wqm_quanta;
28693 extern struct list_head nes_adapter_list;
28694
28695 -extern atomic_t cm_connects;
28696 -extern atomic_t cm_accepts;
28697 -extern atomic_t cm_disconnects;
28698 -extern atomic_t cm_closes;
28699 -extern atomic_t cm_connecteds;
28700 -extern atomic_t cm_connect_reqs;
28701 -extern atomic_t cm_rejects;
28702 -extern atomic_t mod_qp_timouts;
28703 -extern atomic_t qps_created;
28704 -extern atomic_t qps_destroyed;
28705 -extern atomic_t sw_qps_destroyed;
28706 +extern atomic_unchecked_t cm_connects;
28707 +extern atomic_unchecked_t cm_accepts;
28708 +extern atomic_unchecked_t cm_disconnects;
28709 +extern atomic_unchecked_t cm_closes;
28710 +extern atomic_unchecked_t cm_connecteds;
28711 +extern atomic_unchecked_t cm_connect_reqs;
28712 +extern atomic_unchecked_t cm_rejects;
28713 +extern atomic_unchecked_t mod_qp_timouts;
28714 +extern atomic_unchecked_t qps_created;
28715 +extern atomic_unchecked_t qps_destroyed;
28716 +extern atomic_unchecked_t sw_qps_destroyed;
28717 extern u32 mh_detected;
28718 extern u32 mh_pauses_sent;
28719 extern u32 cm_packets_sent;
28720 @@ -194,14 +194,14 @@ extern u32 cm_packets_created;
28721 extern u32 cm_packets_received;
28722 extern u32 cm_packets_dropped;
28723 extern u32 cm_packets_retrans;
28724 -extern atomic_t cm_listens_created;
28725 -extern atomic_t cm_listens_destroyed;
28726 +extern atomic_unchecked_t cm_listens_created;
28727 +extern atomic_unchecked_t cm_listens_destroyed;
28728 extern u32 cm_backlog_drops;
28729 -extern atomic_t cm_loopbacks;
28730 -extern atomic_t cm_nodes_created;
28731 -extern atomic_t cm_nodes_destroyed;
28732 -extern atomic_t cm_accel_dropped_pkts;
28733 -extern atomic_t cm_resets_recvd;
28734 +extern atomic_unchecked_t cm_loopbacks;
28735 +extern atomic_unchecked_t cm_nodes_created;
28736 +extern atomic_unchecked_t cm_nodes_destroyed;
28737 +extern atomic_unchecked_t cm_accel_dropped_pkts;
28738 +extern atomic_unchecked_t cm_resets_recvd;
28739
28740 extern u32 int_mod_timer_init;
28741 extern u32 int_mod_cq_depth_256;
28742 diff -urNp linux-3.0.8/drivers/infiniband/hw/nes/nes_nic.c linux-3.0.8/drivers/infiniband/hw/nes/nes_nic.c
28743 --- linux-3.0.8/drivers/infiniband/hw/nes/nes_nic.c 2011-07-21 22:17:23.000000000 -0400
28744 +++ linux-3.0.8/drivers/infiniband/hw/nes/nes_nic.c 2011-08-23 21:47:55.000000000 -0400
28745 @@ -1274,31 +1274,31 @@ static void nes_netdev_get_ethtool_stats
28746 target_stat_values[++index] = mh_detected;
28747 target_stat_values[++index] = mh_pauses_sent;
28748 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
28749 - target_stat_values[++index] = atomic_read(&cm_connects);
28750 - target_stat_values[++index] = atomic_read(&cm_accepts);
28751 - target_stat_values[++index] = atomic_read(&cm_disconnects);
28752 - target_stat_values[++index] = atomic_read(&cm_connecteds);
28753 - target_stat_values[++index] = atomic_read(&cm_connect_reqs);
28754 - target_stat_values[++index] = atomic_read(&cm_rejects);
28755 - target_stat_values[++index] = atomic_read(&mod_qp_timouts);
28756 - target_stat_values[++index] = atomic_read(&qps_created);
28757 - target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
28758 - target_stat_values[++index] = atomic_read(&qps_destroyed);
28759 - target_stat_values[++index] = atomic_read(&cm_closes);
28760 + target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
28761 + target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
28762 + target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
28763 + target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
28764 + target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
28765 + target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
28766 + target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
28767 + target_stat_values[++index] = atomic_read_unchecked(&qps_created);
28768 + target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
28769 + target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
28770 + target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
28771 target_stat_values[++index] = cm_packets_sent;
28772 target_stat_values[++index] = cm_packets_bounced;
28773 target_stat_values[++index] = cm_packets_created;
28774 target_stat_values[++index] = cm_packets_received;
28775 target_stat_values[++index] = cm_packets_dropped;
28776 target_stat_values[++index] = cm_packets_retrans;
28777 - target_stat_values[++index] = atomic_read(&cm_listens_created);
28778 - target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
28779 + target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
28780 + target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
28781 target_stat_values[++index] = cm_backlog_drops;
28782 - target_stat_values[++index] = atomic_read(&cm_loopbacks);
28783 - target_stat_values[++index] = atomic_read(&cm_nodes_created);
28784 - target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
28785 - target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
28786 - target_stat_values[++index] = atomic_read(&cm_resets_recvd);
28787 + target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
28788 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
28789 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
28790 + target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
28791 + target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
28792 target_stat_values[++index] = nesadapter->free_4kpbl;
28793 target_stat_values[++index] = nesadapter->free_256pbl;
28794 target_stat_values[++index] = int_mod_timer_init;
28795 diff -urNp linux-3.0.8/drivers/infiniband/hw/nes/nes_verbs.c linux-3.0.8/drivers/infiniband/hw/nes/nes_verbs.c
28796 --- linux-3.0.8/drivers/infiniband/hw/nes/nes_verbs.c 2011-07-21 22:17:23.000000000 -0400
28797 +++ linux-3.0.8/drivers/infiniband/hw/nes/nes_verbs.c 2011-08-23 21:47:55.000000000 -0400
28798 @@ -46,9 +46,9 @@
28799
28800 #include <rdma/ib_umem.h>
28801
28802 -atomic_t mod_qp_timouts;
28803 -atomic_t qps_created;
28804 -atomic_t sw_qps_destroyed;
28805 +atomic_unchecked_t mod_qp_timouts;
28806 +atomic_unchecked_t qps_created;
28807 +atomic_unchecked_t sw_qps_destroyed;
28808
28809 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
28810
28811 @@ -1141,7 +1141,7 @@ static struct ib_qp *nes_create_qp(struc
28812 if (init_attr->create_flags)
28813 return ERR_PTR(-EINVAL);
28814
28815 - atomic_inc(&qps_created);
28816 + atomic_inc_unchecked(&qps_created);
28817 switch (init_attr->qp_type) {
28818 case IB_QPT_RC:
28819 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
28820 @@ -1470,7 +1470,7 @@ static int nes_destroy_qp(struct ib_qp *
28821 struct iw_cm_event cm_event;
28822 int ret;
28823
28824 - atomic_inc(&sw_qps_destroyed);
28825 + atomic_inc_unchecked(&sw_qps_destroyed);
28826 nesqp->destroyed = 1;
28827
28828 /* Blow away the connection if it exists. */
28829 diff -urNp linux-3.0.8/drivers/infiniband/hw/qib/qib.h linux-3.0.8/drivers/infiniband/hw/qib/qib.h
28830 --- linux-3.0.8/drivers/infiniband/hw/qib/qib.h 2011-07-21 22:17:23.000000000 -0400
28831 +++ linux-3.0.8/drivers/infiniband/hw/qib/qib.h 2011-08-23 21:47:55.000000000 -0400
28832 @@ -51,6 +51,7 @@
28833 #include <linux/completion.h>
28834 #include <linux/kref.h>
28835 #include <linux/sched.h>
28836 +#include <linux/slab.h>
28837
28838 #include "qib_common.h"
28839 #include "qib_verbs.h"
28840 diff -urNp linux-3.0.8/drivers/input/gameport/gameport.c linux-3.0.8/drivers/input/gameport/gameport.c
28841 --- linux-3.0.8/drivers/input/gameport/gameport.c 2011-07-21 22:17:23.000000000 -0400
28842 +++ linux-3.0.8/drivers/input/gameport/gameport.c 2011-08-23 21:47:55.000000000 -0400
28843 @@ -488,14 +488,14 @@ EXPORT_SYMBOL(gameport_set_phys);
28844 */
28845 static void gameport_init_port(struct gameport *gameport)
28846 {
28847 - static atomic_t gameport_no = ATOMIC_INIT(0);
28848 + static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
28849
28850 __module_get(THIS_MODULE);
28851
28852 mutex_init(&gameport->drv_mutex);
28853 device_initialize(&gameport->dev);
28854 dev_set_name(&gameport->dev, "gameport%lu",
28855 - (unsigned long)atomic_inc_return(&gameport_no) - 1);
28856 + (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
28857 gameport->dev.bus = &gameport_bus;
28858 gameport->dev.release = gameport_release_port;
28859 if (gameport->parent)
28860 diff -urNp linux-3.0.8/drivers/input/input.c linux-3.0.8/drivers/input/input.c
28861 --- linux-3.0.8/drivers/input/input.c 2011-07-21 22:17:23.000000000 -0400
28862 +++ linux-3.0.8/drivers/input/input.c 2011-08-23 21:47:55.000000000 -0400
28863 @@ -1814,7 +1814,7 @@ static void input_cleanse_bitmasks(struc
28864 */
28865 int input_register_device(struct input_dev *dev)
28866 {
28867 - static atomic_t input_no = ATOMIC_INIT(0);
28868 + static atomic_unchecked_t input_no = ATOMIC_INIT(0);
28869 struct input_handler *handler;
28870 const char *path;
28871 int error;
28872 @@ -1851,7 +1851,7 @@ int input_register_device(struct input_d
28873 dev->setkeycode = input_default_setkeycode;
28874
28875 dev_set_name(&dev->dev, "input%ld",
28876 - (unsigned long) atomic_inc_return(&input_no) - 1);
28877 + (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
28878
28879 error = device_add(&dev->dev);
28880 if (error)
28881 diff -urNp linux-3.0.8/drivers/input/joystick/sidewinder.c linux-3.0.8/drivers/input/joystick/sidewinder.c
28882 --- linux-3.0.8/drivers/input/joystick/sidewinder.c 2011-07-21 22:17:23.000000000 -0400
28883 +++ linux-3.0.8/drivers/input/joystick/sidewinder.c 2011-08-23 21:48:14.000000000 -0400
28884 @@ -30,6 +30,7 @@
28885 #include <linux/kernel.h>
28886 #include <linux/module.h>
28887 #include <linux/slab.h>
28888 +#include <linux/sched.h>
28889 #include <linux/init.h>
28890 #include <linux/input.h>
28891 #include <linux/gameport.h>
28892 @@ -428,6 +429,8 @@ static int sw_read(struct sw *sw)
28893 unsigned char buf[SW_LENGTH];
28894 int i;
28895
28896 + pax_track_stack();
28897 +
28898 i = sw_read_packet(sw->gameport, buf, sw->length, 0);
28899
28900 if (sw->type == SW_ID_3DP && sw->length == 66 && i != 66) { /* Broken packet, try to fix */
28901 diff -urNp linux-3.0.8/drivers/input/joystick/xpad.c linux-3.0.8/drivers/input/joystick/xpad.c
28902 --- linux-3.0.8/drivers/input/joystick/xpad.c 2011-07-21 22:17:23.000000000 -0400
28903 +++ linux-3.0.8/drivers/input/joystick/xpad.c 2011-08-23 21:47:55.000000000 -0400
28904 @@ -689,7 +689,7 @@ static void xpad_led_set(struct led_clas
28905
28906 static int xpad_led_probe(struct usb_xpad *xpad)
28907 {
28908 - static atomic_t led_seq = ATOMIC_INIT(0);
28909 + static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
28910 long led_no;
28911 struct xpad_led *led;
28912 struct led_classdev *led_cdev;
28913 @@ -702,7 +702,7 @@ static int xpad_led_probe(struct usb_xpa
28914 if (!led)
28915 return -ENOMEM;
28916
28917 - led_no = (long)atomic_inc_return(&led_seq) - 1;
28918 + led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
28919
28920 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
28921 led->xpad = xpad;
28922 diff -urNp linux-3.0.8/drivers/input/mousedev.c linux-3.0.8/drivers/input/mousedev.c
28923 --- linux-3.0.8/drivers/input/mousedev.c 2011-07-21 22:17:23.000000000 -0400
28924 +++ linux-3.0.8/drivers/input/mousedev.c 2011-08-23 21:47:55.000000000 -0400
28925 @@ -763,7 +763,7 @@ static ssize_t mousedev_read(struct file
28926
28927 spin_unlock_irq(&client->packet_lock);
28928
28929 - if (copy_to_user(buffer, data, count))
28930 + if (count > sizeof(data) || copy_to_user(buffer, data, count))
28931 return -EFAULT;
28932
28933 return count;
28934 diff -urNp linux-3.0.8/drivers/input/serio/serio.c linux-3.0.8/drivers/input/serio/serio.c
28935 --- linux-3.0.8/drivers/input/serio/serio.c 2011-07-21 22:17:23.000000000 -0400
28936 +++ linux-3.0.8/drivers/input/serio/serio.c 2011-08-23 21:47:55.000000000 -0400
28937 @@ -497,7 +497,7 @@ static void serio_release_port(struct de
28938 */
28939 static void serio_init_port(struct serio *serio)
28940 {
28941 - static atomic_t serio_no = ATOMIC_INIT(0);
28942 + static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
28943
28944 __module_get(THIS_MODULE);
28945
28946 @@ -508,7 +508,7 @@ static void serio_init_port(struct serio
28947 mutex_init(&serio->drv_mutex);
28948 device_initialize(&serio->dev);
28949 dev_set_name(&serio->dev, "serio%ld",
28950 - (long)atomic_inc_return(&serio_no) - 1);
28951 + (long)atomic_inc_return_unchecked(&serio_no) - 1);
28952 serio->dev.bus = &serio_bus;
28953 serio->dev.release = serio_release_port;
28954 serio->dev.groups = serio_device_attr_groups;
28955 diff -urNp linux-3.0.8/drivers/isdn/capi/capi.c linux-3.0.8/drivers/isdn/capi/capi.c
28956 --- linux-3.0.8/drivers/isdn/capi/capi.c 2011-07-21 22:17:23.000000000 -0400
28957 +++ linux-3.0.8/drivers/isdn/capi/capi.c 2011-08-23 21:47:55.000000000 -0400
28958 @@ -83,8 +83,8 @@ struct capiminor {
28959
28960 struct capi20_appl *ap;
28961 u32 ncci;
28962 - atomic_t datahandle;
28963 - atomic_t msgid;
28964 + atomic_unchecked_t datahandle;
28965 + atomic_unchecked_t msgid;
28966
28967 struct tty_port port;
28968 int ttyinstop;
28969 @@ -397,7 +397,7 @@ gen_data_b3_resp_for(struct capiminor *m
28970 capimsg_setu16(s, 2, mp->ap->applid);
28971 capimsg_setu8 (s, 4, CAPI_DATA_B3);
28972 capimsg_setu8 (s, 5, CAPI_RESP);
28973 - capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
28974 + capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
28975 capimsg_setu32(s, 8, mp->ncci);
28976 capimsg_setu16(s, 12, datahandle);
28977 }
28978 @@ -518,14 +518,14 @@ static void handle_minor_send(struct cap
28979 mp->outbytes -= len;
28980 spin_unlock_bh(&mp->outlock);
28981
28982 - datahandle = atomic_inc_return(&mp->datahandle);
28983 + datahandle = atomic_inc_return_unchecked(&mp->datahandle);
28984 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
28985 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
28986 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
28987 capimsg_setu16(skb->data, 2, mp->ap->applid);
28988 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
28989 capimsg_setu8 (skb->data, 5, CAPI_REQ);
28990 - capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
28991 + capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
28992 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
28993 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
28994 capimsg_setu16(skb->data, 16, len); /* Data length */
28995 diff -urNp linux-3.0.8/drivers/isdn/gigaset/common.c linux-3.0.8/drivers/isdn/gigaset/common.c
28996 --- linux-3.0.8/drivers/isdn/gigaset/common.c 2011-07-21 22:17:23.000000000 -0400
28997 +++ linux-3.0.8/drivers/isdn/gigaset/common.c 2011-08-23 21:47:55.000000000 -0400
28998 @@ -723,7 +723,7 @@ struct cardstate *gigaset_initcs(struct
28999 cs->commands_pending = 0;
29000 cs->cur_at_seq = 0;
29001 cs->gotfwver = -1;
29002 - cs->open_count = 0;
29003 + local_set(&cs->open_count, 0);
29004 cs->dev = NULL;
29005 cs->tty = NULL;
29006 cs->tty_dev = NULL;
29007 diff -urNp linux-3.0.8/drivers/isdn/gigaset/gigaset.h linux-3.0.8/drivers/isdn/gigaset/gigaset.h
29008 --- linux-3.0.8/drivers/isdn/gigaset/gigaset.h 2011-07-21 22:17:23.000000000 -0400
29009 +++ linux-3.0.8/drivers/isdn/gigaset/gigaset.h 2011-08-23 21:47:55.000000000 -0400
29010 @@ -35,6 +35,7 @@
29011 #include <linux/tty_driver.h>
29012 #include <linux/list.h>
29013 #include <asm/atomic.h>
29014 +#include <asm/local.h>
29015
29016 #define GIG_VERSION {0, 5, 0, 0}
29017 #define GIG_COMPAT {0, 4, 0, 0}
29018 @@ -433,7 +434,7 @@ struct cardstate {
29019 spinlock_t cmdlock;
29020 unsigned curlen, cmdbytes;
29021
29022 - unsigned open_count;
29023 + local_t open_count;
29024 struct tty_struct *tty;
29025 struct tasklet_struct if_wake_tasklet;
29026 unsigned control_state;
29027 diff -urNp linux-3.0.8/drivers/isdn/gigaset/interface.c linux-3.0.8/drivers/isdn/gigaset/interface.c
29028 --- linux-3.0.8/drivers/isdn/gigaset/interface.c 2011-07-21 22:17:23.000000000 -0400
29029 +++ linux-3.0.8/drivers/isdn/gigaset/interface.c 2011-08-23 21:47:55.000000000 -0400
29030 @@ -162,9 +162,7 @@ static int if_open(struct tty_struct *tt
29031 }
29032 tty->driver_data = cs;
29033
29034 - ++cs->open_count;
29035 -
29036 - if (cs->open_count == 1) {
29037 + if (local_inc_return(&cs->open_count) == 1) {
29038 spin_lock_irqsave(&cs->lock, flags);
29039 cs->tty = tty;
29040 spin_unlock_irqrestore(&cs->lock, flags);
29041 @@ -192,10 +190,10 @@ static void if_close(struct tty_struct *
29042
29043 if (!cs->connected)
29044 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
29045 - else if (!cs->open_count)
29046 + else if (!local_read(&cs->open_count))
29047 dev_warn(cs->dev, "%s: device not opened\n", __func__);
29048 else {
29049 - if (!--cs->open_count) {
29050 + if (!local_dec_return(&cs->open_count)) {
29051 spin_lock_irqsave(&cs->lock, flags);
29052 cs->tty = NULL;
29053 spin_unlock_irqrestore(&cs->lock, flags);
29054 @@ -230,7 +228,7 @@ static int if_ioctl(struct tty_struct *t
29055 if (!cs->connected) {
29056 gig_dbg(DEBUG_IF, "not connected");
29057 retval = -ENODEV;
29058 - } else if (!cs->open_count)
29059 + } else if (!local_read(&cs->open_count))
29060 dev_warn(cs->dev, "%s: device not opened\n", __func__);
29061 else {
29062 retval = 0;
29063 @@ -360,7 +358,7 @@ static int if_write(struct tty_struct *t
29064 retval = -ENODEV;
29065 goto done;
29066 }
29067 - if (!cs->open_count) {
29068 + if (!local_read(&cs->open_count)) {
29069 dev_warn(cs->dev, "%s: device not opened\n", __func__);
29070 retval = -ENODEV;
29071 goto done;
29072 @@ -413,7 +411,7 @@ static int if_write_room(struct tty_stru
29073 if (!cs->connected) {
29074 gig_dbg(DEBUG_IF, "not connected");
29075 retval = -ENODEV;
29076 - } else if (!cs->open_count)
29077 + } else if (!local_read(&cs->open_count))
29078 dev_warn(cs->dev, "%s: device not opened\n", __func__);
29079 else if (cs->mstate != MS_LOCKED) {
29080 dev_warn(cs->dev, "can't write to unlocked device\n");
29081 @@ -443,7 +441,7 @@ static int if_chars_in_buffer(struct tty
29082
29083 if (!cs->connected)
29084 gig_dbg(DEBUG_IF, "not connected");
29085 - else if (!cs->open_count)
29086 + else if (!local_read(&cs->open_count))
29087 dev_warn(cs->dev, "%s: device not opened\n", __func__);
29088 else if (cs->mstate != MS_LOCKED)
29089 dev_warn(cs->dev, "can't write to unlocked device\n");
29090 @@ -471,7 +469,7 @@ static void if_throttle(struct tty_struc
29091
29092 if (!cs->connected)
29093 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
29094 - else if (!cs->open_count)
29095 + else if (!local_read(&cs->open_count))
29096 dev_warn(cs->dev, "%s: device not opened\n", __func__);
29097 else
29098 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
29099 @@ -495,7 +493,7 @@ static void if_unthrottle(struct tty_str
29100
29101 if (!cs->connected)
29102 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
29103 - else if (!cs->open_count)
29104 + else if (!local_read(&cs->open_count))
29105 dev_warn(cs->dev, "%s: device not opened\n", __func__);
29106 else
29107 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
29108 @@ -526,7 +524,7 @@ static void if_set_termios(struct tty_st
29109 goto out;
29110 }
29111
29112 - if (!cs->open_count) {
29113 + if (!local_read(&cs->open_count)) {
29114 dev_warn(cs->dev, "%s: device not opened\n", __func__);
29115 goto out;
29116 }
29117 diff -urNp linux-3.0.8/drivers/isdn/hardware/avm/b1.c linux-3.0.8/drivers/isdn/hardware/avm/b1.c
29118 --- linux-3.0.8/drivers/isdn/hardware/avm/b1.c 2011-07-21 22:17:23.000000000 -0400
29119 +++ linux-3.0.8/drivers/isdn/hardware/avm/b1.c 2011-08-23 21:47:55.000000000 -0400
29120 @@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capilo
29121 }
29122 if (left) {
29123 if (t4file->user) {
29124 - if (copy_from_user(buf, dp, left))
29125 + if (left > sizeof buf || copy_from_user(buf, dp, left))
29126 return -EFAULT;
29127 } else {
29128 memcpy(buf, dp, left);
29129 @@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capilo
29130 }
29131 if (left) {
29132 if (config->user) {
29133 - if (copy_from_user(buf, dp, left))
29134 + if (left > sizeof buf || copy_from_user(buf, dp, left))
29135 return -EFAULT;
29136 } else {
29137 memcpy(buf, dp, left);
29138 diff -urNp linux-3.0.8/drivers/isdn/hardware/eicon/capidtmf.c linux-3.0.8/drivers/isdn/hardware/eicon/capidtmf.c
29139 --- linux-3.0.8/drivers/isdn/hardware/eicon/capidtmf.c 2011-07-21 22:17:23.000000000 -0400
29140 +++ linux-3.0.8/drivers/isdn/hardware/eicon/capidtmf.c 2011-08-23 21:48:14.000000000 -0400
29141 @@ -498,6 +498,7 @@ void capidtmf_recv_block (t_capidtmf_sta
29142 byte goertzel_result_buffer[CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT];
29143 short windowed_sample_buffer[CAPIDTMF_RECV_WINDOWED_SAMPLES];
29144
29145 + pax_track_stack();
29146
29147 if (p_state->recv.state & CAPIDTMF_RECV_STATE_DTMF_ACTIVE)
29148 {
29149 diff -urNp linux-3.0.8/drivers/isdn/hardware/eicon/capifunc.c linux-3.0.8/drivers/isdn/hardware/eicon/capifunc.c
29150 --- linux-3.0.8/drivers/isdn/hardware/eicon/capifunc.c 2011-07-21 22:17:23.000000000 -0400
29151 +++ linux-3.0.8/drivers/isdn/hardware/eicon/capifunc.c 2011-08-23 21:48:14.000000000 -0400
29152 @@ -1055,6 +1055,8 @@ static int divacapi_connect_didd(void)
29153 IDI_SYNC_REQ req;
29154 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
29155
29156 + pax_track_stack();
29157 +
29158 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
29159
29160 for (x = 0; x < MAX_DESCRIPTORS; x++) {
29161 diff -urNp linux-3.0.8/drivers/isdn/hardware/eicon/diddfunc.c linux-3.0.8/drivers/isdn/hardware/eicon/diddfunc.c
29162 --- linux-3.0.8/drivers/isdn/hardware/eicon/diddfunc.c 2011-07-21 22:17:23.000000000 -0400
29163 +++ linux-3.0.8/drivers/isdn/hardware/eicon/diddfunc.c 2011-08-23 21:48:14.000000000 -0400
29164 @@ -54,6 +54,8 @@ static int DIVA_INIT_FUNCTION connect_di
29165 IDI_SYNC_REQ req;
29166 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
29167
29168 + pax_track_stack();
29169 +
29170 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
29171
29172 for (x = 0; x < MAX_DESCRIPTORS; x++) {
29173 diff -urNp linux-3.0.8/drivers/isdn/hardware/eicon/divasfunc.c linux-3.0.8/drivers/isdn/hardware/eicon/divasfunc.c
29174 --- linux-3.0.8/drivers/isdn/hardware/eicon/divasfunc.c 2011-07-21 22:17:23.000000000 -0400
29175 +++ linux-3.0.8/drivers/isdn/hardware/eicon/divasfunc.c 2011-08-23 21:48:14.000000000 -0400
29176 @@ -160,6 +160,8 @@ static int DIVA_INIT_FUNCTION connect_di
29177 IDI_SYNC_REQ req;
29178 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
29179
29180 + pax_track_stack();
29181 +
29182 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
29183
29184 for (x = 0; x < MAX_DESCRIPTORS; x++) {
29185 diff -urNp linux-3.0.8/drivers/isdn/hardware/eicon/divasync.h linux-3.0.8/drivers/isdn/hardware/eicon/divasync.h
29186 --- linux-3.0.8/drivers/isdn/hardware/eicon/divasync.h 2011-07-21 22:17:23.000000000 -0400
29187 +++ linux-3.0.8/drivers/isdn/hardware/eicon/divasync.h 2011-08-23 21:47:55.000000000 -0400
29188 @@ -146,7 +146,7 @@ typedef struct _diva_didd_add_adapter {
29189 } diva_didd_add_adapter_t;
29190 typedef struct _diva_didd_remove_adapter {
29191 IDI_CALL p_request;
29192 -} diva_didd_remove_adapter_t;
29193 +} __no_const diva_didd_remove_adapter_t;
29194 typedef struct _diva_didd_read_adapter_array {
29195 void * buffer;
29196 dword length;
29197 diff -urNp linux-3.0.8/drivers/isdn/hardware/eicon/idifunc.c linux-3.0.8/drivers/isdn/hardware/eicon/idifunc.c
29198 --- linux-3.0.8/drivers/isdn/hardware/eicon/idifunc.c 2011-07-21 22:17:23.000000000 -0400
29199 +++ linux-3.0.8/drivers/isdn/hardware/eicon/idifunc.c 2011-08-23 21:48:14.000000000 -0400
29200 @@ -188,6 +188,8 @@ static int DIVA_INIT_FUNCTION connect_di
29201 IDI_SYNC_REQ req;
29202 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
29203
29204 + pax_track_stack();
29205 +
29206 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
29207
29208 for (x = 0; x < MAX_DESCRIPTORS; x++) {
29209 diff -urNp linux-3.0.8/drivers/isdn/hardware/eicon/message.c linux-3.0.8/drivers/isdn/hardware/eicon/message.c
29210 --- linux-3.0.8/drivers/isdn/hardware/eicon/message.c 2011-07-21 22:17:23.000000000 -0400
29211 +++ linux-3.0.8/drivers/isdn/hardware/eicon/message.c 2011-08-23 21:48:14.000000000 -0400
29212 @@ -4886,6 +4886,8 @@ static void sig_ind(PLCI *plci)
29213 dword d;
29214 word w;
29215
29216 + pax_track_stack();
29217 +
29218 a = plci->adapter;
29219 Id = ((word)plci->Id<<8)|a->Id;
29220 PUT_WORD(&SS_Ind[4],0x0000);
29221 @@ -7480,6 +7482,8 @@ static word add_b1(PLCI *plci, API_PARSE
29222 word j, n, w;
29223 dword d;
29224
29225 + pax_track_stack();
29226 +
29227
29228 for(i=0;i<8;i++) bp_parms[i].length = 0;
29229 for(i=0;i<2;i++) global_config[i].length = 0;
29230 @@ -7954,6 +7958,8 @@ static word add_b23(PLCI *plci, API_PARS
29231 const byte llc3[] = {4,3,2,2,6,6,0};
29232 const byte header[] = {0,2,3,3,0,0,0};
29233
29234 + pax_track_stack();
29235 +
29236 for(i=0;i<8;i++) bp_parms[i].length = 0;
29237 for(i=0;i<6;i++) b2_config_parms[i].length = 0;
29238 for(i=0;i<5;i++) b3_config_parms[i].length = 0;
29239 @@ -14741,6 +14747,8 @@ static void group_optimization(DIVA_CAPI
29240 word appl_number_group_type[MAX_APPL];
29241 PLCI *auxplci;
29242
29243 + pax_track_stack();
29244 +
29245 set_group_ind_mask (plci); /* all APPLs within this inc. call are allowed to dial in */
29246
29247 if(!a->group_optimization_enabled)
29248 diff -urNp linux-3.0.8/drivers/isdn/hardware/eicon/mntfunc.c linux-3.0.8/drivers/isdn/hardware/eicon/mntfunc.c
29249 --- linux-3.0.8/drivers/isdn/hardware/eicon/mntfunc.c 2011-07-21 22:17:23.000000000 -0400
29250 +++ linux-3.0.8/drivers/isdn/hardware/eicon/mntfunc.c 2011-08-23 21:48:14.000000000 -0400
29251 @@ -79,6 +79,8 @@ static int DIVA_INIT_FUNCTION connect_di
29252 IDI_SYNC_REQ req;
29253 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
29254
29255 + pax_track_stack();
29256 +
29257 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
29258
29259 for (x = 0; x < MAX_DESCRIPTORS; x++) {
29260 diff -urNp linux-3.0.8/drivers/isdn/hardware/eicon/xdi_adapter.h linux-3.0.8/drivers/isdn/hardware/eicon/xdi_adapter.h
29261 --- linux-3.0.8/drivers/isdn/hardware/eicon/xdi_adapter.h 2011-07-21 22:17:23.000000000 -0400
29262 +++ linux-3.0.8/drivers/isdn/hardware/eicon/xdi_adapter.h 2011-08-23 21:47:55.000000000 -0400
29263 @@ -44,7 +44,7 @@ typedef struct _xdi_mbox_t {
29264 typedef struct _diva_os_idi_adapter_interface {
29265 diva_init_card_proc_t cleanup_adapter_proc;
29266 diva_cmd_card_proc_t cmd_proc;
29267 -} diva_os_idi_adapter_interface_t;
29268 +} __no_const diva_os_idi_adapter_interface_t;
29269
29270 typedef struct _diva_os_xdi_adapter {
29271 struct list_head link;
29272 diff -urNp linux-3.0.8/drivers/isdn/i4l/isdn_common.c linux-3.0.8/drivers/isdn/i4l/isdn_common.c
29273 --- linux-3.0.8/drivers/isdn/i4l/isdn_common.c 2011-07-21 22:17:23.000000000 -0400
29274 +++ linux-3.0.8/drivers/isdn/i4l/isdn_common.c 2011-08-23 21:48:14.000000000 -0400
29275 @@ -1286,6 +1286,8 @@ isdn_ioctl(struct file *file, uint cmd,
29276 } iocpar;
29277 void __user *argp = (void __user *)arg;
29278
29279 + pax_track_stack();
29280 +
29281 #define name iocpar.name
29282 #define bname iocpar.bname
29283 #define iocts iocpar.iocts
29284 diff -urNp linux-3.0.8/drivers/isdn/icn/icn.c linux-3.0.8/drivers/isdn/icn/icn.c
29285 --- linux-3.0.8/drivers/isdn/icn/icn.c 2011-07-21 22:17:23.000000000 -0400
29286 +++ linux-3.0.8/drivers/isdn/icn/icn.c 2011-08-23 21:47:55.000000000 -0400
29287 @@ -1045,7 +1045,7 @@ icn_writecmd(const u_char * buf, int len
29288 if (count > len)
29289 count = len;
29290 if (user) {
29291 - if (copy_from_user(msg, buf, count))
29292 + if (count > sizeof msg || copy_from_user(msg, buf, count))
29293 return -EFAULT;
29294 } else
29295 memcpy(msg, buf, count);
29296 diff -urNp linux-3.0.8/drivers/lguest/core.c linux-3.0.8/drivers/lguest/core.c
29297 --- linux-3.0.8/drivers/lguest/core.c 2011-07-21 22:17:23.000000000 -0400
29298 +++ linux-3.0.8/drivers/lguest/core.c 2011-08-23 21:47:55.000000000 -0400
29299 @@ -92,9 +92,17 @@ static __init int map_switcher(void)
29300 * it's worked so far. The end address needs +1 because __get_vm_area
29301 * allocates an extra guard page, so we need space for that.
29302 */
29303 +
29304 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
29305 + switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
29306 + VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
29307 + + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
29308 +#else
29309 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
29310 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
29311 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
29312 +#endif
29313 +
29314 if (!switcher_vma) {
29315 err = -ENOMEM;
29316 printk("lguest: could not map switcher pages high\n");
29317 @@ -119,7 +127,7 @@ static __init int map_switcher(void)
29318 * Now the Switcher is mapped at the right address, we can't fail!
29319 * Copy in the compiled-in Switcher code (from <arch>_switcher.S).
29320 */
29321 - memcpy(switcher_vma->addr, start_switcher_text,
29322 + memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
29323 end_switcher_text - start_switcher_text);
29324
29325 printk(KERN_INFO "lguest: mapped switcher at %p\n",
29326 diff -urNp linux-3.0.8/drivers/lguest/x86/core.c linux-3.0.8/drivers/lguest/x86/core.c
29327 --- linux-3.0.8/drivers/lguest/x86/core.c 2011-07-21 22:17:23.000000000 -0400
29328 +++ linux-3.0.8/drivers/lguest/x86/core.c 2011-08-23 21:47:55.000000000 -0400
29329 @@ -59,7 +59,7 @@ static struct {
29330 /* Offset from where switcher.S was compiled to where we've copied it */
29331 static unsigned long switcher_offset(void)
29332 {
29333 - return SWITCHER_ADDR - (unsigned long)start_switcher_text;
29334 + return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
29335 }
29336
29337 /* This cpu's struct lguest_pages. */
29338 @@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg
29339 * These copies are pretty cheap, so we do them unconditionally: */
29340 /* Save the current Host top-level page directory.
29341 */
29342 +
29343 +#ifdef CONFIG_PAX_PER_CPU_PGD
29344 + pages->state.host_cr3 = read_cr3();
29345 +#else
29346 pages->state.host_cr3 = __pa(current->mm->pgd);
29347 +#endif
29348 +
29349 /*
29350 * Set up the Guest's page tables to see this CPU's pages (and no
29351 * other CPU's pages).
29352 @@ -547,7 +553,7 @@ void __init lguest_arch_host_init(void)
29353 * compiled-in switcher code and the high-mapped copy we just made.
29354 */
29355 for (i = 0; i < IDT_ENTRIES; i++)
29356 - default_idt_entries[i] += switcher_offset();
29357 + default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
29358
29359 /*
29360 * Set up the Switcher's per-cpu areas.
29361 @@ -630,7 +636,7 @@ void __init lguest_arch_host_init(void)
29362 * it will be undisturbed when we switch. To change %cs and jump we
29363 * need this structure to feed to Intel's "lcall" instruction.
29364 */
29365 - lguest_entry.offset = (long)switch_to_guest + switcher_offset();
29366 + lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
29367 lguest_entry.segment = LGUEST_CS;
29368
29369 /*
29370 diff -urNp linux-3.0.8/drivers/lguest/x86/switcher_32.S linux-3.0.8/drivers/lguest/x86/switcher_32.S
29371 --- linux-3.0.8/drivers/lguest/x86/switcher_32.S 2011-07-21 22:17:23.000000000 -0400
29372 +++ linux-3.0.8/drivers/lguest/x86/switcher_32.S 2011-08-23 21:47:55.000000000 -0400
29373 @@ -87,6 +87,7 @@
29374 #include <asm/page.h>
29375 #include <asm/segment.h>
29376 #include <asm/lguest.h>
29377 +#include <asm/processor-flags.h>
29378
29379 // We mark the start of the code to copy
29380 // It's placed in .text tho it's never run here
29381 @@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
29382 // Changes type when we load it: damn Intel!
29383 // For after we switch over our page tables
29384 // That entry will be read-only: we'd crash.
29385 +
29386 +#ifdef CONFIG_PAX_KERNEXEC
29387 + mov %cr0, %edx
29388 + xor $X86_CR0_WP, %edx
29389 + mov %edx, %cr0
29390 +#endif
29391 +
29392 movl $(GDT_ENTRY_TSS*8), %edx
29393 ltr %dx
29394
29395 @@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
29396 // Let's clear it again for our return.
29397 // The GDT descriptor of the Host
29398 // Points to the table after two "size" bytes
29399 - movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
29400 + movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
29401 // Clear "used" from type field (byte 5, bit 2)
29402 - andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
29403 + andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
29404 +
29405 +#ifdef CONFIG_PAX_KERNEXEC
29406 + mov %cr0, %eax
29407 + xor $X86_CR0_WP, %eax
29408 + mov %eax, %cr0
29409 +#endif
29410
29411 // Once our page table's switched, the Guest is live!
29412 // The Host fades as we run this final step.
29413 @@ -295,13 +309,12 @@ deliver_to_host:
29414 // I consulted gcc, and it gave
29415 // These instructions, which I gladly credit:
29416 leal (%edx,%ebx,8), %eax
29417 - movzwl (%eax),%edx
29418 - movl 4(%eax), %eax
29419 - xorw %ax, %ax
29420 - orl %eax, %edx
29421 + movl 4(%eax), %edx
29422 + movw (%eax), %dx
29423 // Now the address of the handler's in %edx
29424 // We call it now: its "iret" drops us home.
29425 - jmp *%edx
29426 + ljmp $__KERNEL_CS, $1f
29427 +1: jmp *%edx
29428
29429 // Every interrupt can come to us here
29430 // But we must truly tell each apart.
29431 diff -urNp linux-3.0.8/drivers/macintosh/macio_asic.c linux-3.0.8/drivers/macintosh/macio_asic.c
29432 --- linux-3.0.8/drivers/macintosh/macio_asic.c 2011-07-21 22:17:23.000000000 -0400
29433 +++ linux-3.0.8/drivers/macintosh/macio_asic.c 2011-10-11 10:44:33.000000000 -0400
29434 @@ -748,7 +748,7 @@ static void __devexit macio_pci_remove(s
29435 * MacIO is matched against any Apple ID, it's probe() function
29436 * will then decide wether it applies or not
29437 */
29438 -static const struct pci_device_id __devinitdata pci_ids [] = { {
29439 +static const struct pci_device_id __devinitconst pci_ids [] = { {
29440 .vendor = PCI_VENDOR_ID_APPLE,
29441 .device = PCI_ANY_ID,
29442 .subvendor = PCI_ANY_ID,
29443 diff -urNp linux-3.0.8/drivers/md/dm.c linux-3.0.8/drivers/md/dm.c
29444 --- linux-3.0.8/drivers/md/dm.c 2011-10-24 08:05:21.000000000 -0400
29445 +++ linux-3.0.8/drivers/md/dm.c 2011-08-23 21:47:55.000000000 -0400
29446 @@ -164,9 +164,9 @@ struct mapped_device {
29447 /*
29448 * Event handling.
29449 */
29450 - atomic_t event_nr;
29451 + atomic_unchecked_t event_nr;
29452 wait_queue_head_t eventq;
29453 - atomic_t uevent_seq;
29454 + atomic_unchecked_t uevent_seq;
29455 struct list_head uevent_list;
29456 spinlock_t uevent_lock; /* Protect access to uevent_list */
29457
29458 @@ -1842,8 +1842,8 @@ static struct mapped_device *alloc_dev(i
29459 rwlock_init(&md->map_lock);
29460 atomic_set(&md->holders, 1);
29461 atomic_set(&md->open_count, 0);
29462 - atomic_set(&md->event_nr, 0);
29463 - atomic_set(&md->uevent_seq, 0);
29464 + atomic_set_unchecked(&md->event_nr, 0);
29465 + atomic_set_unchecked(&md->uevent_seq, 0);
29466 INIT_LIST_HEAD(&md->uevent_list);
29467 spin_lock_init(&md->uevent_lock);
29468
29469 @@ -1977,7 +1977,7 @@ static void event_callback(void *context
29470
29471 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
29472
29473 - atomic_inc(&md->event_nr);
29474 + atomic_inc_unchecked(&md->event_nr);
29475 wake_up(&md->eventq);
29476 }
29477
29478 @@ -2553,18 +2553,18 @@ int dm_kobject_uevent(struct mapped_devi
29479
29480 uint32_t dm_next_uevent_seq(struct mapped_device *md)
29481 {
29482 - return atomic_add_return(1, &md->uevent_seq);
29483 + return atomic_add_return_unchecked(1, &md->uevent_seq);
29484 }
29485
29486 uint32_t dm_get_event_nr(struct mapped_device *md)
29487 {
29488 - return atomic_read(&md->event_nr);
29489 + return atomic_read_unchecked(&md->event_nr);
29490 }
29491
29492 int dm_wait_event(struct mapped_device *md, int event_nr)
29493 {
29494 return wait_event_interruptible(md->eventq,
29495 - (event_nr != atomic_read(&md->event_nr)));
29496 + (event_nr != atomic_read_unchecked(&md->event_nr)));
29497 }
29498
29499 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
29500 diff -urNp linux-3.0.8/drivers/md/dm-ioctl.c linux-3.0.8/drivers/md/dm-ioctl.c
29501 --- linux-3.0.8/drivers/md/dm-ioctl.c 2011-07-21 22:17:23.000000000 -0400
29502 +++ linux-3.0.8/drivers/md/dm-ioctl.c 2011-08-23 21:47:55.000000000 -0400
29503 @@ -1551,7 +1551,7 @@ static int validate_params(uint cmd, str
29504 cmd == DM_LIST_VERSIONS_CMD)
29505 return 0;
29506
29507 - if ((cmd == DM_DEV_CREATE_CMD)) {
29508 + if (cmd == DM_DEV_CREATE_CMD) {
29509 if (!*param->name) {
29510 DMWARN("name not supplied when creating device");
29511 return -EINVAL;
29512 diff -urNp linux-3.0.8/drivers/md/dm-raid1.c linux-3.0.8/drivers/md/dm-raid1.c
29513 --- linux-3.0.8/drivers/md/dm-raid1.c 2011-07-21 22:17:23.000000000 -0400
29514 +++ linux-3.0.8/drivers/md/dm-raid1.c 2011-08-23 21:47:55.000000000 -0400
29515 @@ -40,7 +40,7 @@ enum dm_raid1_error {
29516
29517 struct mirror {
29518 struct mirror_set *ms;
29519 - atomic_t error_count;
29520 + atomic_unchecked_t error_count;
29521 unsigned long error_type;
29522 struct dm_dev *dev;
29523 sector_t offset;
29524 @@ -185,7 +185,7 @@ static struct mirror *get_valid_mirror(s
29525 struct mirror *m;
29526
29527 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
29528 - if (!atomic_read(&m->error_count))
29529 + if (!atomic_read_unchecked(&m->error_count))
29530 return m;
29531
29532 return NULL;
29533 @@ -217,7 +217,7 @@ static void fail_mirror(struct mirror *m
29534 * simple way to tell if a device has encountered
29535 * errors.
29536 */
29537 - atomic_inc(&m->error_count);
29538 + atomic_inc_unchecked(&m->error_count);
29539
29540 if (test_and_set_bit(error_type, &m->error_type))
29541 return;
29542 @@ -408,7 +408,7 @@ static struct mirror *choose_mirror(stru
29543 struct mirror *m = get_default_mirror(ms);
29544
29545 do {
29546 - if (likely(!atomic_read(&m->error_count)))
29547 + if (likely(!atomic_read_unchecked(&m->error_count)))
29548 return m;
29549
29550 if (m-- == ms->mirror)
29551 @@ -422,7 +422,7 @@ static int default_ok(struct mirror *m)
29552 {
29553 struct mirror *default_mirror = get_default_mirror(m->ms);
29554
29555 - return !atomic_read(&default_mirror->error_count);
29556 + return !atomic_read_unchecked(&default_mirror->error_count);
29557 }
29558
29559 static int mirror_available(struct mirror_set *ms, struct bio *bio)
29560 @@ -559,7 +559,7 @@ static void do_reads(struct mirror_set *
29561 */
29562 if (likely(region_in_sync(ms, region, 1)))
29563 m = choose_mirror(ms, bio->bi_sector);
29564 - else if (m && atomic_read(&m->error_count))
29565 + else if (m && atomic_read_unchecked(&m->error_count))
29566 m = NULL;
29567
29568 if (likely(m))
29569 @@ -937,7 +937,7 @@ static int get_mirror(struct mirror_set
29570 }
29571
29572 ms->mirror[mirror].ms = ms;
29573 - atomic_set(&(ms->mirror[mirror].error_count), 0);
29574 + atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
29575 ms->mirror[mirror].error_type = 0;
29576 ms->mirror[mirror].offset = offset;
29577
29578 @@ -1347,7 +1347,7 @@ static void mirror_resume(struct dm_targ
29579 */
29580 static char device_status_char(struct mirror *m)
29581 {
29582 - if (!atomic_read(&(m->error_count)))
29583 + if (!atomic_read_unchecked(&(m->error_count)))
29584 return 'A';
29585
29586 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
29587 diff -urNp linux-3.0.8/drivers/md/dm-stripe.c linux-3.0.8/drivers/md/dm-stripe.c
29588 --- linux-3.0.8/drivers/md/dm-stripe.c 2011-07-21 22:17:23.000000000 -0400
29589 +++ linux-3.0.8/drivers/md/dm-stripe.c 2011-08-23 21:47:55.000000000 -0400
29590 @@ -20,7 +20,7 @@ struct stripe {
29591 struct dm_dev *dev;
29592 sector_t physical_start;
29593
29594 - atomic_t error_count;
29595 + atomic_unchecked_t error_count;
29596 };
29597
29598 struct stripe_c {
29599 @@ -192,7 +192,7 @@ static int stripe_ctr(struct dm_target *
29600 kfree(sc);
29601 return r;
29602 }
29603 - atomic_set(&(sc->stripe[i].error_count), 0);
29604 + atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
29605 }
29606
29607 ti->private = sc;
29608 @@ -314,7 +314,7 @@ static int stripe_status(struct dm_targe
29609 DMEMIT("%d ", sc->stripes);
29610 for (i = 0; i < sc->stripes; i++) {
29611 DMEMIT("%s ", sc->stripe[i].dev->name);
29612 - buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
29613 + buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
29614 'D' : 'A';
29615 }
29616 buffer[i] = '\0';
29617 @@ -361,8 +361,8 @@ static int stripe_end_io(struct dm_targe
29618 */
29619 for (i = 0; i < sc->stripes; i++)
29620 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
29621 - atomic_inc(&(sc->stripe[i].error_count));
29622 - if (atomic_read(&(sc->stripe[i].error_count)) <
29623 + atomic_inc_unchecked(&(sc->stripe[i].error_count));
29624 + if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
29625 DM_IO_ERROR_THRESHOLD)
29626 schedule_work(&sc->trigger_event);
29627 }
29628 diff -urNp linux-3.0.8/drivers/md/dm-table.c linux-3.0.8/drivers/md/dm-table.c
29629 --- linux-3.0.8/drivers/md/dm-table.c 2011-10-24 08:05:32.000000000 -0400
29630 +++ linux-3.0.8/drivers/md/dm-table.c 2011-10-17 23:17:19.000000000 -0400
29631 @@ -390,7 +390,7 @@ static int device_area_is_invalid(struct
29632 if (!dev_size)
29633 return 0;
29634
29635 - if ((start >= dev_size) || (start + len > dev_size)) {
29636 + if ((start >= dev_size) || (len > dev_size - start)) {
29637 DMWARN("%s: %s too small for target: "
29638 "start=%llu, len=%llu, dev_size=%llu",
29639 dm_device_name(ti->table->md), bdevname(bdev, b),
29640 diff -urNp linux-3.0.8/drivers/md/md.c linux-3.0.8/drivers/md/md.c
29641 --- linux-3.0.8/drivers/md/md.c 2011-10-24 08:05:32.000000000 -0400
29642 +++ linux-3.0.8/drivers/md/md.c 2011-10-17 23:17:19.000000000 -0400
29643 @@ -231,10 +231,10 @@ EXPORT_SYMBOL_GPL(bio_clone_mddev);
29644 * start build, activate spare
29645 */
29646 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
29647 -static atomic_t md_event_count;
29648 +static atomic_unchecked_t md_event_count;
29649 void md_new_event(mddev_t *mddev)
29650 {
29651 - atomic_inc(&md_event_count);
29652 + atomic_inc_unchecked(&md_event_count);
29653 wake_up(&md_event_waiters);
29654 }
29655 EXPORT_SYMBOL_GPL(md_new_event);
29656 @@ -244,7 +244,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
29657 */
29658 static void md_new_event_inintr(mddev_t *mddev)
29659 {
29660 - atomic_inc(&md_event_count);
29661 + atomic_inc_unchecked(&md_event_count);
29662 wake_up(&md_event_waiters);
29663 }
29664
29665 @@ -1475,7 +1475,7 @@ static int super_1_load(mdk_rdev_t *rdev
29666
29667 rdev->preferred_minor = 0xffff;
29668 rdev->data_offset = le64_to_cpu(sb->data_offset);
29669 - atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
29670 + atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
29671
29672 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
29673 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
29674 @@ -1653,7 +1653,7 @@ static void super_1_sync(mddev_t *mddev,
29675 else
29676 sb->resync_offset = cpu_to_le64(0);
29677
29678 - sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
29679 + sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
29680
29681 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
29682 sb->size = cpu_to_le64(mddev->dev_sectors);
29683 @@ -2446,7 +2446,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_sho
29684 static ssize_t
29685 errors_show(mdk_rdev_t *rdev, char *page)
29686 {
29687 - return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
29688 + return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
29689 }
29690
29691 static ssize_t
29692 @@ -2455,7 +2455,7 @@ errors_store(mdk_rdev_t *rdev, const cha
29693 char *e;
29694 unsigned long n = simple_strtoul(buf, &e, 10);
29695 if (*buf && (*e == 0 || *e == '\n')) {
29696 - atomic_set(&rdev->corrected_errors, n);
29697 + atomic_set_unchecked(&rdev->corrected_errors, n);
29698 return len;
29699 }
29700 return -EINVAL;
29701 @@ -2811,8 +2811,8 @@ void md_rdev_init(mdk_rdev_t *rdev)
29702 rdev->last_read_error.tv_sec = 0;
29703 rdev->last_read_error.tv_nsec = 0;
29704 atomic_set(&rdev->nr_pending, 0);
29705 - atomic_set(&rdev->read_errors, 0);
29706 - atomic_set(&rdev->corrected_errors, 0);
29707 + atomic_set_unchecked(&rdev->read_errors, 0);
29708 + atomic_set_unchecked(&rdev->corrected_errors, 0);
29709
29710 INIT_LIST_HEAD(&rdev->same_set);
29711 init_waitqueue_head(&rdev->blocked_wait);
29712 @@ -6440,7 +6440,7 @@ static int md_seq_show(struct seq_file *
29713
29714 spin_unlock(&pers_lock);
29715 seq_printf(seq, "\n");
29716 - mi->event = atomic_read(&md_event_count);
29717 + mi->event = atomic_read_unchecked(&md_event_count);
29718 return 0;
29719 }
29720 if (v == (void*)2) {
29721 @@ -6529,7 +6529,7 @@ static int md_seq_show(struct seq_file *
29722 chunk_kb ? "KB" : "B");
29723 if (bitmap->file) {
29724 seq_printf(seq, ", file: ");
29725 - seq_path(seq, &bitmap->file->f_path, " \t\n");
29726 + seq_path(seq, &bitmap->file->f_path, " \t\n\\");
29727 }
29728
29729 seq_printf(seq, "\n");
29730 @@ -6563,7 +6563,7 @@ static int md_seq_open(struct inode *ino
29731 else {
29732 struct seq_file *p = file->private_data;
29733 p->private = mi;
29734 - mi->event = atomic_read(&md_event_count);
29735 + mi->event = atomic_read_unchecked(&md_event_count);
29736 }
29737 return error;
29738 }
29739 @@ -6579,7 +6579,7 @@ static unsigned int mdstat_poll(struct f
29740 /* always allow read */
29741 mask = POLLIN | POLLRDNORM;
29742
29743 - if (mi->event != atomic_read(&md_event_count))
29744 + if (mi->event != atomic_read_unchecked(&md_event_count))
29745 mask |= POLLERR | POLLPRI;
29746 return mask;
29747 }
29748 @@ -6623,7 +6623,7 @@ static int is_mddev_idle(mddev_t *mddev,
29749 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
29750 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
29751 (int)part_stat_read(&disk->part0, sectors[1]) -
29752 - atomic_read(&disk->sync_io);
29753 + atomic_read_unchecked(&disk->sync_io);
29754 /* sync IO will cause sync_io to increase before the disk_stats
29755 * as sync_io is counted when a request starts, and
29756 * disk_stats is counted when it completes.
29757 diff -urNp linux-3.0.8/drivers/md/md.h linux-3.0.8/drivers/md/md.h
29758 --- linux-3.0.8/drivers/md/md.h 2011-10-24 08:05:32.000000000 -0400
29759 +++ linux-3.0.8/drivers/md/md.h 2011-10-17 23:17:19.000000000 -0400
29760 @@ -97,13 +97,13 @@ struct mdk_rdev_s
29761 * only maintained for arrays that
29762 * support hot removal
29763 */
29764 - atomic_t read_errors; /* number of consecutive read errors that
29765 + atomic_unchecked_t read_errors; /* number of consecutive read errors that
29766 * we have tried to ignore.
29767 */
29768 struct timespec last_read_error; /* monotonic time since our
29769 * last read error
29770 */
29771 - atomic_t corrected_errors; /* number of corrected read errors,
29772 + atomic_unchecked_t corrected_errors; /* number of corrected read errors,
29773 * for reporting to userspace and storing
29774 * in superblock.
29775 */
29776 @@ -344,7 +344,7 @@ static inline void rdev_dec_pending(mdk_
29777
29778 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
29779 {
29780 - atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
29781 + atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
29782 }
29783
29784 struct mdk_personality
29785 diff -urNp linux-3.0.8/drivers/md/raid10.c linux-3.0.8/drivers/md/raid10.c
29786 --- linux-3.0.8/drivers/md/raid10.c 2011-10-24 08:05:32.000000000 -0400
29787 +++ linux-3.0.8/drivers/md/raid10.c 2011-10-17 23:17:19.000000000 -0400
29788 @@ -1186,7 +1186,7 @@ static void end_sync_read(struct bio *bi
29789 if (test_bit(BIO_UPTODATE, &bio->bi_flags))
29790 set_bit(R10BIO_Uptodate, &r10_bio->state);
29791 else {
29792 - atomic_add(r10_bio->sectors,
29793 + atomic_add_unchecked(r10_bio->sectors,
29794 &conf->mirrors[d].rdev->corrected_errors);
29795 if (!test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
29796 md_error(r10_bio->mddev,
29797 @@ -1394,7 +1394,7 @@ static void check_decay_read_errors(mdde
29798 {
29799 struct timespec cur_time_mon;
29800 unsigned long hours_since_last;
29801 - unsigned int read_errors = atomic_read(&rdev->read_errors);
29802 + unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
29803
29804 ktime_get_ts(&cur_time_mon);
29805
29806 @@ -1416,9 +1416,9 @@ static void check_decay_read_errors(mdde
29807 * overflowing the shift of read_errors by hours_since_last.
29808 */
29809 if (hours_since_last >= 8 * sizeof(read_errors))
29810 - atomic_set(&rdev->read_errors, 0);
29811 + atomic_set_unchecked(&rdev->read_errors, 0);
29812 else
29813 - atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
29814 + atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
29815 }
29816
29817 /*
29818 @@ -1448,8 +1448,8 @@ static void fix_read_error(conf_t *conf,
29819 return;
29820
29821 check_decay_read_errors(mddev, rdev);
29822 - atomic_inc(&rdev->read_errors);
29823 - if (atomic_read(&rdev->read_errors) > max_read_errors) {
29824 + atomic_inc_unchecked(&rdev->read_errors);
29825 + if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
29826 char b[BDEVNAME_SIZE];
29827 bdevname(rdev->bdev, b);
29828
29829 @@ -1457,7 +1457,7 @@ static void fix_read_error(conf_t *conf,
29830 "md/raid10:%s: %s: Raid device exceeded "
29831 "read_error threshold [cur %d:max %d]\n",
29832 mdname(mddev), b,
29833 - atomic_read(&rdev->read_errors), max_read_errors);
29834 + atomic_read_unchecked(&rdev->read_errors), max_read_errors);
29835 printk(KERN_NOTICE
29836 "md/raid10:%s: %s: Failing raid device\n",
29837 mdname(mddev), b);
29838 @@ -1520,7 +1520,7 @@ static void fix_read_error(conf_t *conf,
29839 test_bit(In_sync, &rdev->flags)) {
29840 atomic_inc(&rdev->nr_pending);
29841 rcu_read_unlock();
29842 - atomic_add(s, &rdev->corrected_errors);
29843 + atomic_add_unchecked(s, &rdev->corrected_errors);
29844 if (sync_page_io(rdev,
29845 r10_bio->devs[sl].addr +
29846 sect,
29847 diff -urNp linux-3.0.8/drivers/md/raid1.c linux-3.0.8/drivers/md/raid1.c
29848 --- linux-3.0.8/drivers/md/raid1.c 2011-10-24 08:05:32.000000000 -0400
29849 +++ linux-3.0.8/drivers/md/raid1.c 2011-10-17 23:17:19.000000000 -0400
29850 @@ -1263,7 +1263,7 @@ static int fix_sync_read_error(r1bio_t *
29851 rdev_dec_pending(rdev, mddev);
29852 md_error(mddev, rdev);
29853 } else
29854 - atomic_add(s, &rdev->corrected_errors);
29855 + atomic_add_unchecked(s, &rdev->corrected_errors);
29856 }
29857 d = start;
29858 while (d != r1_bio->read_disk) {
29859 @@ -1492,7 +1492,7 @@ static void fix_read_error(conf_t *conf,
29860 /* Well, this device is dead */
29861 md_error(mddev, rdev);
29862 else {
29863 - atomic_add(s, &rdev->corrected_errors);
29864 + atomic_add_unchecked(s, &rdev->corrected_errors);
29865 printk(KERN_INFO
29866 "md/raid1:%s: read error corrected "
29867 "(%d sectors at %llu on %s)\n",
29868 diff -urNp linux-3.0.8/drivers/md/raid5.c linux-3.0.8/drivers/md/raid5.c
29869 --- linux-3.0.8/drivers/md/raid5.c 2011-10-24 08:05:32.000000000 -0400
29870 +++ linux-3.0.8/drivers/md/raid5.c 2011-10-17 23:17:19.000000000 -0400
29871 @@ -550,7 +550,7 @@ static void ops_run_io(struct stripe_hea
29872 bi->bi_next = NULL;
29873 if ((rw & WRITE) &&
29874 test_bit(R5_ReWrite, &sh->dev[i].flags))
29875 - atomic_add(STRIPE_SECTORS,
29876 + atomic_add_unchecked(STRIPE_SECTORS,
29877 &rdev->corrected_errors);
29878 generic_make_request(bi);
29879 } else {
29880 @@ -1596,15 +1596,15 @@ static void raid5_end_read_request(struc
29881 clear_bit(R5_ReadError, &sh->dev[i].flags);
29882 clear_bit(R5_ReWrite, &sh->dev[i].flags);
29883 }
29884 - if (atomic_read(&conf->disks[i].rdev->read_errors))
29885 - atomic_set(&conf->disks[i].rdev->read_errors, 0);
29886 + if (atomic_read_unchecked(&conf->disks[i].rdev->read_errors))
29887 + atomic_set_unchecked(&conf->disks[i].rdev->read_errors, 0);
29888 } else {
29889 const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
29890 int retry = 0;
29891 rdev = conf->disks[i].rdev;
29892
29893 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
29894 - atomic_inc(&rdev->read_errors);
29895 + atomic_inc_unchecked(&rdev->read_errors);
29896 if (conf->mddev->degraded >= conf->max_degraded)
29897 printk_rl(KERN_WARNING
29898 "md/raid:%s: read error not correctable "
29899 @@ -1622,7 +1622,7 @@ static void raid5_end_read_request(struc
29900 (unsigned long long)(sh->sector
29901 + rdev->data_offset),
29902 bdn);
29903 - else if (atomic_read(&rdev->read_errors)
29904 + else if (atomic_read_unchecked(&rdev->read_errors)
29905 > conf->max_nr_stripes)
29906 printk(KERN_WARNING
29907 "md/raid:%s: Too many read errors, failing device %s.\n",
29908 @@ -1945,6 +1945,7 @@ static sector_t compute_blocknr(struct s
29909 sector_t r_sector;
29910 struct stripe_head sh2;
29911
29912 + pax_track_stack();
29913
29914 chunk_offset = sector_div(new_sector, sectors_per_chunk);
29915 stripe = new_sector;
29916 diff -urNp linux-3.0.8/drivers/media/common/saa7146_hlp.c linux-3.0.8/drivers/media/common/saa7146_hlp.c
29917 --- linux-3.0.8/drivers/media/common/saa7146_hlp.c 2011-07-21 22:17:23.000000000 -0400
29918 +++ linux-3.0.8/drivers/media/common/saa7146_hlp.c 2011-08-23 21:48:14.000000000 -0400
29919 @@ -353,6 +353,8 @@ static void calculate_clipping_registers
29920
29921 int x[32], y[32], w[32], h[32];
29922
29923 + pax_track_stack();
29924 +
29925 /* clear out memory */
29926 memset(&line_list[0], 0x00, sizeof(u32)*32);
29927 memset(&pixel_list[0], 0x00, sizeof(u32)*32);
29928 diff -urNp linux-3.0.8/drivers/media/dvb/dvb-core/dvb_ca_en50221.c linux-3.0.8/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
29929 --- linux-3.0.8/drivers/media/dvb/dvb-core/dvb_ca_en50221.c 2011-07-21 22:17:23.000000000 -0400
29930 +++ linux-3.0.8/drivers/media/dvb/dvb-core/dvb_ca_en50221.c 2011-08-23 21:48:14.000000000 -0400
29931 @@ -590,6 +590,8 @@ static int dvb_ca_en50221_read_data(stru
29932 u8 buf[HOST_LINK_BUF_SIZE];
29933 int i;
29934
29935 + pax_track_stack();
29936 +
29937 dprintk("%s\n", __func__);
29938
29939 /* check if we have space for a link buf in the rx_buffer */
29940 @@ -1285,6 +1287,8 @@ static ssize_t dvb_ca_en50221_io_write(s
29941 unsigned long timeout;
29942 int written;
29943
29944 + pax_track_stack();
29945 +
29946 dprintk("%s\n", __func__);
29947
29948 /* Incoming packet has a 2 byte header. hdr[0] = slot_id, hdr[1] = connection_id */
29949 diff -urNp linux-3.0.8/drivers/media/dvb/dvb-core/dvb_demux.h linux-3.0.8/drivers/media/dvb/dvb-core/dvb_demux.h
29950 --- linux-3.0.8/drivers/media/dvb/dvb-core/dvb_demux.h 2011-07-21 22:17:23.000000000 -0400
29951 +++ linux-3.0.8/drivers/media/dvb/dvb-core/dvb_demux.h 2011-08-24 18:24:40.000000000 -0400
29952 @@ -68,12 +68,12 @@ struct dvb_demux_feed {
29953 union {
29954 struct dmx_ts_feed ts;
29955 struct dmx_section_feed sec;
29956 - } feed;
29957 + } __no_const feed;
29958
29959 union {
29960 dmx_ts_cb ts;
29961 dmx_section_cb sec;
29962 - } cb;
29963 + } __no_const cb;
29964
29965 struct dvb_demux *demux;
29966 void *priv;
29967 diff -urNp linux-3.0.8/drivers/media/dvb/dvb-core/dvbdev.c linux-3.0.8/drivers/media/dvb/dvb-core/dvbdev.c
29968 --- linux-3.0.8/drivers/media/dvb/dvb-core/dvbdev.c 2011-07-21 22:17:23.000000000 -0400
29969 +++ linux-3.0.8/drivers/media/dvb/dvb-core/dvbdev.c 2011-08-24 18:24:19.000000000 -0400
29970 @@ -192,7 +192,7 @@ int dvb_register_device(struct dvb_adapt
29971 const struct dvb_device *template, void *priv, int type)
29972 {
29973 struct dvb_device *dvbdev;
29974 - struct file_operations *dvbdevfops;
29975 + file_operations_no_const *dvbdevfops;
29976 struct device *clsdev;
29977 int minor;
29978 int id;
29979 diff -urNp linux-3.0.8/drivers/media/dvb/dvb-usb/cxusb.c linux-3.0.8/drivers/media/dvb/dvb-usb/cxusb.c
29980 --- linux-3.0.8/drivers/media/dvb/dvb-usb/cxusb.c 2011-07-21 22:17:23.000000000 -0400
29981 +++ linux-3.0.8/drivers/media/dvb/dvb-usb/cxusb.c 2011-08-24 18:26:33.000000000 -0400
29982 @@ -1059,7 +1059,7 @@ static struct dib0070_config dib7070p_di
29983 struct dib0700_adapter_state {
29984 int (*set_param_save) (struct dvb_frontend *,
29985 struct dvb_frontend_parameters *);
29986 -};
29987 +} __no_const;
29988
29989 static int dib7070_set_param_override(struct dvb_frontend *fe,
29990 struct dvb_frontend_parameters *fep)
29991 diff -urNp linux-3.0.8/drivers/media/dvb/dvb-usb/dib0700_core.c linux-3.0.8/drivers/media/dvb/dvb-usb/dib0700_core.c
29992 --- linux-3.0.8/drivers/media/dvb/dvb-usb/dib0700_core.c 2011-07-21 22:17:23.000000000 -0400
29993 +++ linux-3.0.8/drivers/media/dvb/dvb-usb/dib0700_core.c 2011-08-23 21:48:14.000000000 -0400
29994 @@ -434,6 +434,8 @@ int dib0700_download_firmware(struct usb
29995 if (!buf)
29996 return -ENOMEM;
29997
29998 + pax_track_stack();
29999 +
30000 while ((ret = dvb_usb_get_hexline(fw, &hx, &pos)) > 0) {
30001 deb_fwdata("writing to address 0x%08x (buffer: 0x%02x %02x)\n",
30002 hx.addr, hx.len, hx.chk);
30003 diff -urNp linux-3.0.8/drivers/media/dvb/dvb-usb/dibusb.h linux-3.0.8/drivers/media/dvb/dvb-usb/dibusb.h
30004 --- linux-3.0.8/drivers/media/dvb/dvb-usb/dibusb.h 2011-07-21 22:17:23.000000000 -0400
30005 +++ linux-3.0.8/drivers/media/dvb/dvb-usb/dibusb.h 2011-08-24 18:27:27.000000000 -0400
30006 @@ -97,7 +97,7 @@
30007 #define DIBUSB_IOCTL_CMD_DISABLE_STREAM 0x02
30008
30009 struct dibusb_state {
30010 - struct dib_fe_xfer_ops ops;
30011 + dib_fe_xfer_ops_no_const ops;
30012 int mt2060_present;
30013 u8 tuner_addr;
30014 };
30015 diff -urNp linux-3.0.8/drivers/media/dvb/dvb-usb/dw2102.c linux-3.0.8/drivers/media/dvb/dvb-usb/dw2102.c
30016 --- linux-3.0.8/drivers/media/dvb/dvb-usb/dw2102.c 2011-07-21 22:17:23.000000000 -0400
30017 +++ linux-3.0.8/drivers/media/dvb/dvb-usb/dw2102.c 2011-08-24 18:27:45.000000000 -0400
30018 @@ -95,7 +95,7 @@ struct su3000_state {
30019
30020 struct s6x0_state {
30021 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
30022 -};
30023 +} __no_const;
30024
30025 /* debug */
30026 static int dvb_usb_dw2102_debug;
30027 diff -urNp linux-3.0.8/drivers/media/dvb/dvb-usb/lmedm04.c linux-3.0.8/drivers/media/dvb/dvb-usb/lmedm04.c
30028 --- linux-3.0.8/drivers/media/dvb/dvb-usb/lmedm04.c 2011-07-21 22:17:23.000000000 -0400
30029 +++ linux-3.0.8/drivers/media/dvb/dvb-usb/lmedm04.c 2011-08-23 21:48:14.000000000 -0400
30030 @@ -742,6 +742,7 @@ static int lme2510_download_firmware(str
30031 usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
30032 0x06, 0x80, 0x0200, 0x00, data, 0x0109, 1000);
30033
30034 + pax_track_stack();
30035
30036 data[0] = 0x8a;
30037 len_in = 1;
30038 @@ -764,6 +765,8 @@ static void lme_coldreset(struct usb_dev
30039 int ret = 0, len_in;
30040 u8 data[512] = {0};
30041
30042 + pax_track_stack();
30043 +
30044 data[0] = 0x0a;
30045 len_in = 1;
30046 info("FRM Firmware Cold Reset");
30047 diff -urNp linux-3.0.8/drivers/media/dvb/frontends/dib3000.h linux-3.0.8/drivers/media/dvb/frontends/dib3000.h
30048 --- linux-3.0.8/drivers/media/dvb/frontends/dib3000.h 2011-07-21 22:17:23.000000000 -0400
30049 +++ linux-3.0.8/drivers/media/dvb/frontends/dib3000.h 2011-10-07 19:07:39.000000000 -0400
30050 @@ -40,10 +40,11 @@ struct dib_fe_xfer_ops
30051 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
30052 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
30053 };
30054 +typedef struct dib_fe_xfer_ops __no_const dib_fe_xfer_ops_no_const;
30055
30056 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
30057 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
30058 - struct i2c_adapter* i2c, struct dib_fe_xfer_ops *xfer_ops);
30059 + struct i2c_adapter* i2c, dib_fe_xfer_ops_no_const *xfer_ops);
30060 #else
30061 static inline struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
30062 struct i2c_adapter* i2c, struct dib_fe_xfer_ops *xfer_ops)
30063 diff -urNp linux-3.0.8/drivers/media/dvb/frontends/dib3000mb.c linux-3.0.8/drivers/media/dvb/frontends/dib3000mb.c
30064 --- linux-3.0.8/drivers/media/dvb/frontends/dib3000mb.c 2011-07-21 22:17:23.000000000 -0400
30065 +++ linux-3.0.8/drivers/media/dvb/frontends/dib3000mb.c 2011-08-24 18:28:42.000000000 -0400
30066 @@ -756,7 +756,7 @@ static int dib3000mb_tuner_pass_ctrl(str
30067 static struct dvb_frontend_ops dib3000mb_ops;
30068
30069 struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
30070 - struct i2c_adapter* i2c, struct dib_fe_xfer_ops *xfer_ops)
30071 + struct i2c_adapter* i2c, dib_fe_xfer_ops_no_const *xfer_ops)
30072 {
30073 struct dib3000_state* state = NULL;
30074
30075 diff -urNp linux-3.0.8/drivers/media/dvb/frontends/mb86a16.c linux-3.0.8/drivers/media/dvb/frontends/mb86a16.c
30076 --- linux-3.0.8/drivers/media/dvb/frontends/mb86a16.c 2011-07-21 22:17:23.000000000 -0400
30077 +++ linux-3.0.8/drivers/media/dvb/frontends/mb86a16.c 2011-08-23 21:48:14.000000000 -0400
30078 @@ -1060,6 +1060,8 @@ static int mb86a16_set_fe(struct mb86a16
30079 int ret = -1;
30080 int sync;
30081
30082 + pax_track_stack();
30083 +
30084 dprintk(verbose, MB86A16_INFO, 1, "freq=%d Mhz, symbrt=%d Ksps", state->frequency, state->srate);
30085
30086 fcp = 3000;
30087 diff -urNp linux-3.0.8/drivers/media/dvb/frontends/or51211.c linux-3.0.8/drivers/media/dvb/frontends/or51211.c
30088 --- linux-3.0.8/drivers/media/dvb/frontends/or51211.c 2011-07-21 22:17:23.000000000 -0400
30089 +++ linux-3.0.8/drivers/media/dvb/frontends/or51211.c 2011-08-23 21:48:14.000000000 -0400
30090 @@ -113,6 +113,8 @@ static int or51211_load_firmware (struct
30091 u8 tudata[585];
30092 int i;
30093
30094 + pax_track_stack();
30095 +
30096 dprintk("Firmware is %zd bytes\n",fw->size);
30097
30098 /* Get eprom data */
30099 diff -urNp linux-3.0.8/drivers/media/dvb/ngene/ngene-cards.c linux-3.0.8/drivers/media/dvb/ngene/ngene-cards.c
30100 --- linux-3.0.8/drivers/media/dvb/ngene/ngene-cards.c 2011-07-21 22:17:23.000000000 -0400
30101 +++ linux-3.0.8/drivers/media/dvb/ngene/ngene-cards.c 2011-10-11 10:44:33.000000000 -0400
30102 @@ -379,7 +379,7 @@ static struct ngene_info ngene_info_m780
30103
30104 /****************************************************************************/
30105
30106 -static const struct pci_device_id ngene_id_tbl[] __devinitdata = {
30107 +static const struct pci_device_id ngene_id_tbl[] __devinitconst = {
30108 NGENE_ID(0x18c3, 0xabc3, ngene_info_cineS2),
30109 NGENE_ID(0x18c3, 0xabc4, ngene_info_cineS2),
30110 NGENE_ID(0x18c3, 0xdb01, ngene_info_satixS2),
30111 diff -urNp linux-3.0.8/drivers/media/video/cx18/cx18-driver.c linux-3.0.8/drivers/media/video/cx18/cx18-driver.c
30112 --- linux-3.0.8/drivers/media/video/cx18/cx18-driver.c 2011-07-21 22:17:23.000000000 -0400
30113 +++ linux-3.0.8/drivers/media/video/cx18/cx18-driver.c 2011-08-23 21:48:14.000000000 -0400
30114 @@ -327,6 +327,8 @@ void cx18_read_eeprom(struct cx18 *cx, s
30115 struct i2c_client c;
30116 u8 eedata[256];
30117
30118 + pax_track_stack();
30119 +
30120 memset(&c, 0, sizeof(c));
30121 strlcpy(c.name, "cx18 tveeprom tmp", sizeof(c.name));
30122 c.adapter = &cx->i2c_adap[0];
30123 diff -urNp linux-3.0.8/drivers/media/video/cx23885/cx23885-input.c linux-3.0.8/drivers/media/video/cx23885/cx23885-input.c
30124 --- linux-3.0.8/drivers/media/video/cx23885/cx23885-input.c 2011-07-21 22:17:23.000000000 -0400
30125 +++ linux-3.0.8/drivers/media/video/cx23885/cx23885-input.c 2011-08-23 21:48:14.000000000 -0400
30126 @@ -53,6 +53,8 @@ static void cx23885_input_process_measur
30127 bool handle = false;
30128 struct ir_raw_event ir_core_event[64];
30129
30130 + pax_track_stack();
30131 +
30132 do {
30133 num = 0;
30134 v4l2_subdev_call(dev->sd_ir, ir, rx_read, (u8 *) ir_core_event,
30135 diff -urNp linux-3.0.8/drivers/media/video/cx88/cx88-alsa.c linux-3.0.8/drivers/media/video/cx88/cx88-alsa.c
30136 --- linux-3.0.8/drivers/media/video/cx88/cx88-alsa.c 2011-07-21 22:17:23.000000000 -0400
30137 +++ linux-3.0.8/drivers/media/video/cx88/cx88-alsa.c 2011-10-11 10:44:33.000000000 -0400
30138 @@ -764,7 +764,7 @@ static struct snd_kcontrol_new snd_cx88_
30139 * Only boards with eeprom and byte 1 at eeprom=1 have it
30140 */
30141
30142 -static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitdata = {
30143 +static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitconst = {
30144 {0x14f1,0x8801,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
30145 {0x14f1,0x8811,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
30146 {0, }
30147 diff -urNp linux-3.0.8/drivers/media/video/pvrusb2/pvrusb2-eeprom.c linux-3.0.8/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
30148 --- linux-3.0.8/drivers/media/video/pvrusb2/pvrusb2-eeprom.c 2011-07-21 22:17:23.000000000 -0400
30149 +++ linux-3.0.8/drivers/media/video/pvrusb2/pvrusb2-eeprom.c 2011-08-23 21:48:14.000000000 -0400
30150 @@ -120,6 +120,8 @@ int pvr2_eeprom_analyze(struct pvr2_hdw
30151 u8 *eeprom;
30152 struct tveeprom tvdata;
30153
30154 + pax_track_stack();
30155 +
30156 memset(&tvdata,0,sizeof(tvdata));
30157
30158 eeprom = pvr2_eeprom_fetch(hdw);
30159 diff -urNp linux-3.0.8/drivers/media/video/saa7134/saa6752hs.c linux-3.0.8/drivers/media/video/saa7134/saa6752hs.c
30160 --- linux-3.0.8/drivers/media/video/saa7134/saa6752hs.c 2011-07-21 22:17:23.000000000 -0400
30161 +++ linux-3.0.8/drivers/media/video/saa7134/saa6752hs.c 2011-08-23 21:48:14.000000000 -0400
30162 @@ -682,6 +682,8 @@ static int saa6752hs_init(struct v4l2_su
30163 unsigned char localPAT[256];
30164 unsigned char localPMT[256];
30165
30166 + pax_track_stack();
30167 +
30168 /* Set video format - must be done first as it resets other settings */
30169 set_reg8(client, 0x41, h->video_format);
30170
30171 diff -urNp linux-3.0.8/drivers/media/video/saa7164/saa7164-cmd.c linux-3.0.8/drivers/media/video/saa7164/saa7164-cmd.c
30172 --- linux-3.0.8/drivers/media/video/saa7164/saa7164-cmd.c 2011-07-21 22:17:23.000000000 -0400
30173 +++ linux-3.0.8/drivers/media/video/saa7164/saa7164-cmd.c 2011-08-23 21:48:14.000000000 -0400
30174 @@ -88,6 +88,8 @@ int saa7164_irq_dequeue(struct saa7164_d
30175 u8 tmp[512];
30176 dprintk(DBGLVL_CMD, "%s()\n", __func__);
30177
30178 + pax_track_stack();
30179 +
30180 /* While any outstand message on the bus exists... */
30181 do {
30182
30183 @@ -141,6 +143,8 @@ int saa7164_cmd_dequeue(struct saa7164_d
30184 u8 tmp[512];
30185 dprintk(DBGLVL_CMD, "%s()\n", __func__);
30186
30187 + pax_track_stack();
30188 +
30189 while (loop) {
30190
30191 struct tmComResInfo tRsp = { 0, 0, 0, 0, 0, 0 };
30192 diff -urNp linux-3.0.8/drivers/media/video/timblogiw.c linux-3.0.8/drivers/media/video/timblogiw.c
30193 --- linux-3.0.8/drivers/media/video/timblogiw.c 2011-07-21 22:17:23.000000000 -0400
30194 +++ linux-3.0.8/drivers/media/video/timblogiw.c 2011-08-24 18:29:20.000000000 -0400
30195 @@ -745,7 +745,7 @@ static int timblogiw_mmap(struct file *f
30196
30197 /* Platform device functions */
30198
30199 -static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
30200 +static __devinitconst v4l2_ioctl_ops_no_const timblogiw_ioctl_ops = {
30201 .vidioc_querycap = timblogiw_querycap,
30202 .vidioc_enum_fmt_vid_cap = timblogiw_enum_fmt,
30203 .vidioc_g_fmt_vid_cap = timblogiw_g_fmt,
30204 diff -urNp linux-3.0.8/drivers/media/video/usbvision/usbvision-core.c linux-3.0.8/drivers/media/video/usbvision/usbvision-core.c
30205 --- linux-3.0.8/drivers/media/video/usbvision/usbvision-core.c 2011-07-21 22:17:23.000000000 -0400
30206 +++ linux-3.0.8/drivers/media/video/usbvision/usbvision-core.c 2011-08-23 21:48:14.000000000 -0400
30207 @@ -707,6 +707,8 @@ static enum parse_state usbvision_parse_
30208 unsigned char rv, gv, bv;
30209 static unsigned char *Y, *U, *V;
30210
30211 + pax_track_stack();
30212 +
30213 frame = usbvision->cur_frame;
30214 image_size = frame->frmwidth * frame->frmheight;
30215 if ((frame->v4l2_format.format == V4L2_PIX_FMT_YUV422P) ||
30216 diff -urNp linux-3.0.8/drivers/media/video/videobuf-dma-sg.c linux-3.0.8/drivers/media/video/videobuf-dma-sg.c
30217 --- linux-3.0.8/drivers/media/video/videobuf-dma-sg.c 2011-07-21 22:17:23.000000000 -0400
30218 +++ linux-3.0.8/drivers/media/video/videobuf-dma-sg.c 2011-08-23 21:48:14.000000000 -0400
30219 @@ -606,6 +606,8 @@ void *videobuf_sg_alloc(size_t size)
30220 {
30221 struct videobuf_queue q;
30222
30223 + pax_track_stack();
30224 +
30225 /* Required to make generic handler to call __videobuf_alloc */
30226 q.int_ops = &sg_ops;
30227
30228 diff -urNp linux-3.0.8/drivers/message/fusion/mptbase.c linux-3.0.8/drivers/message/fusion/mptbase.c
30229 --- linux-3.0.8/drivers/message/fusion/mptbase.c 2011-07-21 22:17:23.000000000 -0400
30230 +++ linux-3.0.8/drivers/message/fusion/mptbase.c 2011-08-23 21:48:14.000000000 -0400
30231 @@ -6681,8 +6681,13 @@ static int mpt_iocinfo_proc_show(struct
30232 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
30233 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
30234
30235 +#ifdef CONFIG_GRKERNSEC_HIDESYM
30236 + seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
30237 +#else
30238 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
30239 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
30240 +#endif
30241 +
30242 /*
30243 * Rounding UP to nearest 4-kB boundary here...
30244 */
30245 diff -urNp linux-3.0.8/drivers/message/fusion/mptsas.c linux-3.0.8/drivers/message/fusion/mptsas.c
30246 --- linux-3.0.8/drivers/message/fusion/mptsas.c 2011-07-21 22:17:23.000000000 -0400
30247 +++ linux-3.0.8/drivers/message/fusion/mptsas.c 2011-08-23 21:47:55.000000000 -0400
30248 @@ -439,6 +439,23 @@ mptsas_is_end_device(struct mptsas_devin
30249 return 0;
30250 }
30251
30252 +static inline void
30253 +mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
30254 +{
30255 + if (phy_info->port_details) {
30256 + phy_info->port_details->rphy = rphy;
30257 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
30258 + ioc->name, rphy));
30259 + }
30260 +
30261 + if (rphy) {
30262 + dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
30263 + &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
30264 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
30265 + ioc->name, rphy, rphy->dev.release));
30266 + }
30267 +}
30268 +
30269 /* no mutex */
30270 static void
30271 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
30272 @@ -477,23 +494,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *p
30273 return NULL;
30274 }
30275
30276 -static inline void
30277 -mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
30278 -{
30279 - if (phy_info->port_details) {
30280 - phy_info->port_details->rphy = rphy;
30281 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
30282 - ioc->name, rphy));
30283 - }
30284 -
30285 - if (rphy) {
30286 - dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
30287 - &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
30288 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
30289 - ioc->name, rphy, rphy->dev.release));
30290 - }
30291 -}
30292 -
30293 static inline struct sas_port *
30294 mptsas_get_port(struct mptsas_phyinfo *phy_info)
30295 {
30296 diff -urNp linux-3.0.8/drivers/message/fusion/mptscsih.c linux-3.0.8/drivers/message/fusion/mptscsih.c
30297 --- linux-3.0.8/drivers/message/fusion/mptscsih.c 2011-07-21 22:17:23.000000000 -0400
30298 +++ linux-3.0.8/drivers/message/fusion/mptscsih.c 2011-08-23 21:47:55.000000000 -0400
30299 @@ -1268,15 +1268,16 @@ mptscsih_info(struct Scsi_Host *SChost)
30300
30301 h = shost_priv(SChost);
30302
30303 - if (h) {
30304 - if (h->info_kbuf == NULL)
30305 - if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
30306 - return h->info_kbuf;
30307 - h->info_kbuf[0] = '\0';
30308 + if (!h)
30309 + return NULL;
30310
30311 - mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
30312 - h->info_kbuf[size-1] = '\0';
30313 - }
30314 + if (h->info_kbuf == NULL)
30315 + if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
30316 + return h->info_kbuf;
30317 + h->info_kbuf[0] = '\0';
30318 +
30319 + mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
30320 + h->info_kbuf[size-1] = '\0';
30321
30322 return h->info_kbuf;
30323 }
30324 diff -urNp linux-3.0.8/drivers/message/i2o/i2o_config.c linux-3.0.8/drivers/message/i2o/i2o_config.c
30325 --- linux-3.0.8/drivers/message/i2o/i2o_config.c 2011-07-21 22:17:23.000000000 -0400
30326 +++ linux-3.0.8/drivers/message/i2o/i2o_config.c 2011-08-23 21:48:14.000000000 -0400
30327 @@ -781,6 +781,8 @@ static int i2o_cfg_passthru(unsigned lon
30328 struct i2o_message *msg;
30329 unsigned int iop;
30330
30331 + pax_track_stack();
30332 +
30333 if (get_user(iop, &cmd->iop) || get_user(user_msg, &cmd->msg))
30334 return -EFAULT;
30335
30336 diff -urNp linux-3.0.8/drivers/message/i2o/i2o_proc.c linux-3.0.8/drivers/message/i2o/i2o_proc.c
30337 --- linux-3.0.8/drivers/message/i2o/i2o_proc.c 2011-07-21 22:17:23.000000000 -0400
30338 +++ linux-3.0.8/drivers/message/i2o/i2o_proc.c 2011-08-23 21:47:55.000000000 -0400
30339 @@ -255,13 +255,6 @@ static char *scsi_devices[] = {
30340 "Array Controller Device"
30341 };
30342
30343 -static char *chtostr(u8 * chars, int n)
30344 -{
30345 - char tmp[256];
30346 - tmp[0] = 0;
30347 - return strncat(tmp, (char *)chars, n);
30348 -}
30349 -
30350 static int i2o_report_query_status(struct seq_file *seq, int block_status,
30351 char *group)
30352 {
30353 @@ -838,8 +831,7 @@ static int i2o_seq_show_ddm_table(struct
30354
30355 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
30356 seq_printf(seq, "%-#8x", ddm_table.module_id);
30357 - seq_printf(seq, "%-29s",
30358 - chtostr(ddm_table.module_name_version, 28));
30359 + seq_printf(seq, "%-.28s", ddm_table.module_name_version);
30360 seq_printf(seq, "%9d ", ddm_table.data_size);
30361 seq_printf(seq, "%8d", ddm_table.code_size);
30362
30363 @@ -940,8 +932,8 @@ static int i2o_seq_show_drivers_stored(s
30364
30365 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
30366 seq_printf(seq, "%-#8x", dst->module_id);
30367 - seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
30368 - seq_printf(seq, "%-9s", chtostr(dst->date, 8));
30369 + seq_printf(seq, "%-.28s", dst->module_name_version);
30370 + seq_printf(seq, "%-.8s", dst->date);
30371 seq_printf(seq, "%8d ", dst->module_size);
30372 seq_printf(seq, "%8d ", dst->mpb_size);
30373 seq_printf(seq, "0x%04x", dst->module_flags);
30374 @@ -1272,14 +1264,10 @@ static int i2o_seq_show_dev_identity(str
30375 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
30376 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
30377 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
30378 - seq_printf(seq, "Vendor info : %s\n",
30379 - chtostr((u8 *) (work32 + 2), 16));
30380 - seq_printf(seq, "Product info : %s\n",
30381 - chtostr((u8 *) (work32 + 6), 16));
30382 - seq_printf(seq, "Description : %s\n",
30383 - chtostr((u8 *) (work32 + 10), 16));
30384 - seq_printf(seq, "Product rev. : %s\n",
30385 - chtostr((u8 *) (work32 + 14), 8));
30386 + seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
30387 + seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
30388 + seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
30389 + seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
30390
30391 seq_printf(seq, "Serial number : ");
30392 print_serial_number(seq, (u8 *) (work32 + 16),
30393 @@ -1324,10 +1312,8 @@ static int i2o_seq_show_ddm_identity(str
30394 }
30395
30396 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
30397 - seq_printf(seq, "Module name : %s\n",
30398 - chtostr(result.module_name, 24));
30399 - seq_printf(seq, "Module revision : %s\n",
30400 - chtostr(result.module_rev, 8));
30401 + seq_printf(seq, "Module name : %.24s\n", result.module_name);
30402 + seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
30403
30404 seq_printf(seq, "Serial number : ");
30405 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
30406 @@ -1358,14 +1344,10 @@ static int i2o_seq_show_uinfo(struct seq
30407 return 0;
30408 }
30409
30410 - seq_printf(seq, "Device name : %s\n",
30411 - chtostr(result.device_name, 64));
30412 - seq_printf(seq, "Service name : %s\n",
30413 - chtostr(result.service_name, 64));
30414 - seq_printf(seq, "Physical name : %s\n",
30415 - chtostr(result.physical_location, 64));
30416 - seq_printf(seq, "Instance number : %s\n",
30417 - chtostr(result.instance_number, 4));
30418 + seq_printf(seq, "Device name : %.64s\n", result.device_name);
30419 + seq_printf(seq, "Service name : %.64s\n", result.service_name);
30420 + seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
30421 + seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
30422
30423 return 0;
30424 }
30425 diff -urNp linux-3.0.8/drivers/message/i2o/iop.c linux-3.0.8/drivers/message/i2o/iop.c
30426 --- linux-3.0.8/drivers/message/i2o/iop.c 2011-07-21 22:17:23.000000000 -0400
30427 +++ linux-3.0.8/drivers/message/i2o/iop.c 2011-08-23 21:47:55.000000000 -0400
30428 @@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_contro
30429
30430 spin_lock_irqsave(&c->context_list_lock, flags);
30431
30432 - if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
30433 - atomic_inc(&c->context_list_counter);
30434 + if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
30435 + atomic_inc_unchecked(&c->context_list_counter);
30436
30437 - entry->context = atomic_read(&c->context_list_counter);
30438 + entry->context = atomic_read_unchecked(&c->context_list_counter);
30439
30440 list_add(&entry->list, &c->context_list);
30441
30442 @@ -1077,7 +1077,7 @@ struct i2o_controller *i2o_iop_alloc(voi
30443
30444 #if BITS_PER_LONG == 64
30445 spin_lock_init(&c->context_list_lock);
30446 - atomic_set(&c->context_list_counter, 0);
30447 + atomic_set_unchecked(&c->context_list_counter, 0);
30448 INIT_LIST_HEAD(&c->context_list);
30449 #endif
30450
30451 diff -urNp linux-3.0.8/drivers/mfd/ab3100-core.c linux-3.0.8/drivers/mfd/ab3100-core.c
30452 --- linux-3.0.8/drivers/mfd/ab3100-core.c 2011-07-21 22:17:23.000000000 -0400
30453 +++ linux-3.0.8/drivers/mfd/ab3100-core.c 2011-10-11 10:44:33.000000000 -0400
30454 @@ -809,7 +809,7 @@ struct ab_family_id {
30455 char *name;
30456 };
30457
30458 -static const struct ab_family_id ids[] __devinitdata = {
30459 +static const struct ab_family_id ids[] __devinitconst = {
30460 /* AB3100 */
30461 {
30462 .id = 0xc0,
30463 diff -urNp linux-3.0.8/drivers/mfd/abx500-core.c linux-3.0.8/drivers/mfd/abx500-core.c
30464 --- linux-3.0.8/drivers/mfd/abx500-core.c 2011-07-21 22:17:23.000000000 -0400
30465 +++ linux-3.0.8/drivers/mfd/abx500-core.c 2011-08-23 21:47:55.000000000 -0400
30466 @@ -14,7 +14,7 @@ static LIST_HEAD(abx500_list);
30467
30468 struct abx500_device_entry {
30469 struct list_head list;
30470 - struct abx500_ops ops;
30471 + abx500_ops_no_const ops;
30472 struct device *dev;
30473 };
30474
30475 diff -urNp linux-3.0.8/drivers/mfd/janz-cmodio.c linux-3.0.8/drivers/mfd/janz-cmodio.c
30476 --- linux-3.0.8/drivers/mfd/janz-cmodio.c 2011-07-21 22:17:23.000000000 -0400
30477 +++ linux-3.0.8/drivers/mfd/janz-cmodio.c 2011-08-23 21:47:55.000000000 -0400
30478 @@ -13,6 +13,7 @@
30479
30480 #include <linux/kernel.h>
30481 #include <linux/module.h>
30482 +#include <linux/slab.h>
30483 #include <linux/init.h>
30484 #include <linux/pci.h>
30485 #include <linux/interrupt.h>
30486 diff -urNp linux-3.0.8/drivers/mfd/wm8350-i2c.c linux-3.0.8/drivers/mfd/wm8350-i2c.c
30487 --- linux-3.0.8/drivers/mfd/wm8350-i2c.c 2011-07-21 22:17:23.000000000 -0400
30488 +++ linux-3.0.8/drivers/mfd/wm8350-i2c.c 2011-08-23 21:48:14.000000000 -0400
30489 @@ -44,6 +44,8 @@ static int wm8350_i2c_write_device(struc
30490 u8 msg[(WM8350_MAX_REGISTER << 1) + 1];
30491 int ret;
30492
30493 + pax_track_stack();
30494 +
30495 if (bytes > ((WM8350_MAX_REGISTER << 1) + 1))
30496 return -EINVAL;
30497
30498 diff -urNp linux-3.0.8/drivers/misc/lis3lv02d/lis3lv02d.c linux-3.0.8/drivers/misc/lis3lv02d/lis3lv02d.c
30499 --- linux-3.0.8/drivers/misc/lis3lv02d/lis3lv02d.c 2011-10-24 08:05:32.000000000 -0400
30500 +++ linux-3.0.8/drivers/misc/lis3lv02d/lis3lv02d.c 2011-10-17 23:17:19.000000000 -0400
30501 @@ -437,7 +437,7 @@ static irqreturn_t lis302dl_interrupt(in
30502 * the lid is closed. This leads to interrupts as soon as a little move
30503 * is done.
30504 */
30505 - atomic_inc(&lis3_dev.count);
30506 + atomic_inc_unchecked(&lis3_dev.count);
30507
30508 wake_up_interruptible(&lis3_dev.misc_wait);
30509 kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN);
30510 @@ -520,7 +520,7 @@ static int lis3lv02d_misc_open(struct in
30511 if (lis3_dev.pm_dev)
30512 pm_runtime_get_sync(lis3_dev.pm_dev);
30513
30514 - atomic_set(&lis3_dev.count, 0);
30515 + atomic_set_unchecked(&lis3_dev.count, 0);
30516 return 0;
30517 }
30518
30519 @@ -547,7 +547,7 @@ static ssize_t lis3lv02d_misc_read(struc
30520 add_wait_queue(&lis3_dev.misc_wait, &wait);
30521 while (true) {
30522 set_current_state(TASK_INTERRUPTIBLE);
30523 - data = atomic_xchg(&lis3_dev.count, 0);
30524 + data = atomic_xchg_unchecked(&lis3_dev.count, 0);
30525 if (data)
30526 break;
30527
30528 @@ -585,7 +585,7 @@ out:
30529 static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
30530 {
30531 poll_wait(file, &lis3_dev.misc_wait, wait);
30532 - if (atomic_read(&lis3_dev.count))
30533 + if (atomic_read_unchecked(&lis3_dev.count))
30534 return POLLIN | POLLRDNORM;
30535 return 0;
30536 }
30537 diff -urNp linux-3.0.8/drivers/misc/lis3lv02d/lis3lv02d.h linux-3.0.8/drivers/misc/lis3lv02d/lis3lv02d.h
30538 --- linux-3.0.8/drivers/misc/lis3lv02d/lis3lv02d.h 2011-07-21 22:17:23.000000000 -0400
30539 +++ linux-3.0.8/drivers/misc/lis3lv02d/lis3lv02d.h 2011-08-23 21:47:55.000000000 -0400
30540 @@ -265,7 +265,7 @@ struct lis3lv02d {
30541 struct input_polled_dev *idev; /* input device */
30542 struct platform_device *pdev; /* platform device */
30543 struct regulator_bulk_data regulators[2];
30544 - atomic_t count; /* interrupt count after last read */
30545 + atomic_unchecked_t count; /* interrupt count after last read */
30546 union axis_conversion ac; /* hw -> logical axis */
30547 int mapped_btns[3];
30548
30549 diff -urNp linux-3.0.8/drivers/misc/sgi-gru/gruhandles.c linux-3.0.8/drivers/misc/sgi-gru/gruhandles.c
30550 --- linux-3.0.8/drivers/misc/sgi-gru/gruhandles.c 2011-07-21 22:17:23.000000000 -0400
30551 +++ linux-3.0.8/drivers/misc/sgi-gru/gruhandles.c 2011-08-23 21:47:55.000000000 -0400
30552 @@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op
30553 unsigned long nsec;
30554
30555 nsec = CLKS2NSEC(clks);
30556 - atomic_long_inc(&mcs_op_statistics[op].count);
30557 - atomic_long_add(nsec, &mcs_op_statistics[op].total);
30558 + atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
30559 + atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
30560 if (mcs_op_statistics[op].max < nsec)
30561 mcs_op_statistics[op].max = nsec;
30562 }
30563 diff -urNp linux-3.0.8/drivers/misc/sgi-gru/gruprocfs.c linux-3.0.8/drivers/misc/sgi-gru/gruprocfs.c
30564 --- linux-3.0.8/drivers/misc/sgi-gru/gruprocfs.c 2011-07-21 22:17:23.000000000 -0400
30565 +++ linux-3.0.8/drivers/misc/sgi-gru/gruprocfs.c 2011-08-23 21:47:55.000000000 -0400
30566 @@ -32,9 +32,9 @@
30567
30568 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
30569
30570 -static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
30571 +static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
30572 {
30573 - unsigned long val = atomic_long_read(v);
30574 + unsigned long val = atomic_long_read_unchecked(v);
30575
30576 seq_printf(s, "%16lu %s\n", val, id);
30577 }
30578 @@ -134,8 +134,8 @@ static int mcs_statistics_show(struct se
30579
30580 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
30581 for (op = 0; op < mcsop_last; op++) {
30582 - count = atomic_long_read(&mcs_op_statistics[op].count);
30583 - total = atomic_long_read(&mcs_op_statistics[op].total);
30584 + count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
30585 + total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
30586 max = mcs_op_statistics[op].max;
30587 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
30588 count ? total / count : 0, max);
30589 diff -urNp linux-3.0.8/drivers/misc/sgi-gru/grutables.h linux-3.0.8/drivers/misc/sgi-gru/grutables.h
30590 --- linux-3.0.8/drivers/misc/sgi-gru/grutables.h 2011-07-21 22:17:23.000000000 -0400
30591 +++ linux-3.0.8/drivers/misc/sgi-gru/grutables.h 2011-08-23 21:47:55.000000000 -0400
30592 @@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
30593 * GRU statistics.
30594 */
30595 struct gru_stats_s {
30596 - atomic_long_t vdata_alloc;
30597 - atomic_long_t vdata_free;
30598 - atomic_long_t gts_alloc;
30599 - atomic_long_t gts_free;
30600 - atomic_long_t gms_alloc;
30601 - atomic_long_t gms_free;
30602 - atomic_long_t gts_double_allocate;
30603 - atomic_long_t assign_context;
30604 - atomic_long_t assign_context_failed;
30605 - atomic_long_t free_context;
30606 - atomic_long_t load_user_context;
30607 - atomic_long_t load_kernel_context;
30608 - atomic_long_t lock_kernel_context;
30609 - atomic_long_t unlock_kernel_context;
30610 - atomic_long_t steal_user_context;
30611 - atomic_long_t steal_kernel_context;
30612 - atomic_long_t steal_context_failed;
30613 - atomic_long_t nopfn;
30614 - atomic_long_t asid_new;
30615 - atomic_long_t asid_next;
30616 - atomic_long_t asid_wrap;
30617 - atomic_long_t asid_reuse;
30618 - atomic_long_t intr;
30619 - atomic_long_t intr_cbr;
30620 - atomic_long_t intr_tfh;
30621 - atomic_long_t intr_spurious;
30622 - atomic_long_t intr_mm_lock_failed;
30623 - atomic_long_t call_os;
30624 - atomic_long_t call_os_wait_queue;
30625 - atomic_long_t user_flush_tlb;
30626 - atomic_long_t user_unload_context;
30627 - atomic_long_t user_exception;
30628 - atomic_long_t set_context_option;
30629 - atomic_long_t check_context_retarget_intr;
30630 - atomic_long_t check_context_unload;
30631 - atomic_long_t tlb_dropin;
30632 - atomic_long_t tlb_preload_page;
30633 - atomic_long_t tlb_dropin_fail_no_asid;
30634 - atomic_long_t tlb_dropin_fail_upm;
30635 - atomic_long_t tlb_dropin_fail_invalid;
30636 - atomic_long_t tlb_dropin_fail_range_active;
30637 - atomic_long_t tlb_dropin_fail_idle;
30638 - atomic_long_t tlb_dropin_fail_fmm;
30639 - atomic_long_t tlb_dropin_fail_no_exception;
30640 - atomic_long_t tfh_stale_on_fault;
30641 - atomic_long_t mmu_invalidate_range;
30642 - atomic_long_t mmu_invalidate_page;
30643 - atomic_long_t flush_tlb;
30644 - atomic_long_t flush_tlb_gru;
30645 - atomic_long_t flush_tlb_gru_tgh;
30646 - atomic_long_t flush_tlb_gru_zero_asid;
30647 -
30648 - atomic_long_t copy_gpa;
30649 - atomic_long_t read_gpa;
30650 -
30651 - atomic_long_t mesq_receive;
30652 - atomic_long_t mesq_receive_none;
30653 - atomic_long_t mesq_send;
30654 - atomic_long_t mesq_send_failed;
30655 - atomic_long_t mesq_noop;
30656 - atomic_long_t mesq_send_unexpected_error;
30657 - atomic_long_t mesq_send_lb_overflow;
30658 - atomic_long_t mesq_send_qlimit_reached;
30659 - atomic_long_t mesq_send_amo_nacked;
30660 - atomic_long_t mesq_send_put_nacked;
30661 - atomic_long_t mesq_page_overflow;
30662 - atomic_long_t mesq_qf_locked;
30663 - atomic_long_t mesq_qf_noop_not_full;
30664 - atomic_long_t mesq_qf_switch_head_failed;
30665 - atomic_long_t mesq_qf_unexpected_error;
30666 - atomic_long_t mesq_noop_unexpected_error;
30667 - atomic_long_t mesq_noop_lb_overflow;
30668 - atomic_long_t mesq_noop_qlimit_reached;
30669 - atomic_long_t mesq_noop_amo_nacked;
30670 - atomic_long_t mesq_noop_put_nacked;
30671 - atomic_long_t mesq_noop_page_overflow;
30672 + atomic_long_unchecked_t vdata_alloc;
30673 + atomic_long_unchecked_t vdata_free;
30674 + atomic_long_unchecked_t gts_alloc;
30675 + atomic_long_unchecked_t gts_free;
30676 + atomic_long_unchecked_t gms_alloc;
30677 + atomic_long_unchecked_t gms_free;
30678 + atomic_long_unchecked_t gts_double_allocate;
30679 + atomic_long_unchecked_t assign_context;
30680 + atomic_long_unchecked_t assign_context_failed;
30681 + atomic_long_unchecked_t free_context;
30682 + atomic_long_unchecked_t load_user_context;
30683 + atomic_long_unchecked_t load_kernel_context;
30684 + atomic_long_unchecked_t lock_kernel_context;
30685 + atomic_long_unchecked_t unlock_kernel_context;
30686 + atomic_long_unchecked_t steal_user_context;
30687 + atomic_long_unchecked_t steal_kernel_context;
30688 + atomic_long_unchecked_t steal_context_failed;
30689 + atomic_long_unchecked_t nopfn;
30690 + atomic_long_unchecked_t asid_new;
30691 + atomic_long_unchecked_t asid_next;
30692 + atomic_long_unchecked_t asid_wrap;
30693 + atomic_long_unchecked_t asid_reuse;
30694 + atomic_long_unchecked_t intr;
30695 + atomic_long_unchecked_t intr_cbr;
30696 + atomic_long_unchecked_t intr_tfh;
30697 + atomic_long_unchecked_t intr_spurious;
30698 + atomic_long_unchecked_t intr_mm_lock_failed;
30699 + atomic_long_unchecked_t call_os;
30700 + atomic_long_unchecked_t call_os_wait_queue;
30701 + atomic_long_unchecked_t user_flush_tlb;
30702 + atomic_long_unchecked_t user_unload_context;
30703 + atomic_long_unchecked_t user_exception;
30704 + atomic_long_unchecked_t set_context_option;
30705 + atomic_long_unchecked_t check_context_retarget_intr;
30706 + atomic_long_unchecked_t check_context_unload;
30707 + atomic_long_unchecked_t tlb_dropin;
30708 + atomic_long_unchecked_t tlb_preload_page;
30709 + atomic_long_unchecked_t tlb_dropin_fail_no_asid;
30710 + atomic_long_unchecked_t tlb_dropin_fail_upm;
30711 + atomic_long_unchecked_t tlb_dropin_fail_invalid;
30712 + atomic_long_unchecked_t tlb_dropin_fail_range_active;
30713 + atomic_long_unchecked_t tlb_dropin_fail_idle;
30714 + atomic_long_unchecked_t tlb_dropin_fail_fmm;
30715 + atomic_long_unchecked_t tlb_dropin_fail_no_exception;
30716 + atomic_long_unchecked_t tfh_stale_on_fault;
30717 + atomic_long_unchecked_t mmu_invalidate_range;
30718 + atomic_long_unchecked_t mmu_invalidate_page;
30719 + atomic_long_unchecked_t flush_tlb;
30720 + atomic_long_unchecked_t flush_tlb_gru;
30721 + atomic_long_unchecked_t flush_tlb_gru_tgh;
30722 + atomic_long_unchecked_t flush_tlb_gru_zero_asid;
30723 +
30724 + atomic_long_unchecked_t copy_gpa;
30725 + atomic_long_unchecked_t read_gpa;
30726 +
30727 + atomic_long_unchecked_t mesq_receive;
30728 + atomic_long_unchecked_t mesq_receive_none;
30729 + atomic_long_unchecked_t mesq_send;
30730 + atomic_long_unchecked_t mesq_send_failed;
30731 + atomic_long_unchecked_t mesq_noop;
30732 + atomic_long_unchecked_t mesq_send_unexpected_error;
30733 + atomic_long_unchecked_t mesq_send_lb_overflow;
30734 + atomic_long_unchecked_t mesq_send_qlimit_reached;
30735 + atomic_long_unchecked_t mesq_send_amo_nacked;
30736 + atomic_long_unchecked_t mesq_send_put_nacked;
30737 + atomic_long_unchecked_t mesq_page_overflow;
30738 + atomic_long_unchecked_t mesq_qf_locked;
30739 + atomic_long_unchecked_t mesq_qf_noop_not_full;
30740 + atomic_long_unchecked_t mesq_qf_switch_head_failed;
30741 + atomic_long_unchecked_t mesq_qf_unexpected_error;
30742 + atomic_long_unchecked_t mesq_noop_unexpected_error;
30743 + atomic_long_unchecked_t mesq_noop_lb_overflow;
30744 + atomic_long_unchecked_t mesq_noop_qlimit_reached;
30745 + atomic_long_unchecked_t mesq_noop_amo_nacked;
30746 + atomic_long_unchecked_t mesq_noop_put_nacked;
30747 + atomic_long_unchecked_t mesq_noop_page_overflow;
30748
30749 };
30750
30751 @@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start
30752 tghop_invalidate, mcsop_last};
30753
30754 struct mcs_op_statistic {
30755 - atomic_long_t count;
30756 - atomic_long_t total;
30757 + atomic_long_unchecked_t count;
30758 + atomic_long_unchecked_t total;
30759 unsigned long max;
30760 };
30761
30762 @@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_st
30763
30764 #define STAT(id) do { \
30765 if (gru_options & OPT_STATS) \
30766 - atomic_long_inc(&gru_stats.id); \
30767 + atomic_long_inc_unchecked(&gru_stats.id); \
30768 } while (0)
30769
30770 #ifdef CONFIG_SGI_GRU_DEBUG
30771 diff -urNp linux-3.0.8/drivers/misc/sgi-xp/xpc.h linux-3.0.8/drivers/misc/sgi-xp/xpc.h
30772 --- linux-3.0.8/drivers/misc/sgi-xp/xpc.h 2011-07-21 22:17:23.000000000 -0400
30773 +++ linux-3.0.8/drivers/misc/sgi-xp/xpc.h 2011-10-11 10:44:33.000000000 -0400
30774 @@ -835,6 +835,7 @@ struct xpc_arch_operations {
30775 void (*received_payload) (struct xpc_channel *, void *);
30776 void (*notify_senders_of_disconnect) (struct xpc_channel *);
30777 };
30778 +typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
30779
30780 /* struct xpc_partition act_state values (for XPC HB) */
30781
30782 @@ -876,7 +877,7 @@ extern struct xpc_registration xpc_regis
30783 /* found in xpc_main.c */
30784 extern struct device *xpc_part;
30785 extern struct device *xpc_chan;
30786 -extern struct xpc_arch_operations xpc_arch_ops;
30787 +extern xpc_arch_operations_no_const xpc_arch_ops;
30788 extern int xpc_disengage_timelimit;
30789 extern int xpc_disengage_timedout;
30790 extern int xpc_activate_IRQ_rcvd;
30791 diff -urNp linux-3.0.8/drivers/misc/sgi-xp/xpc_main.c linux-3.0.8/drivers/misc/sgi-xp/xpc_main.c
30792 --- linux-3.0.8/drivers/misc/sgi-xp/xpc_main.c 2011-07-21 22:17:23.000000000 -0400
30793 +++ linux-3.0.8/drivers/misc/sgi-xp/xpc_main.c 2011-10-11 10:44:33.000000000 -0400
30794 @@ -162,7 +162,7 @@ static struct notifier_block xpc_die_not
30795 .notifier_call = xpc_system_die,
30796 };
30797
30798 -struct xpc_arch_operations xpc_arch_ops;
30799 +xpc_arch_operations_no_const xpc_arch_ops;
30800
30801 /*
30802 * Timer function to enforce the timelimit on the partition disengage.
30803 diff -urNp linux-3.0.8/drivers/misc/sgi-xp/xp.h linux-3.0.8/drivers/misc/sgi-xp/xp.h
30804 --- linux-3.0.8/drivers/misc/sgi-xp/xp.h 2011-07-21 22:17:23.000000000 -0400
30805 +++ linux-3.0.8/drivers/misc/sgi-xp/xp.h 2011-08-23 21:47:55.000000000 -0400
30806 @@ -289,7 +289,7 @@ struct xpc_interface {
30807 xpc_notify_func, void *);
30808 void (*received) (short, int, void *);
30809 enum xp_retval (*partid_to_nasids) (short, void *);
30810 -};
30811 +} __no_const;
30812
30813 extern struct xpc_interface xpc_interface;
30814
30815 diff -urNp linux-3.0.8/drivers/mmc/host/sdhci-pci.c linux-3.0.8/drivers/mmc/host/sdhci-pci.c
30816 --- linux-3.0.8/drivers/mmc/host/sdhci-pci.c 2011-07-21 22:17:23.000000000 -0400
30817 +++ linux-3.0.8/drivers/mmc/host/sdhci-pci.c 2011-10-11 10:44:33.000000000 -0400
30818 @@ -524,7 +524,7 @@ static const struct sdhci_pci_fixes sdhc
30819 .probe = via_probe,
30820 };
30821
30822 -static const struct pci_device_id pci_ids[] __devinitdata = {
30823 +static const struct pci_device_id pci_ids[] __devinitconst = {
30824 {
30825 .vendor = PCI_VENDOR_ID_RICOH,
30826 .device = PCI_DEVICE_ID_RICOH_R5C822,
30827 diff -urNp linux-3.0.8/drivers/mtd/chips/cfi_cmdset_0001.c linux-3.0.8/drivers/mtd/chips/cfi_cmdset_0001.c
30828 --- linux-3.0.8/drivers/mtd/chips/cfi_cmdset_0001.c 2011-07-21 22:17:23.000000000 -0400
30829 +++ linux-3.0.8/drivers/mtd/chips/cfi_cmdset_0001.c 2011-08-23 21:48:14.000000000 -0400
30830 @@ -757,6 +757,8 @@ static int chip_ready (struct map_info *
30831 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
30832 unsigned long timeo = jiffies + HZ;
30833
30834 + pax_track_stack();
30835 +
30836 /* Prevent setting state FL_SYNCING for chip in suspended state. */
30837 if (mode == FL_SYNCING && chip->oldstate != FL_READY)
30838 goto sleep;
30839 @@ -1653,6 +1655,8 @@ static int __xipram do_write_buffer(stru
30840 unsigned long initial_adr;
30841 int initial_len = len;
30842
30843 + pax_track_stack();
30844 +
30845 wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
30846 adr += chip->start;
30847 initial_adr = adr;
30848 @@ -1871,6 +1875,8 @@ static int __xipram do_erase_oneblock(st
30849 int retries = 3;
30850 int ret;
30851
30852 + pax_track_stack();
30853 +
30854 adr += chip->start;
30855
30856 retry:
30857 diff -urNp linux-3.0.8/drivers/mtd/chips/cfi_cmdset_0020.c linux-3.0.8/drivers/mtd/chips/cfi_cmdset_0020.c
30858 --- linux-3.0.8/drivers/mtd/chips/cfi_cmdset_0020.c 2011-07-21 22:17:23.000000000 -0400
30859 +++ linux-3.0.8/drivers/mtd/chips/cfi_cmdset_0020.c 2011-08-23 21:48:14.000000000 -0400
30860 @@ -255,6 +255,8 @@ static inline int do_read_onechip(struct
30861 unsigned long cmd_addr;
30862 struct cfi_private *cfi = map->fldrv_priv;
30863
30864 + pax_track_stack();
30865 +
30866 adr += chip->start;
30867
30868 /* Ensure cmd read/writes are aligned. */
30869 @@ -429,6 +431,8 @@ static inline int do_write_buffer(struct
30870 DECLARE_WAITQUEUE(wait, current);
30871 int wbufsize, z;
30872
30873 + pax_track_stack();
30874 +
30875 /* M58LW064A requires bus alignment for buffer wriets -- saw */
30876 if (adr & (map_bankwidth(map)-1))
30877 return -EINVAL;
30878 @@ -743,6 +747,8 @@ static inline int do_erase_oneblock(stru
30879 DECLARE_WAITQUEUE(wait, current);
30880 int ret = 0;
30881
30882 + pax_track_stack();
30883 +
30884 adr += chip->start;
30885
30886 /* Let's determine this according to the interleave only once */
30887 @@ -1048,6 +1054,8 @@ static inline int do_lock_oneblock(struc
30888 unsigned long timeo = jiffies + HZ;
30889 DECLARE_WAITQUEUE(wait, current);
30890
30891 + pax_track_stack();
30892 +
30893 adr += chip->start;
30894
30895 /* Let's determine this according to the interleave only once */
30896 @@ -1197,6 +1205,8 @@ static inline int do_unlock_oneblock(str
30897 unsigned long timeo = jiffies + HZ;
30898 DECLARE_WAITQUEUE(wait, current);
30899
30900 + pax_track_stack();
30901 +
30902 adr += chip->start;
30903
30904 /* Let's determine this according to the interleave only once */
30905 diff -urNp linux-3.0.8/drivers/mtd/devices/doc2000.c linux-3.0.8/drivers/mtd/devices/doc2000.c
30906 --- linux-3.0.8/drivers/mtd/devices/doc2000.c 2011-07-21 22:17:23.000000000 -0400
30907 +++ linux-3.0.8/drivers/mtd/devices/doc2000.c 2011-08-23 21:47:55.000000000 -0400
30908 @@ -776,7 +776,7 @@ static int doc_write(struct mtd_info *mt
30909
30910 /* The ECC will not be calculated correctly if less than 512 is written */
30911 /* DBB-
30912 - if (len != 0x200 && eccbuf)
30913 + if (len != 0x200)
30914 printk(KERN_WARNING
30915 "ECC needs a full sector write (adr: %lx size %lx)\n",
30916 (long) to, (long) len);
30917 diff -urNp linux-3.0.8/drivers/mtd/devices/doc2001.c linux-3.0.8/drivers/mtd/devices/doc2001.c
30918 --- linux-3.0.8/drivers/mtd/devices/doc2001.c 2011-07-21 22:17:23.000000000 -0400
30919 +++ linux-3.0.8/drivers/mtd/devices/doc2001.c 2011-08-23 21:47:55.000000000 -0400
30920 @@ -393,7 +393,7 @@ static int doc_read (struct mtd_info *mt
30921 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
30922
30923 /* Don't allow read past end of device */
30924 - if (from >= this->totlen)
30925 + if (from >= this->totlen || !len)
30926 return -EINVAL;
30927
30928 /* Don't allow a single read to cross a 512-byte block boundary */
30929 diff -urNp linux-3.0.8/drivers/mtd/ftl.c linux-3.0.8/drivers/mtd/ftl.c
30930 --- linux-3.0.8/drivers/mtd/ftl.c 2011-07-21 22:17:23.000000000 -0400
30931 +++ linux-3.0.8/drivers/mtd/ftl.c 2011-08-23 21:48:14.000000000 -0400
30932 @@ -474,6 +474,8 @@ static int copy_erase_unit(partition_t *
30933 loff_t offset;
30934 uint16_t srcunitswap = cpu_to_le16(srcunit);
30935
30936 + pax_track_stack();
30937 +
30938 eun = &part->EUNInfo[srcunit];
30939 xfer = &part->XferInfo[xferunit];
30940 DEBUG(2, "ftl_cs: copying block 0x%x to 0x%x\n",
30941 diff -urNp linux-3.0.8/drivers/mtd/inftlcore.c linux-3.0.8/drivers/mtd/inftlcore.c
30942 --- linux-3.0.8/drivers/mtd/inftlcore.c 2011-07-21 22:17:23.000000000 -0400
30943 +++ linux-3.0.8/drivers/mtd/inftlcore.c 2011-08-23 21:48:14.000000000 -0400
30944 @@ -259,6 +259,8 @@ static u16 INFTL_foldchain(struct INFTLr
30945 struct inftl_oob oob;
30946 size_t retlen;
30947
30948 + pax_track_stack();
30949 +
30950 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_foldchain(inftl=%p,thisVUC=%d,"
30951 "pending=%d)\n", inftl, thisVUC, pendingblock);
30952
30953 diff -urNp linux-3.0.8/drivers/mtd/inftlmount.c linux-3.0.8/drivers/mtd/inftlmount.c
30954 --- linux-3.0.8/drivers/mtd/inftlmount.c 2011-07-21 22:17:23.000000000 -0400
30955 +++ linux-3.0.8/drivers/mtd/inftlmount.c 2011-08-23 21:48:14.000000000 -0400
30956 @@ -53,6 +53,8 @@ static int find_boot_record(struct INFTL
30957 struct INFTLPartition *ip;
30958 size_t retlen;
30959
30960 + pax_track_stack();
30961 +
30962 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: find_boot_record(inftl=%p)\n", inftl);
30963
30964 /*
30965 diff -urNp linux-3.0.8/drivers/mtd/lpddr/qinfo_probe.c linux-3.0.8/drivers/mtd/lpddr/qinfo_probe.c
30966 --- linux-3.0.8/drivers/mtd/lpddr/qinfo_probe.c 2011-07-21 22:17:23.000000000 -0400
30967 +++ linux-3.0.8/drivers/mtd/lpddr/qinfo_probe.c 2011-08-23 21:48:14.000000000 -0400
30968 @@ -106,6 +106,8 @@ static int lpddr_pfow_present(struct map
30969 {
30970 map_word pfow_val[4];
30971
30972 + pax_track_stack();
30973 +
30974 /* Check identification string */
30975 pfow_val[0] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_P);
30976 pfow_val[1] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_F);
30977 diff -urNp linux-3.0.8/drivers/mtd/mtdchar.c linux-3.0.8/drivers/mtd/mtdchar.c
30978 --- linux-3.0.8/drivers/mtd/mtdchar.c 2011-07-21 22:17:23.000000000 -0400
30979 +++ linux-3.0.8/drivers/mtd/mtdchar.c 2011-08-23 21:48:14.000000000 -0400
30980 @@ -553,6 +553,8 @@ static int mtd_ioctl(struct file *file,
30981 u_long size;
30982 struct mtd_info_user info;
30983
30984 + pax_track_stack();
30985 +
30986 DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");
30987
30988 size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
30989 diff -urNp linux-3.0.8/drivers/mtd/nand/denali.c linux-3.0.8/drivers/mtd/nand/denali.c
30990 --- linux-3.0.8/drivers/mtd/nand/denali.c 2011-07-21 22:17:23.000000000 -0400
30991 +++ linux-3.0.8/drivers/mtd/nand/denali.c 2011-08-23 21:47:55.000000000 -0400
30992 @@ -26,6 +26,7 @@
30993 #include <linux/pci.h>
30994 #include <linux/mtd/mtd.h>
30995 #include <linux/module.h>
30996 +#include <linux/slab.h>
30997
30998 #include "denali.h"
30999
31000 diff -urNp linux-3.0.8/drivers/mtd/nftlcore.c linux-3.0.8/drivers/mtd/nftlcore.c
31001 --- linux-3.0.8/drivers/mtd/nftlcore.c 2011-07-21 22:17:23.000000000 -0400
31002 +++ linux-3.0.8/drivers/mtd/nftlcore.c 2011-08-23 21:48:14.000000000 -0400
31003 @@ -264,6 +264,8 @@ static u16 NFTL_foldchain (struct NFTLre
31004 int inplace = 1;
31005 size_t retlen;
31006
31007 + pax_track_stack();
31008 +
31009 memset(BlockMap, 0xff, sizeof(BlockMap));
31010 memset(BlockFreeFound, 0, sizeof(BlockFreeFound));
31011
31012 diff -urNp linux-3.0.8/drivers/mtd/nftlmount.c linux-3.0.8/drivers/mtd/nftlmount.c
31013 --- linux-3.0.8/drivers/mtd/nftlmount.c 2011-07-21 22:17:23.000000000 -0400
31014 +++ linux-3.0.8/drivers/mtd/nftlmount.c 2011-08-23 21:48:14.000000000 -0400
31015 @@ -24,6 +24,7 @@
31016 #include <asm/errno.h>
31017 #include <linux/delay.h>
31018 #include <linux/slab.h>
31019 +#include <linux/sched.h>
31020 #include <linux/mtd/mtd.h>
31021 #include <linux/mtd/nand.h>
31022 #include <linux/mtd/nftl.h>
31023 @@ -45,6 +46,8 @@ static int find_boot_record(struct NFTLr
31024 struct mtd_info *mtd = nftl->mbd.mtd;
31025 unsigned int i;
31026
31027 + pax_track_stack();
31028 +
31029 /* Assume logical EraseSize == physical erasesize for starting the scan.
31030 We'll sort it out later if we find a MediaHeader which says otherwise */
31031 /* Actually, we won't. The new DiskOnChip driver has already scanned
31032 diff -urNp linux-3.0.8/drivers/mtd/ubi/build.c linux-3.0.8/drivers/mtd/ubi/build.c
31033 --- linux-3.0.8/drivers/mtd/ubi/build.c 2011-07-21 22:17:23.000000000 -0400
31034 +++ linux-3.0.8/drivers/mtd/ubi/build.c 2011-08-23 21:47:55.000000000 -0400
31035 @@ -1287,7 +1287,7 @@ module_exit(ubi_exit);
31036 static int __init bytes_str_to_int(const char *str)
31037 {
31038 char *endp;
31039 - unsigned long result;
31040 + unsigned long result, scale = 1;
31041
31042 result = simple_strtoul(str, &endp, 0);
31043 if (str == endp || result >= INT_MAX) {
31044 @@ -1298,11 +1298,11 @@ static int __init bytes_str_to_int(const
31045
31046 switch (*endp) {
31047 case 'G':
31048 - result *= 1024;
31049 + scale *= 1024;
31050 case 'M':
31051 - result *= 1024;
31052 + scale *= 1024;
31053 case 'K':
31054 - result *= 1024;
31055 + scale *= 1024;
31056 if (endp[1] == 'i' && endp[2] == 'B')
31057 endp += 2;
31058 case '\0':
31059 @@ -1313,7 +1313,13 @@ static int __init bytes_str_to_int(const
31060 return -EINVAL;
31061 }
31062
31063 - return result;
31064 + if ((intoverflow_t)result*scale >= INT_MAX) {
31065 + printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
31066 + str);
31067 + return -EINVAL;
31068 + }
31069 +
31070 + return result*scale;
31071 }
31072
31073 /**
31074 diff -urNp linux-3.0.8/drivers/net/atlx/atl2.c linux-3.0.8/drivers/net/atlx/atl2.c
31075 --- linux-3.0.8/drivers/net/atlx/atl2.c 2011-07-21 22:17:23.000000000 -0400
31076 +++ linux-3.0.8/drivers/net/atlx/atl2.c 2011-10-11 10:44:33.000000000 -0400
31077 @@ -2840,7 +2840,7 @@ static void atl2_force_ps(struct atl2_hw
31078 */
31079
31080 #define ATL2_PARAM(X, desc) \
31081 - static const int __devinitdata X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
31082 + static const int __devinitconst X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
31083 MODULE_PARM(X, "1-" __MODULE_STRING(ATL2_MAX_NIC) "i"); \
31084 MODULE_PARM_DESC(X, desc);
31085 #else
31086 diff -urNp linux-3.0.8/drivers/net/bna/bfa_ioc_ct.c linux-3.0.8/drivers/net/bna/bfa_ioc_ct.c
31087 --- linux-3.0.8/drivers/net/bna/bfa_ioc_ct.c 2011-07-21 22:17:23.000000000 -0400
31088 +++ linux-3.0.8/drivers/net/bna/bfa_ioc_ct.c 2011-08-23 21:47:55.000000000 -0400
31089 @@ -48,7 +48,21 @@ static void bfa_ioc_ct_sync_ack(struct b
31090 static bool bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc);
31091 static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb, bool fcmode);
31092
31093 -static struct bfa_ioc_hwif nw_hwif_ct;
31094 +static struct bfa_ioc_hwif nw_hwif_ct = {
31095 + .ioc_pll_init = bfa_ioc_ct_pll_init,
31096 + .ioc_firmware_lock = bfa_ioc_ct_firmware_lock,
31097 + .ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock,
31098 + .ioc_reg_init = bfa_ioc_ct_reg_init,
31099 + .ioc_map_port = bfa_ioc_ct_map_port,
31100 + .ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set,
31101 + .ioc_notify_fail = bfa_ioc_ct_notify_fail,
31102 + .ioc_ownership_reset = bfa_ioc_ct_ownership_reset,
31103 + .ioc_sync_start = bfa_ioc_ct_sync_start,
31104 + .ioc_sync_join = bfa_ioc_ct_sync_join,
31105 + .ioc_sync_leave = bfa_ioc_ct_sync_leave,
31106 + .ioc_sync_ack = bfa_ioc_ct_sync_ack,
31107 + .ioc_sync_complete = bfa_ioc_ct_sync_complete
31108 +};
31109
31110 /**
31111 * Called from bfa_ioc_attach() to map asic specific calls.
31112 @@ -56,20 +70,6 @@ static struct bfa_ioc_hwif nw_hwif_ct;
31113 void
31114 bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc)
31115 {
31116 - nw_hwif_ct.ioc_pll_init = bfa_ioc_ct_pll_init;
31117 - nw_hwif_ct.ioc_firmware_lock = bfa_ioc_ct_firmware_lock;
31118 - nw_hwif_ct.ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock;
31119 - nw_hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init;
31120 - nw_hwif_ct.ioc_map_port = bfa_ioc_ct_map_port;
31121 - nw_hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
31122 - nw_hwif_ct.ioc_notify_fail = bfa_ioc_ct_notify_fail;
31123 - nw_hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
31124 - nw_hwif_ct.ioc_sync_start = bfa_ioc_ct_sync_start;
31125 - nw_hwif_ct.ioc_sync_join = bfa_ioc_ct_sync_join;
31126 - nw_hwif_ct.ioc_sync_leave = bfa_ioc_ct_sync_leave;
31127 - nw_hwif_ct.ioc_sync_ack = bfa_ioc_ct_sync_ack;
31128 - nw_hwif_ct.ioc_sync_complete = bfa_ioc_ct_sync_complete;
31129 -
31130 ioc->ioc_hwif = &nw_hwif_ct;
31131 }
31132
31133 diff -urNp linux-3.0.8/drivers/net/bna/bnad.c linux-3.0.8/drivers/net/bna/bnad.c
31134 --- linux-3.0.8/drivers/net/bna/bnad.c 2011-07-21 22:17:23.000000000 -0400
31135 +++ linux-3.0.8/drivers/net/bna/bnad.c 2011-08-23 21:47:55.000000000 -0400
31136 @@ -1681,7 +1681,14 @@ bnad_setup_tx(struct bnad *bnad, uint tx
31137 struct bna_intr_info *intr_info =
31138 &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
31139 struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
31140 - struct bna_tx_event_cbfn tx_cbfn;
31141 + static struct bna_tx_event_cbfn tx_cbfn = {
31142 + /* Initialize the tx event handlers */
31143 + .tcb_setup_cbfn = bnad_cb_tcb_setup,
31144 + .tcb_destroy_cbfn = bnad_cb_tcb_destroy,
31145 + .tx_stall_cbfn = bnad_cb_tx_stall,
31146 + .tx_resume_cbfn = bnad_cb_tx_resume,
31147 + .tx_cleanup_cbfn = bnad_cb_tx_cleanup
31148 + };
31149 struct bna_tx *tx;
31150 unsigned long flags;
31151
31152 @@ -1690,13 +1697,6 @@ bnad_setup_tx(struct bnad *bnad, uint tx
31153 tx_config->txq_depth = bnad->txq_depth;
31154 tx_config->tx_type = BNA_TX_T_REGULAR;
31155
31156 - /* Initialize the tx event handlers */
31157 - tx_cbfn.tcb_setup_cbfn = bnad_cb_tcb_setup;
31158 - tx_cbfn.tcb_destroy_cbfn = bnad_cb_tcb_destroy;
31159 - tx_cbfn.tx_stall_cbfn = bnad_cb_tx_stall;
31160 - tx_cbfn.tx_resume_cbfn = bnad_cb_tx_resume;
31161 - tx_cbfn.tx_cleanup_cbfn = bnad_cb_tx_cleanup;
31162 -
31163 /* Get BNA's resource requirement for one tx object */
31164 spin_lock_irqsave(&bnad->bna_lock, flags);
31165 bna_tx_res_req(bnad->num_txq_per_tx,
31166 @@ -1827,21 +1827,21 @@ bnad_setup_rx(struct bnad *bnad, uint rx
31167 struct bna_intr_info *intr_info =
31168 &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
31169 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
31170 - struct bna_rx_event_cbfn rx_cbfn;
31171 + static struct bna_rx_event_cbfn rx_cbfn = {
31172 + /* Initialize the Rx event handlers */
31173 + .rcb_setup_cbfn = bnad_cb_rcb_setup,
31174 + .rcb_destroy_cbfn = bnad_cb_rcb_destroy,
31175 + .ccb_setup_cbfn = bnad_cb_ccb_setup,
31176 + .ccb_destroy_cbfn = bnad_cb_ccb_destroy,
31177 + .rx_cleanup_cbfn = bnad_cb_rx_cleanup,
31178 + .rx_post_cbfn = bnad_cb_rx_post
31179 + };
31180 struct bna_rx *rx;
31181 unsigned long flags;
31182
31183 /* Initialize the Rx object configuration */
31184 bnad_init_rx_config(bnad, rx_config);
31185
31186 - /* Initialize the Rx event handlers */
31187 - rx_cbfn.rcb_setup_cbfn = bnad_cb_rcb_setup;
31188 - rx_cbfn.rcb_destroy_cbfn = bnad_cb_rcb_destroy;
31189 - rx_cbfn.ccb_setup_cbfn = bnad_cb_ccb_setup;
31190 - rx_cbfn.ccb_destroy_cbfn = bnad_cb_ccb_destroy;
31191 - rx_cbfn.rx_cleanup_cbfn = bnad_cb_rx_cleanup;
31192 - rx_cbfn.rx_post_cbfn = bnad_cb_rx_post;
31193 -
31194 /* Get BNA's resource requirement for one Rx object */
31195 spin_lock_irqsave(&bnad->bna_lock, flags);
31196 bna_rx_res_req(rx_config, res_info);
31197 diff -urNp linux-3.0.8/drivers/net/bnx2.c linux-3.0.8/drivers/net/bnx2.c
31198 --- linux-3.0.8/drivers/net/bnx2.c 2011-10-24 08:05:30.000000000 -0400
31199 +++ linux-3.0.8/drivers/net/bnx2.c 2011-10-16 21:55:27.000000000 -0400
31200 @@ -5831,6 +5831,8 @@ bnx2_test_nvram(struct bnx2 *bp)
31201 int rc = 0;
31202 u32 magic, csum;
31203
31204 + pax_track_stack();
31205 +
31206 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
31207 goto test_nvram_done;
31208
31209 diff -urNp linux-3.0.8/drivers/net/bnx2x/bnx2x_ethtool.c linux-3.0.8/drivers/net/bnx2x/bnx2x_ethtool.c
31210 --- linux-3.0.8/drivers/net/bnx2x/bnx2x_ethtool.c 2011-07-21 22:17:23.000000000 -0400
31211 +++ linux-3.0.8/drivers/net/bnx2x/bnx2x_ethtool.c 2011-08-23 21:48:14.000000000 -0400
31212 @@ -1705,6 +1705,8 @@ static int bnx2x_test_nvram(struct bnx2x
31213 int i, rc;
31214 u32 magic, crc;
31215
31216 + pax_track_stack();
31217 +
31218 if (BP_NOMCP(bp))
31219 return 0;
31220
31221 diff -urNp linux-3.0.8/drivers/net/can/mscan/mscan.c linux-3.0.8/drivers/net/can/mscan/mscan.c
31222 --- linux-3.0.8/drivers/net/can/mscan/mscan.c 2011-07-21 22:17:23.000000000 -0400
31223 +++ linux-3.0.8/drivers/net/can/mscan/mscan.c 2011-10-17 02:51:46.000000000 -0400
31224 @@ -261,11 +261,13 @@ static netdev_tx_t mscan_start_xmit(stru
31225 void __iomem *data = &regs->tx.dsr1_0;
31226 u16 *payload = (u16 *)frame->data;
31227
31228 - /* It is safe to write into dsr[dlc+1] */
31229 - for (i = 0; i < (frame->can_dlc + 1) / 2; i++) {
31230 + for (i = 0; i < frame->can_dlc / 2; i++) {
31231 out_be16(data, *payload++);
31232 data += 2 + _MSCAN_RESERVED_DSR_SIZE;
31233 }
31234 + /* write remaining byte if necessary */
31235 + if (frame->can_dlc & 1)
31236 + out_8(data, frame->data[frame->can_dlc - 1]);
31237 }
31238
31239 out_8(&regs->tx.dlr, frame->can_dlc);
31240 @@ -330,10 +332,13 @@ static void mscan_get_rx_frame(struct ne
31241 void __iomem *data = &regs->rx.dsr1_0;
31242 u16 *payload = (u16 *)frame->data;
31243
31244 - for (i = 0; i < (frame->can_dlc + 1) / 2; i++) {
31245 + for (i = 0; i < frame->can_dlc / 2; i++) {
31246 *payload++ = in_be16(data);
31247 data += 2 + _MSCAN_RESERVED_DSR_SIZE;
31248 }
31249 + /* read remaining byte if necessary */
31250 + if (frame->can_dlc & 1)
31251 + frame->data[frame->can_dlc - 1] = in_8(data);
31252 }
31253
31254 out_8(&regs->canrflg, MSCAN_RXF);
31255 diff -urNp linux-3.0.8/drivers/net/cxgb3/l2t.h linux-3.0.8/drivers/net/cxgb3/l2t.h
31256 --- linux-3.0.8/drivers/net/cxgb3/l2t.h 2011-10-24 08:05:30.000000000 -0400
31257 +++ linux-3.0.8/drivers/net/cxgb3/l2t.h 2011-10-16 21:55:27.000000000 -0400
31258 @@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)
31259 */
31260 struct l2t_skb_cb {
31261 arp_failure_handler_func arp_failure_handler;
31262 -};
31263 +} __no_const;
31264
31265 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
31266
31267 diff -urNp linux-3.0.8/drivers/net/cxgb4/cxgb4_main.c linux-3.0.8/drivers/net/cxgb4/cxgb4_main.c
31268 --- linux-3.0.8/drivers/net/cxgb4/cxgb4_main.c 2011-07-21 22:17:23.000000000 -0400
31269 +++ linux-3.0.8/drivers/net/cxgb4/cxgb4_main.c 2011-08-23 21:48:14.000000000 -0400
31270 @@ -3396,6 +3396,8 @@ static int __devinit enable_msix(struct
31271 unsigned int nchan = adap->params.nports;
31272 struct msix_entry entries[MAX_INGQ + 1];
31273
31274 + pax_track_stack();
31275 +
31276 for (i = 0; i < ARRAY_SIZE(entries); ++i)
31277 entries[i].entry = i;
31278
31279 diff -urNp linux-3.0.8/drivers/net/cxgb4/t4_hw.c linux-3.0.8/drivers/net/cxgb4/t4_hw.c
31280 --- linux-3.0.8/drivers/net/cxgb4/t4_hw.c 2011-07-21 22:17:23.000000000 -0400
31281 +++ linux-3.0.8/drivers/net/cxgb4/t4_hw.c 2011-08-23 21:48:14.000000000 -0400
31282 @@ -362,6 +362,8 @@ static int get_vpd_params(struct adapter
31283 u8 vpd[VPD_LEN], csum;
31284 unsigned int vpdr_len, kw_offset, id_len;
31285
31286 + pax_track_stack();
31287 +
31288 ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(vpd), vpd);
31289 if (ret < 0)
31290 return ret;
31291 diff -urNp linux-3.0.8/drivers/net/e1000e/82571.c linux-3.0.8/drivers/net/e1000e/82571.c
31292 --- linux-3.0.8/drivers/net/e1000e/82571.c 2011-07-21 22:17:23.000000000 -0400
31293 +++ linux-3.0.8/drivers/net/e1000e/82571.c 2011-08-23 21:47:55.000000000 -0400
31294 @@ -239,7 +239,7 @@ static s32 e1000_init_mac_params_82571(s
31295 {
31296 struct e1000_hw *hw = &adapter->hw;
31297 struct e1000_mac_info *mac = &hw->mac;
31298 - struct e1000_mac_operations *func = &mac->ops;
31299 + e1000_mac_operations_no_const *func = &mac->ops;
31300 u32 swsm = 0;
31301 u32 swsm2 = 0;
31302 bool force_clear_smbi = false;
31303 diff -urNp linux-3.0.8/drivers/net/e1000e/es2lan.c linux-3.0.8/drivers/net/e1000e/es2lan.c
31304 --- linux-3.0.8/drivers/net/e1000e/es2lan.c 2011-07-21 22:17:23.000000000 -0400
31305 +++ linux-3.0.8/drivers/net/e1000e/es2lan.c 2011-08-23 21:47:55.000000000 -0400
31306 @@ -205,7 +205,7 @@ static s32 e1000_init_mac_params_80003es
31307 {
31308 struct e1000_hw *hw = &adapter->hw;
31309 struct e1000_mac_info *mac = &hw->mac;
31310 - struct e1000_mac_operations *func = &mac->ops;
31311 + e1000_mac_operations_no_const *func = &mac->ops;
31312
31313 /* Set media type */
31314 switch (adapter->pdev->device) {
31315 diff -urNp linux-3.0.8/drivers/net/e1000e/hw.h linux-3.0.8/drivers/net/e1000e/hw.h
31316 --- linux-3.0.8/drivers/net/e1000e/hw.h 2011-07-21 22:17:23.000000000 -0400
31317 +++ linux-3.0.8/drivers/net/e1000e/hw.h 2011-08-23 21:47:55.000000000 -0400
31318 @@ -776,6 +776,7 @@ struct e1000_mac_operations {
31319 void (*write_vfta)(struct e1000_hw *, u32, u32);
31320 s32 (*read_mac_addr)(struct e1000_hw *);
31321 };
31322 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
31323
31324 /* Function pointers for the PHY. */
31325 struct e1000_phy_operations {
31326 @@ -799,6 +800,7 @@ struct e1000_phy_operations {
31327 void (*power_up)(struct e1000_hw *);
31328 void (*power_down)(struct e1000_hw *);
31329 };
31330 +typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
31331
31332 /* Function pointers for the NVM. */
31333 struct e1000_nvm_operations {
31334 @@ -810,9 +812,10 @@ struct e1000_nvm_operations {
31335 s32 (*validate)(struct e1000_hw *);
31336 s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
31337 };
31338 +typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
31339
31340 struct e1000_mac_info {
31341 - struct e1000_mac_operations ops;
31342 + e1000_mac_operations_no_const ops;
31343 u8 addr[ETH_ALEN];
31344 u8 perm_addr[ETH_ALEN];
31345
31346 @@ -853,7 +856,7 @@ struct e1000_mac_info {
31347 };
31348
31349 struct e1000_phy_info {
31350 - struct e1000_phy_operations ops;
31351 + e1000_phy_operations_no_const ops;
31352
31353 enum e1000_phy_type type;
31354
31355 @@ -887,7 +890,7 @@ struct e1000_phy_info {
31356 };
31357
31358 struct e1000_nvm_info {
31359 - struct e1000_nvm_operations ops;
31360 + e1000_nvm_operations_no_const ops;
31361
31362 enum e1000_nvm_type type;
31363 enum e1000_nvm_override override;
31364 diff -urNp linux-3.0.8/drivers/net/fealnx.c linux-3.0.8/drivers/net/fealnx.c
31365 --- linux-3.0.8/drivers/net/fealnx.c 2011-07-21 22:17:23.000000000 -0400
31366 +++ linux-3.0.8/drivers/net/fealnx.c 2011-10-11 10:44:33.000000000 -0400
31367 @@ -150,7 +150,7 @@ struct chip_info {
31368 int flags;
31369 };
31370
31371 -static const struct chip_info skel_netdrv_tbl[] __devinitdata = {
31372 +static const struct chip_info skel_netdrv_tbl[] __devinitconst = {
31373 { "100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
31374 { "100/10M Ethernet PCI Adapter", HAS_CHIP_XCVR },
31375 { "1000/100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
31376 diff -urNp linux-3.0.8/drivers/net/hamradio/6pack.c linux-3.0.8/drivers/net/hamradio/6pack.c
31377 --- linux-3.0.8/drivers/net/hamradio/6pack.c 2011-07-21 22:17:23.000000000 -0400
31378 +++ linux-3.0.8/drivers/net/hamradio/6pack.c 2011-08-23 21:48:14.000000000 -0400
31379 @@ -463,6 +463,8 @@ static void sixpack_receive_buf(struct t
31380 unsigned char buf[512];
31381 int count1;
31382
31383 + pax_track_stack();
31384 +
31385 if (!count)
31386 return;
31387
31388 diff -urNp linux-3.0.8/drivers/net/igb/e1000_hw.h linux-3.0.8/drivers/net/igb/e1000_hw.h
31389 --- linux-3.0.8/drivers/net/igb/e1000_hw.h 2011-07-21 22:17:23.000000000 -0400
31390 +++ linux-3.0.8/drivers/net/igb/e1000_hw.h 2011-08-23 21:47:55.000000000 -0400
31391 @@ -314,6 +314,7 @@ struct e1000_mac_operations {
31392 s32 (*read_mac_addr)(struct e1000_hw *);
31393 s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
31394 };
31395 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
31396
31397 struct e1000_phy_operations {
31398 s32 (*acquire)(struct e1000_hw *);
31399 @@ -330,6 +331,7 @@ struct e1000_phy_operations {
31400 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
31401 s32 (*write_reg)(struct e1000_hw *, u32, u16);
31402 };
31403 +typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
31404
31405 struct e1000_nvm_operations {
31406 s32 (*acquire)(struct e1000_hw *);
31407 @@ -339,6 +341,7 @@ struct e1000_nvm_operations {
31408 s32 (*update)(struct e1000_hw *);
31409 s32 (*validate)(struct e1000_hw *);
31410 };
31411 +typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
31412
31413 struct e1000_info {
31414 s32 (*get_invariants)(struct e1000_hw *);
31415 @@ -350,7 +353,7 @@ struct e1000_info {
31416 extern const struct e1000_info e1000_82575_info;
31417
31418 struct e1000_mac_info {
31419 - struct e1000_mac_operations ops;
31420 + e1000_mac_operations_no_const ops;
31421
31422 u8 addr[6];
31423 u8 perm_addr[6];
31424 @@ -388,7 +391,7 @@ struct e1000_mac_info {
31425 };
31426
31427 struct e1000_phy_info {
31428 - struct e1000_phy_operations ops;
31429 + e1000_phy_operations_no_const ops;
31430
31431 enum e1000_phy_type type;
31432
31433 @@ -423,7 +426,7 @@ struct e1000_phy_info {
31434 };
31435
31436 struct e1000_nvm_info {
31437 - struct e1000_nvm_operations ops;
31438 + e1000_nvm_operations_no_const ops;
31439 enum e1000_nvm_type type;
31440 enum e1000_nvm_override override;
31441
31442 @@ -468,6 +471,7 @@ struct e1000_mbx_operations {
31443 s32 (*check_for_ack)(struct e1000_hw *, u16);
31444 s32 (*check_for_rst)(struct e1000_hw *, u16);
31445 };
31446 +typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
31447
31448 struct e1000_mbx_stats {
31449 u32 msgs_tx;
31450 @@ -479,7 +483,7 @@ struct e1000_mbx_stats {
31451 };
31452
31453 struct e1000_mbx_info {
31454 - struct e1000_mbx_operations ops;
31455 + e1000_mbx_operations_no_const ops;
31456 struct e1000_mbx_stats stats;
31457 u32 timeout;
31458 u32 usec_delay;
31459 diff -urNp linux-3.0.8/drivers/net/igbvf/vf.h linux-3.0.8/drivers/net/igbvf/vf.h
31460 --- linux-3.0.8/drivers/net/igbvf/vf.h 2011-07-21 22:17:23.000000000 -0400
31461 +++ linux-3.0.8/drivers/net/igbvf/vf.h 2011-08-23 21:47:55.000000000 -0400
31462 @@ -189,9 +189,10 @@ struct e1000_mac_operations {
31463 s32 (*read_mac_addr)(struct e1000_hw *);
31464 s32 (*set_vfta)(struct e1000_hw *, u16, bool);
31465 };
31466 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
31467
31468 struct e1000_mac_info {
31469 - struct e1000_mac_operations ops;
31470 + e1000_mac_operations_no_const ops;
31471 u8 addr[6];
31472 u8 perm_addr[6];
31473
31474 @@ -213,6 +214,7 @@ struct e1000_mbx_operations {
31475 s32 (*check_for_ack)(struct e1000_hw *);
31476 s32 (*check_for_rst)(struct e1000_hw *);
31477 };
31478 +typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
31479
31480 struct e1000_mbx_stats {
31481 u32 msgs_tx;
31482 @@ -224,7 +226,7 @@ struct e1000_mbx_stats {
31483 };
31484
31485 struct e1000_mbx_info {
31486 - struct e1000_mbx_operations ops;
31487 + e1000_mbx_operations_no_const ops;
31488 struct e1000_mbx_stats stats;
31489 u32 timeout;
31490 u32 usec_delay;
31491 diff -urNp linux-3.0.8/drivers/net/ixgb/ixgb_main.c linux-3.0.8/drivers/net/ixgb/ixgb_main.c
31492 --- linux-3.0.8/drivers/net/ixgb/ixgb_main.c 2011-07-21 22:17:23.000000000 -0400
31493 +++ linux-3.0.8/drivers/net/ixgb/ixgb_main.c 2011-08-23 21:48:14.000000000 -0400
31494 @@ -1070,6 +1070,8 @@ ixgb_set_multi(struct net_device *netdev
31495 u32 rctl;
31496 int i;
31497
31498 + pax_track_stack();
31499 +
31500 /* Check for Promiscuous and All Multicast modes */
31501
31502 rctl = IXGB_READ_REG(hw, RCTL);
31503 diff -urNp linux-3.0.8/drivers/net/ixgb/ixgb_param.c linux-3.0.8/drivers/net/ixgb/ixgb_param.c
31504 --- linux-3.0.8/drivers/net/ixgb/ixgb_param.c 2011-07-21 22:17:23.000000000 -0400
31505 +++ linux-3.0.8/drivers/net/ixgb/ixgb_param.c 2011-08-23 21:48:14.000000000 -0400
31506 @@ -261,6 +261,9 @@ void __devinit
31507 ixgb_check_options(struct ixgb_adapter *adapter)
31508 {
31509 int bd = adapter->bd_number;
31510 +
31511 + pax_track_stack();
31512 +
31513 if (bd >= IXGB_MAX_NIC) {
31514 pr_notice("Warning: no configuration for board #%i\n", bd);
31515 pr_notice("Using defaults for all values\n");
31516 diff -urNp linux-3.0.8/drivers/net/ixgbe/ixgbe_type.h linux-3.0.8/drivers/net/ixgbe/ixgbe_type.h
31517 --- linux-3.0.8/drivers/net/ixgbe/ixgbe_type.h 2011-07-21 22:17:23.000000000 -0400
31518 +++ linux-3.0.8/drivers/net/ixgbe/ixgbe_type.h 2011-08-23 21:47:55.000000000 -0400
31519 @@ -2584,6 +2584,7 @@ struct ixgbe_eeprom_operations {
31520 s32 (*update_checksum)(struct ixgbe_hw *);
31521 u16 (*calc_checksum)(struct ixgbe_hw *);
31522 };
31523 +typedef struct ixgbe_eeprom_operations __no_const ixgbe_eeprom_operations_no_const;
31524
31525 struct ixgbe_mac_operations {
31526 s32 (*init_hw)(struct ixgbe_hw *);
31527 @@ -2639,6 +2640,7 @@ struct ixgbe_mac_operations {
31528 /* Flow Control */
31529 s32 (*fc_enable)(struct ixgbe_hw *, s32);
31530 };
31531 +typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
31532
31533 struct ixgbe_phy_operations {
31534 s32 (*identify)(struct ixgbe_hw *);
31535 @@ -2658,9 +2660,10 @@ struct ixgbe_phy_operations {
31536 s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
31537 s32 (*check_overtemp)(struct ixgbe_hw *);
31538 };
31539 +typedef struct ixgbe_phy_operations __no_const ixgbe_phy_operations_no_const;
31540
31541 struct ixgbe_eeprom_info {
31542 - struct ixgbe_eeprom_operations ops;
31543 + ixgbe_eeprom_operations_no_const ops;
31544 enum ixgbe_eeprom_type type;
31545 u32 semaphore_delay;
31546 u16 word_size;
31547 @@ -2670,7 +2673,7 @@ struct ixgbe_eeprom_info {
31548
31549 #define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01
31550 struct ixgbe_mac_info {
31551 - struct ixgbe_mac_operations ops;
31552 + ixgbe_mac_operations_no_const ops;
31553 enum ixgbe_mac_type type;
31554 u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
31555 u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
31556 @@ -2698,7 +2701,7 @@ struct ixgbe_mac_info {
31557 };
31558
31559 struct ixgbe_phy_info {
31560 - struct ixgbe_phy_operations ops;
31561 + ixgbe_phy_operations_no_const ops;
31562 struct mdio_if_info mdio;
31563 enum ixgbe_phy_type type;
31564 u32 id;
31565 @@ -2726,6 +2729,7 @@ struct ixgbe_mbx_operations {
31566 s32 (*check_for_ack)(struct ixgbe_hw *, u16);
31567 s32 (*check_for_rst)(struct ixgbe_hw *, u16);
31568 };
31569 +typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
31570
31571 struct ixgbe_mbx_stats {
31572 u32 msgs_tx;
31573 @@ -2737,7 +2741,7 @@ struct ixgbe_mbx_stats {
31574 };
31575
31576 struct ixgbe_mbx_info {
31577 - struct ixgbe_mbx_operations ops;
31578 + ixgbe_mbx_operations_no_const ops;
31579 struct ixgbe_mbx_stats stats;
31580 u32 timeout;
31581 u32 usec_delay;
31582 diff -urNp linux-3.0.8/drivers/net/ixgbevf/vf.h linux-3.0.8/drivers/net/ixgbevf/vf.h
31583 --- linux-3.0.8/drivers/net/ixgbevf/vf.h 2011-07-21 22:17:23.000000000 -0400
31584 +++ linux-3.0.8/drivers/net/ixgbevf/vf.h 2011-08-23 21:47:55.000000000 -0400
31585 @@ -70,6 +70,7 @@ struct ixgbe_mac_operations {
31586 s32 (*clear_vfta)(struct ixgbe_hw *);
31587 s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
31588 };
31589 +typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
31590
31591 enum ixgbe_mac_type {
31592 ixgbe_mac_unknown = 0,
31593 @@ -79,7 +80,7 @@ enum ixgbe_mac_type {
31594 };
31595
31596 struct ixgbe_mac_info {
31597 - struct ixgbe_mac_operations ops;
31598 + ixgbe_mac_operations_no_const ops;
31599 u8 addr[6];
31600 u8 perm_addr[6];
31601
31602 @@ -103,6 +104,7 @@ struct ixgbe_mbx_operations {
31603 s32 (*check_for_ack)(struct ixgbe_hw *);
31604 s32 (*check_for_rst)(struct ixgbe_hw *);
31605 };
31606 +typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
31607
31608 struct ixgbe_mbx_stats {
31609 u32 msgs_tx;
31610 @@ -114,7 +116,7 @@ struct ixgbe_mbx_stats {
31611 };
31612
31613 struct ixgbe_mbx_info {
31614 - struct ixgbe_mbx_operations ops;
31615 + ixgbe_mbx_operations_no_const ops;
31616 struct ixgbe_mbx_stats stats;
31617 u32 timeout;
31618 u32 udelay;
31619 diff -urNp linux-3.0.8/drivers/net/ksz884x.c linux-3.0.8/drivers/net/ksz884x.c
31620 --- linux-3.0.8/drivers/net/ksz884x.c 2011-07-21 22:17:23.000000000 -0400
31621 +++ linux-3.0.8/drivers/net/ksz884x.c 2011-08-23 21:48:14.000000000 -0400
31622 @@ -6534,6 +6534,8 @@ static void netdev_get_ethtool_stats(str
31623 int rc;
31624 u64 counter[TOTAL_PORT_COUNTER_NUM];
31625
31626 + pax_track_stack();
31627 +
31628 mutex_lock(&hw_priv->lock);
31629 n = SWITCH_PORT_NUM;
31630 for (i = 0, p = port->first_port; i < port->mib_port_cnt; i++, p++) {
31631 diff -urNp linux-3.0.8/drivers/net/mlx4/main.c linux-3.0.8/drivers/net/mlx4/main.c
31632 --- linux-3.0.8/drivers/net/mlx4/main.c 2011-07-21 22:17:23.000000000 -0400
31633 +++ linux-3.0.8/drivers/net/mlx4/main.c 2011-08-23 21:48:14.000000000 -0400
31634 @@ -40,6 +40,7 @@
31635 #include <linux/dma-mapping.h>
31636 #include <linux/slab.h>
31637 #include <linux/io-mapping.h>
31638 +#include <linux/sched.h>
31639
31640 #include <linux/mlx4/device.h>
31641 #include <linux/mlx4/doorbell.h>
31642 @@ -764,6 +765,8 @@ static int mlx4_init_hca(struct mlx4_dev
31643 u64 icm_size;
31644 int err;
31645
31646 + pax_track_stack();
31647 +
31648 err = mlx4_QUERY_FW(dev);
31649 if (err) {
31650 if (err == -EACCES)
31651 diff -urNp linux-3.0.8/drivers/net/niu.c linux-3.0.8/drivers/net/niu.c
31652 --- linux-3.0.8/drivers/net/niu.c 2011-10-24 08:05:21.000000000 -0400
31653 +++ linux-3.0.8/drivers/net/niu.c 2011-08-23 21:48:14.000000000 -0400
31654 @@ -9056,6 +9056,8 @@ static void __devinit niu_try_msix(struc
31655 int i, num_irqs, err;
31656 u8 first_ldg;
31657
31658 + pax_track_stack();
31659 +
31660 first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
31661 for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++)
31662 ldg_num_map[i] = first_ldg + i;
31663 diff -urNp linux-3.0.8/drivers/net/pcnet32.c linux-3.0.8/drivers/net/pcnet32.c
31664 --- linux-3.0.8/drivers/net/pcnet32.c 2011-07-21 22:17:23.000000000 -0400
31665 +++ linux-3.0.8/drivers/net/pcnet32.c 2011-08-23 21:47:55.000000000 -0400
31666 @@ -82,7 +82,7 @@ static int cards_found;
31667 /*
31668 * VLB I/O addresses
31669 */
31670 -static unsigned int pcnet32_portlist[] __initdata =
31671 +static unsigned int pcnet32_portlist[] __devinitdata =
31672 { 0x300, 0x320, 0x340, 0x360, 0 };
31673
31674 static int pcnet32_debug;
31675 @@ -270,7 +270,7 @@ struct pcnet32_private {
31676 struct sk_buff **rx_skbuff;
31677 dma_addr_t *tx_dma_addr;
31678 dma_addr_t *rx_dma_addr;
31679 - struct pcnet32_access a;
31680 + struct pcnet32_access *a;
31681 spinlock_t lock; /* Guard lock */
31682 unsigned int cur_rx, cur_tx; /* The next free ring entry */
31683 unsigned int rx_ring_size; /* current rx ring size */
31684 @@ -460,9 +460,9 @@ static void pcnet32_netif_start(struct n
31685 u16 val;
31686
31687 netif_wake_queue(dev);
31688 - val = lp->a.read_csr(ioaddr, CSR3);
31689 + val = lp->a->read_csr(ioaddr, CSR3);
31690 val &= 0x00ff;
31691 - lp->a.write_csr(ioaddr, CSR3, val);
31692 + lp->a->write_csr(ioaddr, CSR3, val);
31693 napi_enable(&lp->napi);
31694 }
31695
31696 @@ -730,7 +730,7 @@ static u32 pcnet32_get_link(struct net_d
31697 r = mii_link_ok(&lp->mii_if);
31698 } else if (lp->chip_version >= PCNET32_79C970A) {
31699 ulong ioaddr = dev->base_addr; /* card base I/O address */
31700 - r = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
31701 + r = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
31702 } else { /* can not detect link on really old chips */
31703 r = 1;
31704 }
31705 @@ -792,7 +792,7 @@ static int pcnet32_set_ringparam(struct
31706 pcnet32_netif_stop(dev);
31707
31708 spin_lock_irqsave(&lp->lock, flags);
31709 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
31710 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
31711
31712 size = min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE);
31713
31714 @@ -868,7 +868,7 @@ static void pcnet32_ethtool_test(struct
31715 static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
31716 {
31717 struct pcnet32_private *lp = netdev_priv(dev);
31718 - struct pcnet32_access *a = &lp->a; /* access to registers */
31719 + struct pcnet32_access *a = lp->a; /* access to registers */
31720 ulong ioaddr = dev->base_addr; /* card base I/O address */
31721 struct sk_buff *skb; /* sk buff */
31722 int x, i; /* counters */
31723 @@ -888,21 +888,21 @@ static int pcnet32_loopback_test(struct
31724 pcnet32_netif_stop(dev);
31725
31726 spin_lock_irqsave(&lp->lock, flags);
31727 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
31728 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
31729
31730 numbuffs = min(numbuffs, (int)min(lp->rx_ring_size, lp->tx_ring_size));
31731
31732 /* Reset the PCNET32 */
31733 - lp->a.reset(ioaddr);
31734 - lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
31735 + lp->a->reset(ioaddr);
31736 + lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
31737
31738 /* switch pcnet32 to 32bit mode */
31739 - lp->a.write_bcr(ioaddr, 20, 2);
31740 + lp->a->write_bcr(ioaddr, 20, 2);
31741
31742 /* purge & init rings but don't actually restart */
31743 pcnet32_restart(dev, 0x0000);
31744
31745 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
31746 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
31747
31748 /* Initialize Transmit buffers. */
31749 size = data_len + 15;
31750 @@ -947,10 +947,10 @@ static int pcnet32_loopback_test(struct
31751
31752 /* set int loopback in CSR15 */
31753 x = a->read_csr(ioaddr, CSR15) & 0xfffc;
31754 - lp->a.write_csr(ioaddr, CSR15, x | 0x0044);
31755 + lp->a->write_csr(ioaddr, CSR15, x | 0x0044);
31756
31757 teststatus = cpu_to_le16(0x8000);
31758 - lp->a.write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
31759 + lp->a->write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
31760
31761 /* Check status of descriptors */
31762 for (x = 0; x < numbuffs; x++) {
31763 @@ -969,7 +969,7 @@ static int pcnet32_loopback_test(struct
31764 }
31765 }
31766
31767 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
31768 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
31769 wmb();
31770 if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) {
31771 netdev_printk(KERN_DEBUG, dev, "RX loopback packets:\n");
31772 @@ -1015,7 +1015,7 @@ clean_up:
31773 pcnet32_restart(dev, CSR0_NORMAL);
31774 } else {
31775 pcnet32_purge_rx_ring(dev);
31776 - lp->a.write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
31777 + lp->a->write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
31778 }
31779 spin_unlock_irqrestore(&lp->lock, flags);
31780
31781 @@ -1026,7 +1026,7 @@ static int pcnet32_set_phys_id(struct ne
31782 enum ethtool_phys_id_state state)
31783 {
31784 struct pcnet32_private *lp = netdev_priv(dev);
31785 - struct pcnet32_access *a = &lp->a;
31786 + struct pcnet32_access *a = lp->a;
31787 ulong ioaddr = dev->base_addr;
31788 unsigned long flags;
31789 int i;
31790 @@ -1067,7 +1067,7 @@ static int pcnet32_suspend(struct net_de
31791 {
31792 int csr5;
31793 struct pcnet32_private *lp = netdev_priv(dev);
31794 - struct pcnet32_access *a = &lp->a;
31795 + struct pcnet32_access *a = lp->a;
31796 ulong ioaddr = dev->base_addr;
31797 int ticks;
31798
31799 @@ -1324,8 +1324,8 @@ static int pcnet32_poll(struct napi_stru
31800 spin_lock_irqsave(&lp->lock, flags);
31801 if (pcnet32_tx(dev)) {
31802 /* reset the chip to clear the error condition, then restart */
31803 - lp->a.reset(ioaddr);
31804 - lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
31805 + lp->a->reset(ioaddr);
31806 + lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
31807 pcnet32_restart(dev, CSR0_START);
31808 netif_wake_queue(dev);
31809 }
31810 @@ -1337,12 +1337,12 @@ static int pcnet32_poll(struct napi_stru
31811 __napi_complete(napi);
31812
31813 /* clear interrupt masks */
31814 - val = lp->a.read_csr(ioaddr, CSR3);
31815 + val = lp->a->read_csr(ioaddr, CSR3);
31816 val &= 0x00ff;
31817 - lp->a.write_csr(ioaddr, CSR3, val);
31818 + lp->a->write_csr(ioaddr, CSR3, val);
31819
31820 /* Set interrupt enable. */
31821 - lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN);
31822 + lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN);
31823
31824 spin_unlock_irqrestore(&lp->lock, flags);
31825 }
31826 @@ -1365,7 +1365,7 @@ static void pcnet32_get_regs(struct net_
31827 int i, csr0;
31828 u16 *buff = ptr;
31829 struct pcnet32_private *lp = netdev_priv(dev);
31830 - struct pcnet32_access *a = &lp->a;
31831 + struct pcnet32_access *a = lp->a;
31832 ulong ioaddr = dev->base_addr;
31833 unsigned long flags;
31834
31835 @@ -1401,9 +1401,9 @@ static void pcnet32_get_regs(struct net_
31836 for (j = 0; j < PCNET32_MAX_PHYS; j++) {
31837 if (lp->phymask & (1 << j)) {
31838 for (i = 0; i < PCNET32_REGS_PER_PHY; i++) {
31839 - lp->a.write_bcr(ioaddr, 33,
31840 + lp->a->write_bcr(ioaddr, 33,
31841 (j << 5) | i);
31842 - *buff++ = lp->a.read_bcr(ioaddr, 34);
31843 + *buff++ = lp->a->read_bcr(ioaddr, 34);
31844 }
31845 }
31846 }
31847 @@ -1785,7 +1785,7 @@ pcnet32_probe1(unsigned long ioaddr, int
31848 ((cards_found >= MAX_UNITS) || full_duplex[cards_found]))
31849 lp->options |= PCNET32_PORT_FD;
31850
31851 - lp->a = *a;
31852 + lp->a = a;
31853
31854 /* prior to register_netdev, dev->name is not yet correct */
31855 if (pcnet32_alloc_ring(dev, pci_name(lp->pci_dev))) {
31856 @@ -1844,7 +1844,7 @@ pcnet32_probe1(unsigned long ioaddr, int
31857 if (lp->mii) {
31858 /* lp->phycount and lp->phymask are set to 0 by memset above */
31859
31860 - lp->mii_if.phy_id = ((lp->a.read_bcr(ioaddr, 33)) >> 5) & 0x1f;
31861 + lp->mii_if.phy_id = ((lp->a->read_bcr(ioaddr, 33)) >> 5) & 0x1f;
31862 /* scan for PHYs */
31863 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
31864 unsigned short id1, id2;
31865 @@ -1864,7 +1864,7 @@ pcnet32_probe1(unsigned long ioaddr, int
31866 pr_info("Found PHY %04x:%04x at address %d\n",
31867 id1, id2, i);
31868 }
31869 - lp->a.write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
31870 + lp->a->write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
31871 if (lp->phycount > 1)
31872 lp->options |= PCNET32_PORT_MII;
31873 }
31874 @@ -2020,10 +2020,10 @@ static int pcnet32_open(struct net_devic
31875 }
31876
31877 /* Reset the PCNET32 */
31878 - lp->a.reset(ioaddr);
31879 + lp->a->reset(ioaddr);
31880
31881 /* switch pcnet32 to 32bit mode */
31882 - lp->a.write_bcr(ioaddr, 20, 2);
31883 + lp->a->write_bcr(ioaddr, 20, 2);
31884
31885 netif_printk(lp, ifup, KERN_DEBUG, dev,
31886 "%s() irq %d tx/rx rings %#x/%#x init %#x\n",
31887 @@ -2032,14 +2032,14 @@ static int pcnet32_open(struct net_devic
31888 (u32) (lp->init_dma_addr));
31889
31890 /* set/reset autoselect bit */
31891 - val = lp->a.read_bcr(ioaddr, 2) & ~2;
31892 + val = lp->a->read_bcr(ioaddr, 2) & ~2;
31893 if (lp->options & PCNET32_PORT_ASEL)
31894 val |= 2;
31895 - lp->a.write_bcr(ioaddr, 2, val);
31896 + lp->a->write_bcr(ioaddr, 2, val);
31897
31898 /* handle full duplex setting */
31899 if (lp->mii_if.full_duplex) {
31900 - val = lp->a.read_bcr(ioaddr, 9) & ~3;
31901 + val = lp->a->read_bcr(ioaddr, 9) & ~3;
31902 if (lp->options & PCNET32_PORT_FD) {
31903 val |= 1;
31904 if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI))
31905 @@ -2049,14 +2049,14 @@ static int pcnet32_open(struct net_devic
31906 if (lp->chip_version == 0x2627)
31907 val |= 3;
31908 }
31909 - lp->a.write_bcr(ioaddr, 9, val);
31910 + lp->a->write_bcr(ioaddr, 9, val);
31911 }
31912
31913 /* set/reset GPSI bit in test register */
31914 - val = lp->a.read_csr(ioaddr, 124) & ~0x10;
31915 + val = lp->a->read_csr(ioaddr, 124) & ~0x10;
31916 if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI)
31917 val |= 0x10;
31918 - lp->a.write_csr(ioaddr, 124, val);
31919 + lp->a->write_csr(ioaddr, 124, val);
31920
31921 /* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */
31922 if (pdev && pdev->subsystem_vendor == PCI_VENDOR_ID_AT &&
31923 @@ -2075,24 +2075,24 @@ static int pcnet32_open(struct net_devic
31924 * duplex, and/or enable auto negotiation, and clear DANAS
31925 */
31926 if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) {
31927 - lp->a.write_bcr(ioaddr, 32,
31928 - lp->a.read_bcr(ioaddr, 32) | 0x0080);
31929 + lp->a->write_bcr(ioaddr, 32,
31930 + lp->a->read_bcr(ioaddr, 32) | 0x0080);
31931 /* disable Auto Negotiation, set 10Mpbs, HD */
31932 - val = lp->a.read_bcr(ioaddr, 32) & ~0xb8;
31933 + val = lp->a->read_bcr(ioaddr, 32) & ~0xb8;
31934 if (lp->options & PCNET32_PORT_FD)
31935 val |= 0x10;
31936 if (lp->options & PCNET32_PORT_100)
31937 val |= 0x08;
31938 - lp->a.write_bcr(ioaddr, 32, val);
31939 + lp->a->write_bcr(ioaddr, 32, val);
31940 } else {
31941 if (lp->options & PCNET32_PORT_ASEL) {
31942 - lp->a.write_bcr(ioaddr, 32,
31943 - lp->a.read_bcr(ioaddr,
31944 + lp->a->write_bcr(ioaddr, 32,
31945 + lp->a->read_bcr(ioaddr,
31946 32) | 0x0080);
31947 /* enable auto negotiate, setup, disable fd */
31948 - val = lp->a.read_bcr(ioaddr, 32) & ~0x98;
31949 + val = lp->a->read_bcr(ioaddr, 32) & ~0x98;
31950 val |= 0x20;
31951 - lp->a.write_bcr(ioaddr, 32, val);
31952 + lp->a->write_bcr(ioaddr, 32, val);
31953 }
31954 }
31955 } else {
31956 @@ -2105,10 +2105,10 @@ static int pcnet32_open(struct net_devic
31957 * There is really no good other way to handle multiple PHYs
31958 * other than turning off all automatics
31959 */
31960 - val = lp->a.read_bcr(ioaddr, 2);
31961 - lp->a.write_bcr(ioaddr, 2, val & ~2);
31962 - val = lp->a.read_bcr(ioaddr, 32);
31963 - lp->a.write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
31964 + val = lp->a->read_bcr(ioaddr, 2);
31965 + lp->a->write_bcr(ioaddr, 2, val & ~2);
31966 + val = lp->a->read_bcr(ioaddr, 32);
31967 + lp->a->write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
31968
31969 if (!(lp->options & PCNET32_PORT_ASEL)) {
31970 /* setup ecmd */
31971 @@ -2118,7 +2118,7 @@ static int pcnet32_open(struct net_devic
31972 ethtool_cmd_speed_set(&ecmd,
31973 (lp->options & PCNET32_PORT_100) ?
31974 SPEED_100 : SPEED_10);
31975 - bcr9 = lp->a.read_bcr(ioaddr, 9);
31976 + bcr9 = lp->a->read_bcr(ioaddr, 9);
31977
31978 if (lp->options & PCNET32_PORT_FD) {
31979 ecmd.duplex = DUPLEX_FULL;
31980 @@ -2127,7 +2127,7 @@ static int pcnet32_open(struct net_devic
31981 ecmd.duplex = DUPLEX_HALF;
31982 bcr9 |= ~(1 << 0);
31983 }
31984 - lp->a.write_bcr(ioaddr, 9, bcr9);
31985 + lp->a->write_bcr(ioaddr, 9, bcr9);
31986 }
31987
31988 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
31989 @@ -2158,9 +2158,9 @@ static int pcnet32_open(struct net_devic
31990
31991 #ifdef DO_DXSUFLO
31992 if (lp->dxsuflo) { /* Disable transmit stop on underflow */
31993 - val = lp->a.read_csr(ioaddr, CSR3);
31994 + val = lp->a->read_csr(ioaddr, CSR3);
31995 val |= 0x40;
31996 - lp->a.write_csr(ioaddr, CSR3, val);
31997 + lp->a->write_csr(ioaddr, CSR3, val);
31998 }
31999 #endif
32000
32001 @@ -2176,11 +2176,11 @@ static int pcnet32_open(struct net_devic
32002 napi_enable(&lp->napi);
32003
32004 /* Re-initialize the PCNET32, and start it when done. */
32005 - lp->a.write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
32006 - lp->a.write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
32007 + lp->a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
32008 + lp->a->write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
32009
32010 - lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
32011 - lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
32012 + lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
32013 + lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
32014
32015 netif_start_queue(dev);
32016
32017 @@ -2192,19 +2192,19 @@ static int pcnet32_open(struct net_devic
32018
32019 i = 0;
32020 while (i++ < 100)
32021 - if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
32022 + if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
32023 break;
32024 /*
32025 * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
32026 * reports that doing so triggers a bug in the '974.
32027 */
32028 - lp->a.write_csr(ioaddr, CSR0, CSR0_NORMAL);
32029 + lp->a->write_csr(ioaddr, CSR0, CSR0_NORMAL);
32030
32031 netif_printk(lp, ifup, KERN_DEBUG, dev,
32032 "pcnet32 open after %d ticks, init block %#x csr0 %4.4x\n",
32033 i,
32034 (u32) (lp->init_dma_addr),
32035 - lp->a.read_csr(ioaddr, CSR0));
32036 + lp->a->read_csr(ioaddr, CSR0));
32037
32038 spin_unlock_irqrestore(&lp->lock, flags);
32039
32040 @@ -2218,7 +2218,7 @@ err_free_ring:
32041 * Switch back to 16bit mode to avoid problems with dumb
32042 * DOS packet driver after a warm reboot
32043 */
32044 - lp->a.write_bcr(ioaddr, 20, 4);
32045 + lp->a->write_bcr(ioaddr, 20, 4);
32046
32047 err_free_irq:
32048 spin_unlock_irqrestore(&lp->lock, flags);
32049 @@ -2323,7 +2323,7 @@ static void pcnet32_restart(struct net_d
32050
32051 /* wait for stop */
32052 for (i = 0; i < 100; i++)
32053 - if (lp->a.read_csr(ioaddr, CSR0) & CSR0_STOP)
32054 + if (lp->a->read_csr(ioaddr, CSR0) & CSR0_STOP)
32055 break;
32056
32057 if (i >= 100)
32058 @@ -2335,13 +2335,13 @@ static void pcnet32_restart(struct net_d
32059 return;
32060
32061 /* ReInit Ring */
32062 - lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
32063 + lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
32064 i = 0;
32065 while (i++ < 1000)
32066 - if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
32067 + if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
32068 break;
32069
32070 - lp->a.write_csr(ioaddr, CSR0, csr0_bits);
32071 + lp->a->write_csr(ioaddr, CSR0, csr0_bits);
32072 }
32073
32074 static void pcnet32_tx_timeout(struct net_device *dev)
32075 @@ -2353,8 +2353,8 @@ static void pcnet32_tx_timeout(struct ne
32076 /* Transmitter timeout, serious problems. */
32077 if (pcnet32_debug & NETIF_MSG_DRV)
32078 pr_err("%s: transmit timed out, status %4.4x, resetting\n",
32079 - dev->name, lp->a.read_csr(ioaddr, CSR0));
32080 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
32081 + dev->name, lp->a->read_csr(ioaddr, CSR0));
32082 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
32083 dev->stats.tx_errors++;
32084 if (netif_msg_tx_err(lp)) {
32085 int i;
32086 @@ -2397,7 +2397,7 @@ static netdev_tx_t pcnet32_start_xmit(st
32087
32088 netif_printk(lp, tx_queued, KERN_DEBUG, dev,
32089 "%s() called, csr0 %4.4x\n",
32090 - __func__, lp->a.read_csr(ioaddr, CSR0));
32091 + __func__, lp->a->read_csr(ioaddr, CSR0));
32092
32093 /* Default status -- will not enable Successful-TxDone
32094 * interrupt when that option is available to us.
32095 @@ -2427,7 +2427,7 @@ static netdev_tx_t pcnet32_start_xmit(st
32096 dev->stats.tx_bytes += skb->len;
32097
32098 /* Trigger an immediate send poll. */
32099 - lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
32100 + lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
32101
32102 if (lp->tx_ring[(entry + 1) & lp->tx_mod_mask].base != 0) {
32103 lp->tx_full = 1;
32104 @@ -2452,16 +2452,16 @@ pcnet32_interrupt(int irq, void *dev_id)
32105
32106 spin_lock(&lp->lock);
32107
32108 - csr0 = lp->a.read_csr(ioaddr, CSR0);
32109 + csr0 = lp->a->read_csr(ioaddr, CSR0);
32110 while ((csr0 & 0x8f00) && --boguscnt >= 0) {
32111 if (csr0 == 0xffff)
32112 break; /* PCMCIA remove happened */
32113 /* Acknowledge all of the current interrupt sources ASAP. */
32114 - lp->a.write_csr(ioaddr, CSR0, csr0 & ~0x004f);
32115 + lp->a->write_csr(ioaddr, CSR0, csr0 & ~0x004f);
32116
32117 netif_printk(lp, intr, KERN_DEBUG, dev,
32118 "interrupt csr0=%#2.2x new csr=%#2.2x\n",
32119 - csr0, lp->a.read_csr(ioaddr, CSR0));
32120 + csr0, lp->a->read_csr(ioaddr, CSR0));
32121
32122 /* Log misc errors. */
32123 if (csr0 & 0x4000)
32124 @@ -2488,19 +2488,19 @@ pcnet32_interrupt(int irq, void *dev_id)
32125 if (napi_schedule_prep(&lp->napi)) {
32126 u16 val;
32127 /* set interrupt masks */
32128 - val = lp->a.read_csr(ioaddr, CSR3);
32129 + val = lp->a->read_csr(ioaddr, CSR3);
32130 val |= 0x5f00;
32131 - lp->a.write_csr(ioaddr, CSR3, val);
32132 + lp->a->write_csr(ioaddr, CSR3, val);
32133
32134 __napi_schedule(&lp->napi);
32135 break;
32136 }
32137 - csr0 = lp->a.read_csr(ioaddr, CSR0);
32138 + csr0 = lp->a->read_csr(ioaddr, CSR0);
32139 }
32140
32141 netif_printk(lp, intr, KERN_DEBUG, dev,
32142 "exiting interrupt, csr0=%#4.4x\n",
32143 - lp->a.read_csr(ioaddr, CSR0));
32144 + lp->a->read_csr(ioaddr, CSR0));
32145
32146 spin_unlock(&lp->lock);
32147
32148 @@ -2520,20 +2520,20 @@ static int pcnet32_close(struct net_devi
32149
32150 spin_lock_irqsave(&lp->lock, flags);
32151
32152 - dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
32153 + dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
32154
32155 netif_printk(lp, ifdown, KERN_DEBUG, dev,
32156 "Shutting down ethercard, status was %2.2x\n",
32157 - lp->a.read_csr(ioaddr, CSR0));
32158 + lp->a->read_csr(ioaddr, CSR0));
32159
32160 /* We stop the PCNET32 here -- it occasionally polls memory if we don't. */
32161 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
32162 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
32163
32164 /*
32165 * Switch back to 16bit mode to avoid problems with dumb
32166 * DOS packet driver after a warm reboot
32167 */
32168 - lp->a.write_bcr(ioaddr, 20, 4);
32169 + lp->a->write_bcr(ioaddr, 20, 4);
32170
32171 spin_unlock_irqrestore(&lp->lock, flags);
32172
32173 @@ -2556,7 +2556,7 @@ static struct net_device_stats *pcnet32_
32174 unsigned long flags;
32175
32176 spin_lock_irqsave(&lp->lock, flags);
32177 - dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
32178 + dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
32179 spin_unlock_irqrestore(&lp->lock, flags);
32180
32181 return &dev->stats;
32182 @@ -2578,10 +2578,10 @@ static void pcnet32_load_multicast(struc
32183 if (dev->flags & IFF_ALLMULTI) {
32184 ib->filter[0] = cpu_to_le32(~0U);
32185 ib->filter[1] = cpu_to_le32(~0U);
32186 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
32187 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
32188 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
32189 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
32190 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
32191 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
32192 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
32193 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
32194 return;
32195 }
32196 /* clear the multicast filter */
32197 @@ -2601,7 +2601,7 @@ static void pcnet32_load_multicast(struc
32198 mcast_table[crc >> 4] |= cpu_to_le16(1 << (crc & 0xf));
32199 }
32200 for (i = 0; i < 4; i++)
32201 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER + i,
32202 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER + i,
32203 le16_to_cpu(mcast_table[i]));
32204 }
32205
32206 @@ -2616,28 +2616,28 @@ static void pcnet32_set_multicast_list(s
32207
32208 spin_lock_irqsave(&lp->lock, flags);
32209 suspended = pcnet32_suspend(dev, &flags, 0);
32210 - csr15 = lp->a.read_csr(ioaddr, CSR15);
32211 + csr15 = lp->a->read_csr(ioaddr, CSR15);
32212 if (dev->flags & IFF_PROMISC) {
32213 /* Log any net taps. */
32214 netif_info(lp, hw, dev, "Promiscuous mode enabled\n");
32215 lp->init_block->mode =
32216 cpu_to_le16(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) <<
32217 7);
32218 - lp->a.write_csr(ioaddr, CSR15, csr15 | 0x8000);
32219 + lp->a->write_csr(ioaddr, CSR15, csr15 | 0x8000);
32220 } else {
32221 lp->init_block->mode =
32222 cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7);
32223 - lp->a.write_csr(ioaddr, CSR15, csr15 & 0x7fff);
32224 + lp->a->write_csr(ioaddr, CSR15, csr15 & 0x7fff);
32225 pcnet32_load_multicast(dev);
32226 }
32227
32228 if (suspended) {
32229 int csr5;
32230 /* clear SUSPEND (SPND) - CSR5 bit 0 */
32231 - csr5 = lp->a.read_csr(ioaddr, CSR5);
32232 - lp->a.write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
32233 + csr5 = lp->a->read_csr(ioaddr, CSR5);
32234 + lp->a->write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
32235 } else {
32236 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
32237 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
32238 pcnet32_restart(dev, CSR0_NORMAL);
32239 netif_wake_queue(dev);
32240 }
32241 @@ -2655,8 +2655,8 @@ static int mdio_read(struct net_device *
32242 if (!lp->mii)
32243 return 0;
32244
32245 - lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
32246 - val_out = lp->a.read_bcr(ioaddr, 34);
32247 + lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
32248 + val_out = lp->a->read_bcr(ioaddr, 34);
32249
32250 return val_out;
32251 }
32252 @@ -2670,8 +2670,8 @@ static void mdio_write(struct net_device
32253 if (!lp->mii)
32254 return;
32255
32256 - lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
32257 - lp->a.write_bcr(ioaddr, 34, val);
32258 + lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
32259 + lp->a->write_bcr(ioaddr, 34, val);
32260 }
32261
32262 static int pcnet32_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
32263 @@ -2748,7 +2748,7 @@ static void pcnet32_check_media(struct n
32264 curr_link = mii_link_ok(&lp->mii_if);
32265 } else {
32266 ulong ioaddr = dev->base_addr; /* card base I/O address */
32267 - curr_link = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
32268 + curr_link = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
32269 }
32270 if (!curr_link) {
32271 if (prev_link || verbose) {
32272 @@ -2771,13 +2771,13 @@ static void pcnet32_check_media(struct n
32273 (ecmd.duplex == DUPLEX_FULL)
32274 ? "full" : "half");
32275 }
32276 - bcr9 = lp->a.read_bcr(dev->base_addr, 9);
32277 + bcr9 = lp->a->read_bcr(dev->base_addr, 9);
32278 if ((bcr9 & (1 << 0)) != lp->mii_if.full_duplex) {
32279 if (lp->mii_if.full_duplex)
32280 bcr9 |= (1 << 0);
32281 else
32282 bcr9 &= ~(1 << 0);
32283 - lp->a.write_bcr(dev->base_addr, 9, bcr9);
32284 + lp->a->write_bcr(dev->base_addr, 9, bcr9);
32285 }
32286 } else {
32287 netif_info(lp, link, dev, "link up\n");
32288 diff -urNp linux-3.0.8/drivers/net/ppp_generic.c linux-3.0.8/drivers/net/ppp_generic.c
32289 --- linux-3.0.8/drivers/net/ppp_generic.c 2011-07-21 22:17:23.000000000 -0400
32290 +++ linux-3.0.8/drivers/net/ppp_generic.c 2011-08-23 21:47:55.000000000 -0400
32291 @@ -987,7 +987,6 @@ ppp_net_ioctl(struct net_device *dev, st
32292 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
32293 struct ppp_stats stats;
32294 struct ppp_comp_stats cstats;
32295 - char *vers;
32296
32297 switch (cmd) {
32298 case SIOCGPPPSTATS:
32299 @@ -1009,8 +1008,7 @@ ppp_net_ioctl(struct net_device *dev, st
32300 break;
32301
32302 case SIOCGPPPVER:
32303 - vers = PPP_VERSION;
32304 - if (copy_to_user(addr, vers, strlen(vers) + 1))
32305 + if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
32306 break;
32307 err = 0;
32308 break;
32309 diff -urNp linux-3.0.8/drivers/net/r8169.c linux-3.0.8/drivers/net/r8169.c
32310 --- linux-3.0.8/drivers/net/r8169.c 2011-10-24 08:05:21.000000000 -0400
32311 +++ linux-3.0.8/drivers/net/r8169.c 2011-08-23 21:47:55.000000000 -0400
32312 @@ -645,12 +645,12 @@ struct rtl8169_private {
32313 struct mdio_ops {
32314 void (*write)(void __iomem *, int, int);
32315 int (*read)(void __iomem *, int);
32316 - } mdio_ops;
32317 + } __no_const mdio_ops;
32318
32319 struct pll_power_ops {
32320 void (*down)(struct rtl8169_private *);
32321 void (*up)(struct rtl8169_private *);
32322 - } pll_power_ops;
32323 + } __no_const pll_power_ops;
32324
32325 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
32326 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
32327 diff -urNp linux-3.0.8/drivers/net/sis190.c linux-3.0.8/drivers/net/sis190.c
32328 --- linux-3.0.8/drivers/net/sis190.c 2011-10-24 08:05:21.000000000 -0400
32329 +++ linux-3.0.8/drivers/net/sis190.c 2011-10-11 10:44:33.000000000 -0400
32330 @@ -1623,7 +1623,7 @@ static int __devinit sis190_get_mac_addr
32331 static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
32332 struct net_device *dev)
32333 {
32334 - static const u16 __devinitdata ids[] = { 0x0965, 0x0966, 0x0968 };
32335 + static const u16 __devinitconst ids[] = { 0x0965, 0x0966, 0x0968 };
32336 struct sis190_private *tp = netdev_priv(dev);
32337 struct pci_dev *isa_bridge;
32338 u8 reg, tmp8;
32339 diff -urNp linux-3.0.8/drivers/net/sundance.c linux-3.0.8/drivers/net/sundance.c
32340 --- linux-3.0.8/drivers/net/sundance.c 2011-07-21 22:17:23.000000000 -0400
32341 +++ linux-3.0.8/drivers/net/sundance.c 2011-10-11 10:44:33.000000000 -0400
32342 @@ -218,7 +218,7 @@ enum {
32343 struct pci_id_info {
32344 const char *name;
32345 };
32346 -static const struct pci_id_info pci_id_tbl[] __devinitdata = {
32347 +static const struct pci_id_info pci_id_tbl[] __devinitconst = {
32348 {"D-Link DFE-550TX FAST Ethernet Adapter"},
32349 {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
32350 {"D-Link DFE-580TX 4 port Server Adapter"},
32351 diff -urNp linux-3.0.8/drivers/net/tg3.h linux-3.0.8/drivers/net/tg3.h
32352 --- linux-3.0.8/drivers/net/tg3.h 2011-07-21 22:17:23.000000000 -0400
32353 +++ linux-3.0.8/drivers/net/tg3.h 2011-08-23 21:47:55.000000000 -0400
32354 @@ -134,6 +134,7 @@
32355 #define CHIPREV_ID_5750_A0 0x4000
32356 #define CHIPREV_ID_5750_A1 0x4001
32357 #define CHIPREV_ID_5750_A3 0x4003
32358 +#define CHIPREV_ID_5750_C1 0x4201
32359 #define CHIPREV_ID_5750_C2 0x4202
32360 #define CHIPREV_ID_5752_A0_HW 0x5000
32361 #define CHIPREV_ID_5752_A0 0x6000
32362 diff -urNp linux-3.0.8/drivers/net/tokenring/abyss.c linux-3.0.8/drivers/net/tokenring/abyss.c
32363 --- linux-3.0.8/drivers/net/tokenring/abyss.c 2011-07-21 22:17:23.000000000 -0400
32364 +++ linux-3.0.8/drivers/net/tokenring/abyss.c 2011-08-23 21:47:55.000000000 -0400
32365 @@ -451,10 +451,12 @@ static struct pci_driver abyss_driver =
32366
32367 static int __init abyss_init (void)
32368 {
32369 - abyss_netdev_ops = tms380tr_netdev_ops;
32370 + pax_open_kernel();
32371 + memcpy((void *)&abyss_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
32372
32373 - abyss_netdev_ops.ndo_open = abyss_open;
32374 - abyss_netdev_ops.ndo_stop = abyss_close;
32375 + *(void **)&abyss_netdev_ops.ndo_open = abyss_open;
32376 + *(void **)&abyss_netdev_ops.ndo_stop = abyss_close;
32377 + pax_close_kernel();
32378
32379 return pci_register_driver(&abyss_driver);
32380 }
32381 diff -urNp linux-3.0.8/drivers/net/tokenring/madgemc.c linux-3.0.8/drivers/net/tokenring/madgemc.c
32382 --- linux-3.0.8/drivers/net/tokenring/madgemc.c 2011-07-21 22:17:23.000000000 -0400
32383 +++ linux-3.0.8/drivers/net/tokenring/madgemc.c 2011-08-23 21:47:55.000000000 -0400
32384 @@ -744,9 +744,11 @@ static struct mca_driver madgemc_driver
32385
32386 static int __init madgemc_init (void)
32387 {
32388 - madgemc_netdev_ops = tms380tr_netdev_ops;
32389 - madgemc_netdev_ops.ndo_open = madgemc_open;
32390 - madgemc_netdev_ops.ndo_stop = madgemc_close;
32391 + pax_open_kernel();
32392 + memcpy((void *)&madgemc_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
32393 + *(void **)&madgemc_netdev_ops.ndo_open = madgemc_open;
32394 + *(void **)&madgemc_netdev_ops.ndo_stop = madgemc_close;
32395 + pax_close_kernel();
32396
32397 return mca_register_driver (&madgemc_driver);
32398 }
32399 diff -urNp linux-3.0.8/drivers/net/tokenring/proteon.c linux-3.0.8/drivers/net/tokenring/proteon.c
32400 --- linux-3.0.8/drivers/net/tokenring/proteon.c 2011-07-21 22:17:23.000000000 -0400
32401 +++ linux-3.0.8/drivers/net/tokenring/proteon.c 2011-08-23 21:47:55.000000000 -0400
32402 @@ -353,9 +353,11 @@ static int __init proteon_init(void)
32403 struct platform_device *pdev;
32404 int i, num = 0, err = 0;
32405
32406 - proteon_netdev_ops = tms380tr_netdev_ops;
32407 - proteon_netdev_ops.ndo_open = proteon_open;
32408 - proteon_netdev_ops.ndo_stop = tms380tr_close;
32409 + pax_open_kernel();
32410 + memcpy((void *)&proteon_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
32411 + *(void **)&proteon_netdev_ops.ndo_open = proteon_open;
32412 + *(void **)&proteon_netdev_ops.ndo_stop = tms380tr_close;
32413 + pax_close_kernel();
32414
32415 err = platform_driver_register(&proteon_driver);
32416 if (err)
32417 diff -urNp linux-3.0.8/drivers/net/tokenring/skisa.c linux-3.0.8/drivers/net/tokenring/skisa.c
32418 --- linux-3.0.8/drivers/net/tokenring/skisa.c 2011-07-21 22:17:23.000000000 -0400
32419 +++ linux-3.0.8/drivers/net/tokenring/skisa.c 2011-08-23 21:47:55.000000000 -0400
32420 @@ -363,9 +363,11 @@ static int __init sk_isa_init(void)
32421 struct platform_device *pdev;
32422 int i, num = 0, err = 0;
32423
32424 - sk_isa_netdev_ops = tms380tr_netdev_ops;
32425 - sk_isa_netdev_ops.ndo_open = sk_isa_open;
32426 - sk_isa_netdev_ops.ndo_stop = tms380tr_close;
32427 + pax_open_kernel();
32428 + memcpy((void *)&sk_isa_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
32429 + *(void **)&sk_isa_netdev_ops.ndo_open = sk_isa_open;
32430 + *(void **)&sk_isa_netdev_ops.ndo_stop = tms380tr_close;
32431 + pax_close_kernel();
32432
32433 err = platform_driver_register(&sk_isa_driver);
32434 if (err)
32435 diff -urNp linux-3.0.8/drivers/net/tulip/de2104x.c linux-3.0.8/drivers/net/tulip/de2104x.c
32436 --- linux-3.0.8/drivers/net/tulip/de2104x.c 2011-07-21 22:17:23.000000000 -0400
32437 +++ linux-3.0.8/drivers/net/tulip/de2104x.c 2011-08-23 21:48:14.000000000 -0400
32438 @@ -1794,6 +1794,8 @@ static void __devinit de21041_get_srom_i
32439 struct de_srom_info_leaf *il;
32440 void *bufp;
32441
32442 + pax_track_stack();
32443 +
32444 /* download entire eeprom */
32445 for (i = 0; i < DE_EEPROM_WORDS; i++)
32446 ((__le16 *)ee_data)[i] =
32447 diff -urNp linux-3.0.8/drivers/net/tulip/de4x5.c linux-3.0.8/drivers/net/tulip/de4x5.c
32448 --- linux-3.0.8/drivers/net/tulip/de4x5.c 2011-07-21 22:17:23.000000000 -0400
32449 +++ linux-3.0.8/drivers/net/tulip/de4x5.c 2011-08-23 21:47:55.000000000 -0400
32450 @@ -5401,7 +5401,7 @@ de4x5_ioctl(struct net_device *dev, stru
32451 for (i=0; i<ETH_ALEN; i++) {
32452 tmp.addr[i] = dev->dev_addr[i];
32453 }
32454 - if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
32455 + if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
32456 break;
32457
32458 case DE4X5_SET_HWADDR: /* Set the hardware address */
32459 @@ -5441,7 +5441,7 @@ de4x5_ioctl(struct net_device *dev, stru
32460 spin_lock_irqsave(&lp->lock, flags);
32461 memcpy(&statbuf, &lp->pktStats, ioc->len);
32462 spin_unlock_irqrestore(&lp->lock, flags);
32463 - if (copy_to_user(ioc->data, &statbuf, ioc->len))
32464 + if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
32465 return -EFAULT;
32466 break;
32467 }
32468 diff -urNp linux-3.0.8/drivers/net/tulip/eeprom.c linux-3.0.8/drivers/net/tulip/eeprom.c
32469 --- linux-3.0.8/drivers/net/tulip/eeprom.c 2011-07-21 22:17:23.000000000 -0400
32470 +++ linux-3.0.8/drivers/net/tulip/eeprom.c 2011-10-11 10:44:33.000000000 -0400
32471 @@ -81,7 +81,7 @@ static struct eeprom_fixup eeprom_fixups
32472 {NULL}};
32473
32474
32475 -static const char *block_name[] __devinitdata = {
32476 +static const char *block_name[] __devinitconst = {
32477 "21140 non-MII",
32478 "21140 MII PHY",
32479 "21142 Serial PHY",
32480 diff -urNp linux-3.0.8/drivers/net/tulip/winbond-840.c linux-3.0.8/drivers/net/tulip/winbond-840.c
32481 --- linux-3.0.8/drivers/net/tulip/winbond-840.c 2011-07-21 22:17:23.000000000 -0400
32482 +++ linux-3.0.8/drivers/net/tulip/winbond-840.c 2011-10-11 10:44:33.000000000 -0400
32483 @@ -236,7 +236,7 @@ struct pci_id_info {
32484 int drv_flags; /* Driver use, intended as capability flags. */
32485 };
32486
32487 -static const struct pci_id_info pci_id_tbl[] __devinitdata = {
32488 +static const struct pci_id_info pci_id_tbl[] __devinitconst = {
32489 { /* Sometime a Level-One switch card. */
32490 "Winbond W89c840", CanHaveMII | HasBrokenTx | FDXOnNoMII},
32491 { "Winbond W89c840", CanHaveMII | HasBrokenTx},
32492 diff -urNp linux-3.0.8/drivers/net/usb/hso.c linux-3.0.8/drivers/net/usb/hso.c
32493 --- linux-3.0.8/drivers/net/usb/hso.c 2011-07-21 22:17:23.000000000 -0400
32494 +++ linux-3.0.8/drivers/net/usb/hso.c 2011-08-23 21:47:55.000000000 -0400
32495 @@ -71,7 +71,7 @@
32496 #include <asm/byteorder.h>
32497 #include <linux/serial_core.h>
32498 #include <linux/serial.h>
32499 -
32500 +#include <asm/local.h>
32501
32502 #define MOD_AUTHOR "Option Wireless"
32503 #define MOD_DESCRIPTION "USB High Speed Option driver"
32504 @@ -257,7 +257,7 @@ struct hso_serial {
32505
32506 /* from usb_serial_port */
32507 struct tty_struct *tty;
32508 - int open_count;
32509 + local_t open_count;
32510 spinlock_t serial_lock;
32511
32512 int (*write_data) (struct hso_serial *serial);
32513 @@ -1190,7 +1190,7 @@ static void put_rxbuf_data_and_resubmit_
32514 struct urb *urb;
32515
32516 urb = serial->rx_urb[0];
32517 - if (serial->open_count > 0) {
32518 + if (local_read(&serial->open_count) > 0) {
32519 count = put_rxbuf_data(urb, serial);
32520 if (count == -1)
32521 return;
32522 @@ -1226,7 +1226,7 @@ static void hso_std_serial_read_bulk_cal
32523 DUMP1(urb->transfer_buffer, urb->actual_length);
32524
32525 /* Anyone listening? */
32526 - if (serial->open_count == 0)
32527 + if (local_read(&serial->open_count) == 0)
32528 return;
32529
32530 if (status == 0) {
32531 @@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_st
32532 spin_unlock_irq(&serial->serial_lock);
32533
32534 /* check for port already opened, if not set the termios */
32535 - serial->open_count++;
32536 - if (serial->open_count == 1) {
32537 + if (local_inc_return(&serial->open_count) == 1) {
32538 serial->rx_state = RX_IDLE;
32539 /* Force default termio settings */
32540 _hso_serial_set_termios(tty, NULL);
32541 @@ -1324,7 +1323,7 @@ static int hso_serial_open(struct tty_st
32542 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
32543 if (result) {
32544 hso_stop_serial_device(serial->parent);
32545 - serial->open_count--;
32546 + local_dec(&serial->open_count);
32547 kref_put(&serial->parent->ref, hso_serial_ref_free);
32548 }
32549 } else {
32550 @@ -1361,10 +1360,10 @@ static void hso_serial_close(struct tty_
32551
32552 /* reset the rts and dtr */
32553 /* do the actual close */
32554 - serial->open_count--;
32555 + local_dec(&serial->open_count);
32556
32557 - if (serial->open_count <= 0) {
32558 - serial->open_count = 0;
32559 + if (local_read(&serial->open_count) <= 0) {
32560 + local_set(&serial->open_count, 0);
32561 spin_lock_irq(&serial->serial_lock);
32562 if (serial->tty == tty) {
32563 serial->tty->driver_data = NULL;
32564 @@ -1446,7 +1445,7 @@ static void hso_serial_set_termios(struc
32565
32566 /* the actual setup */
32567 spin_lock_irqsave(&serial->serial_lock, flags);
32568 - if (serial->open_count)
32569 + if (local_read(&serial->open_count))
32570 _hso_serial_set_termios(tty, old);
32571 else
32572 tty->termios = old;
32573 @@ -1905,7 +1904,7 @@ static void intr_callback(struct urb *ur
32574 D1("Pending read interrupt on port %d\n", i);
32575 spin_lock(&serial->serial_lock);
32576 if (serial->rx_state == RX_IDLE &&
32577 - serial->open_count > 0) {
32578 + local_read(&serial->open_count) > 0) {
32579 /* Setup and send a ctrl req read on
32580 * port i */
32581 if (!serial->rx_urb_filled[0]) {
32582 @@ -3098,7 +3097,7 @@ static int hso_resume(struct usb_interfa
32583 /* Start all serial ports */
32584 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
32585 if (serial_table[i] && (serial_table[i]->interface == iface)) {
32586 - if (dev2ser(serial_table[i])->open_count) {
32587 + if (local_read(&dev2ser(serial_table[i])->open_count)) {
32588 result =
32589 hso_start_serial_device(serial_table[i], GFP_NOIO);
32590 hso_kick_transmit(dev2ser(serial_table[i]));
32591 diff -urNp linux-3.0.8/drivers/net/vmxnet3/vmxnet3_ethtool.c linux-3.0.8/drivers/net/vmxnet3/vmxnet3_ethtool.c
32592 --- linux-3.0.8/drivers/net/vmxnet3/vmxnet3_ethtool.c 2011-07-21 22:17:23.000000000 -0400
32593 +++ linux-3.0.8/drivers/net/vmxnet3/vmxnet3_ethtool.c 2011-08-23 21:47:55.000000000 -0400
32594 @@ -594,8 +594,7 @@ vmxnet3_set_rss_indir(struct net_device
32595 * Return with error code if any of the queue indices
32596 * is out of range
32597 */
32598 - if (p->ring_index[i] < 0 ||
32599 - p->ring_index[i] >= adapter->num_rx_queues)
32600 + if (p->ring_index[i] >= adapter->num_rx_queues)
32601 return -EINVAL;
32602 }
32603
32604 diff -urNp linux-3.0.8/drivers/net/vxge/vxge-config.h linux-3.0.8/drivers/net/vxge/vxge-config.h
32605 --- linux-3.0.8/drivers/net/vxge/vxge-config.h 2011-07-21 22:17:23.000000000 -0400
32606 +++ linux-3.0.8/drivers/net/vxge/vxge-config.h 2011-08-23 21:47:55.000000000 -0400
32607 @@ -512,7 +512,7 @@ struct vxge_hw_uld_cbs {
32608 void (*link_down)(struct __vxge_hw_device *devh);
32609 void (*crit_err)(struct __vxge_hw_device *devh,
32610 enum vxge_hw_event type, u64 ext_data);
32611 -};
32612 +} __no_const;
32613
32614 /*
32615 * struct __vxge_hw_blockpool_entry - Block private data structure
32616 diff -urNp linux-3.0.8/drivers/net/vxge/vxge-main.c linux-3.0.8/drivers/net/vxge/vxge-main.c
32617 --- linux-3.0.8/drivers/net/vxge/vxge-main.c 2011-07-21 22:17:23.000000000 -0400
32618 +++ linux-3.0.8/drivers/net/vxge/vxge-main.c 2011-08-23 21:48:14.000000000 -0400
32619 @@ -98,6 +98,8 @@ static inline void VXGE_COMPLETE_VPATH_T
32620 struct sk_buff *completed[NR_SKB_COMPLETED];
32621 int more;
32622
32623 + pax_track_stack();
32624 +
32625 do {
32626 more = 0;
32627 skb_ptr = completed;
32628 @@ -1920,6 +1922,8 @@ static enum vxge_hw_status vxge_rth_conf
32629 u8 mtable[256] = {0}; /* CPU to vpath mapping */
32630 int index;
32631
32632 + pax_track_stack();
32633 +
32634 /*
32635 * Filling
32636 * - itable with bucket numbers
32637 diff -urNp linux-3.0.8/drivers/net/vxge/vxge-traffic.h linux-3.0.8/drivers/net/vxge/vxge-traffic.h
32638 --- linux-3.0.8/drivers/net/vxge/vxge-traffic.h 2011-07-21 22:17:23.000000000 -0400
32639 +++ linux-3.0.8/drivers/net/vxge/vxge-traffic.h 2011-08-23 21:47:55.000000000 -0400
32640 @@ -2088,7 +2088,7 @@ struct vxge_hw_mempool_cbs {
32641 struct vxge_hw_mempool_dma *dma_object,
32642 u32 index,
32643 u32 is_last);
32644 -};
32645 +} __no_const;
32646
32647 #define VXGE_HW_VIRTUAL_PATH_HANDLE(vpath) \
32648 ((struct __vxge_hw_vpath_handle *)(vpath)->vpath_handles.next)
32649 diff -urNp linux-3.0.8/drivers/net/wan/cycx_x25.c linux-3.0.8/drivers/net/wan/cycx_x25.c
32650 --- linux-3.0.8/drivers/net/wan/cycx_x25.c 2011-07-21 22:17:23.000000000 -0400
32651 +++ linux-3.0.8/drivers/net/wan/cycx_x25.c 2011-08-23 21:48:14.000000000 -0400
32652 @@ -1018,6 +1018,8 @@ static void hex_dump(char *msg, unsigned
32653 unsigned char hex[1024],
32654 * phex = hex;
32655
32656 + pax_track_stack();
32657 +
32658 if (len >= (sizeof(hex) / 2))
32659 len = (sizeof(hex) / 2) - 1;
32660
32661 diff -urNp linux-3.0.8/drivers/net/wan/hdlc_x25.c linux-3.0.8/drivers/net/wan/hdlc_x25.c
32662 --- linux-3.0.8/drivers/net/wan/hdlc_x25.c 2011-07-21 22:17:23.000000000 -0400
32663 +++ linux-3.0.8/drivers/net/wan/hdlc_x25.c 2011-08-23 21:47:55.000000000 -0400
32664 @@ -136,16 +136,16 @@ static netdev_tx_t x25_xmit(struct sk_bu
32665
32666 static int x25_open(struct net_device *dev)
32667 {
32668 - struct lapb_register_struct cb;
32669 + static struct lapb_register_struct cb = {
32670 + .connect_confirmation = x25_connected,
32671 + .connect_indication = x25_connected,
32672 + .disconnect_confirmation = x25_disconnected,
32673 + .disconnect_indication = x25_disconnected,
32674 + .data_indication = x25_data_indication,
32675 + .data_transmit = x25_data_transmit
32676 + };
32677 int result;
32678
32679 - cb.connect_confirmation = x25_connected;
32680 - cb.connect_indication = x25_connected;
32681 - cb.disconnect_confirmation = x25_disconnected;
32682 - cb.disconnect_indication = x25_disconnected;
32683 - cb.data_indication = x25_data_indication;
32684 - cb.data_transmit = x25_data_transmit;
32685 -
32686 result = lapb_register(dev, &cb);
32687 if (result != LAPB_OK)
32688 return result;
32689 diff -urNp linux-3.0.8/drivers/net/wimax/i2400m/usb-fw.c linux-3.0.8/drivers/net/wimax/i2400m/usb-fw.c
32690 --- linux-3.0.8/drivers/net/wimax/i2400m/usb-fw.c 2011-07-21 22:17:23.000000000 -0400
32691 +++ linux-3.0.8/drivers/net/wimax/i2400m/usb-fw.c 2011-08-23 21:48:14.000000000 -0400
32692 @@ -287,6 +287,8 @@ ssize_t i2400mu_bus_bm_wait_for_ack(stru
32693 int do_autopm = 1;
32694 DECLARE_COMPLETION_ONSTACK(notif_completion);
32695
32696 + pax_track_stack();
32697 +
32698 d_fnstart(8, dev, "(i2400m %p ack %p size %zu)\n",
32699 i2400m, ack, ack_size);
32700 BUG_ON(_ack == i2400m->bm_ack_buf);
32701 diff -urNp linux-3.0.8/drivers/net/wireless/airo.c linux-3.0.8/drivers/net/wireless/airo.c
32702 --- linux-3.0.8/drivers/net/wireless/airo.c 2011-10-24 08:05:21.000000000 -0400
32703 +++ linux-3.0.8/drivers/net/wireless/airo.c 2011-08-23 21:48:14.000000000 -0400
32704 @@ -3003,6 +3003,8 @@ static void airo_process_scan_results (s
32705 BSSListElement * loop_net;
32706 BSSListElement * tmp_net;
32707
32708 + pax_track_stack();
32709 +
32710 /* Blow away current list of scan results */
32711 list_for_each_entry_safe (loop_net, tmp_net, &ai->network_list, list) {
32712 list_move_tail (&loop_net->list, &ai->network_free_list);
32713 @@ -3794,6 +3796,8 @@ static u16 setup_card(struct airo_info *
32714 WepKeyRid wkr;
32715 int rc;
32716
32717 + pax_track_stack();
32718 +
32719 memset( &mySsid, 0, sizeof( mySsid ) );
32720 kfree (ai->flash);
32721 ai->flash = NULL;
32722 @@ -4753,6 +4757,8 @@ static int proc_stats_rid_open( struct i
32723 __le32 *vals = stats.vals;
32724 int len;
32725
32726 + pax_track_stack();
32727 +
32728 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
32729 return -ENOMEM;
32730 data = file->private_data;
32731 @@ -5476,6 +5482,8 @@ static int proc_BSSList_open( struct ino
32732 /* If doLoseSync is not 1, we won't do a Lose Sync */
32733 int doLoseSync = -1;
32734
32735 + pax_track_stack();
32736 +
32737 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
32738 return -ENOMEM;
32739 data = file->private_data;
32740 @@ -7181,6 +7189,8 @@ static int airo_get_aplist(struct net_de
32741 int i;
32742 int loseSync = capable(CAP_NET_ADMIN) ? 1: -1;
32743
32744 + pax_track_stack();
32745 +
32746 qual = kmalloc(IW_MAX_AP * sizeof(*qual), GFP_KERNEL);
32747 if (!qual)
32748 return -ENOMEM;
32749 @@ -7741,6 +7751,8 @@ static void airo_read_wireless_stats(str
32750 CapabilityRid cap_rid;
32751 __le32 *vals = stats_rid.vals;
32752
32753 + pax_track_stack();
32754 +
32755 /* Get stats out of the card */
32756 clear_bit(JOB_WSTATS, &local->jobs);
32757 if (local->power.event) {
32758 diff -urNp linux-3.0.8/drivers/net/wireless/ath/ath5k/debug.c linux-3.0.8/drivers/net/wireless/ath/ath5k/debug.c
32759 --- linux-3.0.8/drivers/net/wireless/ath/ath5k/debug.c 2011-07-21 22:17:23.000000000 -0400
32760 +++ linux-3.0.8/drivers/net/wireless/ath/ath5k/debug.c 2011-08-23 21:48:14.000000000 -0400
32761 @@ -204,6 +204,8 @@ static ssize_t read_file_beacon(struct f
32762 unsigned int v;
32763 u64 tsf;
32764
32765 + pax_track_stack();
32766 +
32767 v = ath5k_hw_reg_read(sc->ah, AR5K_BEACON);
32768 len += snprintf(buf+len, sizeof(buf)-len,
32769 "%-24s0x%08x\tintval: %d\tTIM: 0x%x\n",
32770 @@ -323,6 +325,8 @@ static ssize_t read_file_debug(struct fi
32771 unsigned int len = 0;
32772 unsigned int i;
32773
32774 + pax_track_stack();
32775 +
32776 len += snprintf(buf+len, sizeof(buf)-len,
32777 "DEBUG LEVEL: 0x%08x\n\n", sc->debug.level);
32778
32779 @@ -384,6 +388,8 @@ static ssize_t read_file_antenna(struct
32780 unsigned int i;
32781 unsigned int v;
32782
32783 + pax_track_stack();
32784 +
32785 len += snprintf(buf+len, sizeof(buf)-len, "antenna mode\t%d\n",
32786 sc->ah->ah_ant_mode);
32787 len += snprintf(buf+len, sizeof(buf)-len, "default antenna\t%d\n",
32788 @@ -494,6 +500,8 @@ static ssize_t read_file_misc(struct fil
32789 unsigned int len = 0;
32790 u32 filt = ath5k_hw_get_rx_filter(sc->ah);
32791
32792 + pax_track_stack();
32793 +
32794 len += snprintf(buf+len, sizeof(buf)-len, "bssid-mask: %pM\n",
32795 sc->bssidmask);
32796 len += snprintf(buf+len, sizeof(buf)-len, "filter-flags: 0x%x ",
32797 @@ -550,6 +558,8 @@ static ssize_t read_file_frameerrors(str
32798 unsigned int len = 0;
32799 int i;
32800
32801 + pax_track_stack();
32802 +
32803 len += snprintf(buf+len, sizeof(buf)-len,
32804 "RX\n---------------------\n");
32805 len += snprintf(buf+len, sizeof(buf)-len, "CRC\t%u\t(%u%%)\n",
32806 @@ -667,6 +677,8 @@ static ssize_t read_file_ani(struct file
32807 char buf[700];
32808 unsigned int len = 0;
32809
32810 + pax_track_stack();
32811 +
32812 len += snprintf(buf+len, sizeof(buf)-len,
32813 "HW has PHY error counters:\t%s\n",
32814 sc->ah->ah_capabilities.cap_has_phyerr_counters ?
32815 @@ -827,6 +839,8 @@ static ssize_t read_file_queue(struct fi
32816 struct ath5k_buf *bf, *bf0;
32817 int i, n;
32818
32819 + pax_track_stack();
32820 +
32821 len += snprintf(buf+len, sizeof(buf)-len,
32822 "available txbuffers: %d\n", sc->txbuf_len);
32823
32824 diff -urNp linux-3.0.8/drivers/net/wireless/ath/ath9k/ar9003_calib.c linux-3.0.8/drivers/net/wireless/ath/ath9k/ar9003_calib.c
32825 --- linux-3.0.8/drivers/net/wireless/ath/ath9k/ar9003_calib.c 2011-07-21 22:17:23.000000000 -0400
32826 +++ linux-3.0.8/drivers/net/wireless/ath/ath9k/ar9003_calib.c 2011-08-23 21:48:14.000000000 -0400
32827 @@ -757,6 +757,8 @@ static void ar9003_hw_tx_iq_cal_post_pro
32828 int i, im, j;
32829 int nmeasurement;
32830
32831 + pax_track_stack();
32832 +
32833 for (i = 0; i < AR9300_MAX_CHAINS; i++) {
32834 if (ah->txchainmask & (1 << i))
32835 num_chains++;
32836 diff -urNp linux-3.0.8/drivers/net/wireless/ath/ath9k/ar9003_paprd.c linux-3.0.8/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
32837 --- linux-3.0.8/drivers/net/wireless/ath/ath9k/ar9003_paprd.c 2011-07-21 22:17:23.000000000 -0400
32838 +++ linux-3.0.8/drivers/net/wireless/ath/ath9k/ar9003_paprd.c 2011-08-23 21:48:14.000000000 -0400
32839 @@ -356,6 +356,8 @@ static bool create_pa_curve(u32 *data_L,
32840 int theta_low_bin = 0;
32841 int i;
32842
32843 + pax_track_stack();
32844 +
32845 /* disregard any bin that contains <= 16 samples */
32846 thresh_accum_cnt = 16;
32847 scale_factor = 5;
32848 diff -urNp linux-3.0.8/drivers/net/wireless/ath/ath9k/debug.c linux-3.0.8/drivers/net/wireless/ath/ath9k/debug.c
32849 --- linux-3.0.8/drivers/net/wireless/ath/ath9k/debug.c 2011-07-21 22:17:23.000000000 -0400
32850 +++ linux-3.0.8/drivers/net/wireless/ath/ath9k/debug.c 2011-08-23 21:48:14.000000000 -0400
32851 @@ -337,6 +337,8 @@ static ssize_t read_file_interrupt(struc
32852 char buf[512];
32853 unsigned int len = 0;
32854
32855 + pax_track_stack();
32856 +
32857 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
32858 len += snprintf(buf + len, sizeof(buf) - len,
32859 "%8s: %10u\n", "RXLP", sc->debug.stats.istats.rxlp);
32860 @@ -427,6 +429,8 @@ static ssize_t read_file_wiphy(struct fi
32861 u8 addr[ETH_ALEN];
32862 u32 tmp;
32863
32864 + pax_track_stack();
32865 +
32866 len += snprintf(buf + len, sizeof(buf) - len,
32867 "%s (chan=%d center-freq: %d MHz channel-type: %d (%s))\n",
32868 wiphy_name(sc->hw->wiphy),
32869 diff -urNp linux-3.0.8/drivers/net/wireless/ath/ath9k/htc_drv_debug.c linux-3.0.8/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
32870 --- linux-3.0.8/drivers/net/wireless/ath/ath9k/htc_drv_debug.c 2011-07-21 22:17:23.000000000 -0400
32871 +++ linux-3.0.8/drivers/net/wireless/ath/ath9k/htc_drv_debug.c 2011-08-23 21:48:14.000000000 -0400
32872 @@ -31,6 +31,8 @@ static ssize_t read_file_tgt_int_stats(s
32873 unsigned int len = 0;
32874 int ret = 0;
32875
32876 + pax_track_stack();
32877 +
32878 memset(&cmd_rsp, 0, sizeof(cmd_rsp));
32879
32880 ath9k_htc_ps_wakeup(priv);
32881 @@ -89,6 +91,8 @@ static ssize_t read_file_tgt_tx_stats(st
32882 unsigned int len = 0;
32883 int ret = 0;
32884
32885 + pax_track_stack();
32886 +
32887 memset(&cmd_rsp, 0, sizeof(cmd_rsp));
32888
32889 ath9k_htc_ps_wakeup(priv);
32890 @@ -159,6 +163,8 @@ static ssize_t read_file_tgt_rx_stats(st
32891 unsigned int len = 0;
32892 int ret = 0;
32893
32894 + pax_track_stack();
32895 +
32896 memset(&cmd_rsp, 0, sizeof(cmd_rsp));
32897
32898 ath9k_htc_ps_wakeup(priv);
32899 @@ -203,6 +209,8 @@ static ssize_t read_file_xmit(struct fil
32900 char buf[512];
32901 unsigned int len = 0;
32902
32903 + pax_track_stack();
32904 +
32905 len += snprintf(buf + len, sizeof(buf) - len,
32906 "%20s : %10u\n", "Buffers queued",
32907 priv->debug.tx_stats.buf_queued);
32908 @@ -376,6 +384,8 @@ static ssize_t read_file_slot(struct fil
32909 char buf[512];
32910 unsigned int len = 0;
32911
32912 + pax_track_stack();
32913 +
32914 spin_lock_bh(&priv->tx.tx_lock);
32915
32916 len += snprintf(buf + len, sizeof(buf) - len, "TX slot bitmap : ");
32917 @@ -411,6 +421,8 @@ static ssize_t read_file_queue(struct fi
32918 char buf[512];
32919 unsigned int len = 0;
32920
32921 + pax_track_stack();
32922 +
32923 len += snprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
32924 "Mgmt endpoint", skb_queue_len(&priv->tx.mgmt_ep_queue));
32925
32926 diff -urNp linux-3.0.8/drivers/net/wireless/ath/ath9k/hw.h linux-3.0.8/drivers/net/wireless/ath/ath9k/hw.h
32927 --- linux-3.0.8/drivers/net/wireless/ath/ath9k/hw.h 2011-10-24 08:05:21.000000000 -0400
32928 +++ linux-3.0.8/drivers/net/wireless/ath/ath9k/hw.h 2011-08-23 21:47:55.000000000 -0400
32929 @@ -585,7 +585,7 @@ struct ath_hw_private_ops {
32930
32931 /* ANI */
32932 void (*ani_cache_ini_regs)(struct ath_hw *ah);
32933 -};
32934 +} __no_const;
32935
32936 /**
32937 * struct ath_hw_ops - callbacks used by hardware code and driver code
32938 @@ -637,7 +637,7 @@ struct ath_hw_ops {
32939 void (*antdiv_comb_conf_set)(struct ath_hw *ah,
32940 struct ath_hw_antcomb_conf *antconf);
32941
32942 -};
32943 +} __no_const;
32944
32945 struct ath_nf_limits {
32946 s16 max;
32947 @@ -650,7 +650,7 @@ struct ath_nf_limits {
32948 #define AH_UNPLUGGED 0x2 /* The card has been physically removed. */
32949
32950 struct ath_hw {
32951 - struct ath_ops reg_ops;
32952 + ath_ops_no_const reg_ops;
32953
32954 struct ieee80211_hw *hw;
32955 struct ath_common common;
32956 diff -urNp linux-3.0.8/drivers/net/wireless/ath/ath.h linux-3.0.8/drivers/net/wireless/ath/ath.h
32957 --- linux-3.0.8/drivers/net/wireless/ath/ath.h 2011-07-21 22:17:23.000000000 -0400
32958 +++ linux-3.0.8/drivers/net/wireless/ath/ath.h 2011-08-23 21:47:55.000000000 -0400
32959 @@ -121,6 +121,7 @@ struct ath_ops {
32960 void (*write_flush) (void *);
32961 u32 (*rmw)(void *, u32 reg_offset, u32 set, u32 clr);
32962 };
32963 +typedef struct ath_ops __no_const ath_ops_no_const;
32964
32965 struct ath_common;
32966 struct ath_bus_ops;
32967 diff -urNp linux-3.0.8/drivers/net/wireless/ipw2x00/ipw2100.c linux-3.0.8/drivers/net/wireless/ipw2x00/ipw2100.c
32968 --- linux-3.0.8/drivers/net/wireless/ipw2x00/ipw2100.c 2011-07-21 22:17:23.000000000 -0400
32969 +++ linux-3.0.8/drivers/net/wireless/ipw2x00/ipw2100.c 2011-08-23 21:48:14.000000000 -0400
32970 @@ -2100,6 +2100,8 @@ static int ipw2100_set_essid(struct ipw2
32971 int err;
32972 DECLARE_SSID_BUF(ssid);
32973
32974 + pax_track_stack();
32975 +
32976 IPW_DEBUG_HC("SSID: '%s'\n", print_ssid(ssid, essid, ssid_len));
32977
32978 if (ssid_len)
32979 @@ -5449,6 +5451,8 @@ static int ipw2100_set_key(struct ipw210
32980 struct ipw2100_wep_key *wep_key = (void *)cmd.host_command_parameters;
32981 int err;
32982
32983 + pax_track_stack();
32984 +
32985 IPW_DEBUG_HC("WEP_KEY_INFO: index = %d, len = %d/%d\n",
32986 idx, keylen, len);
32987
32988 diff -urNp linux-3.0.8/drivers/net/wireless/ipw2x00/libipw_rx.c linux-3.0.8/drivers/net/wireless/ipw2x00/libipw_rx.c
32989 --- linux-3.0.8/drivers/net/wireless/ipw2x00/libipw_rx.c 2011-07-21 22:17:23.000000000 -0400
32990 +++ linux-3.0.8/drivers/net/wireless/ipw2x00/libipw_rx.c 2011-08-23 21:48:14.000000000 -0400
32991 @@ -1565,6 +1565,8 @@ static void libipw_process_probe_respons
32992 unsigned long flags;
32993 DECLARE_SSID_BUF(ssid);
32994
32995 + pax_track_stack();
32996 +
32997 LIBIPW_DEBUG_SCAN("'%s' (%pM"
32998 "): %c%c%c%c %c%c%c%c-%c%c%c%c %c%c%c%c\n",
32999 print_ssid(ssid, info_element->data, info_element->len),
33000 diff -urNp linux-3.0.8/drivers/net/wireless/iwlegacy/iwl3945-base.c linux-3.0.8/drivers/net/wireless/iwlegacy/iwl3945-base.c
33001 --- linux-3.0.8/drivers/net/wireless/iwlegacy/iwl3945-base.c 2011-10-24 08:05:30.000000000 -0400
33002 +++ linux-3.0.8/drivers/net/wireless/iwlegacy/iwl3945-base.c 2011-10-16 21:55:27.000000000 -0400
33003 @@ -3962,7 +3962,9 @@ static int iwl3945_pci_probe(struct pci_
33004 */
33005 if (iwl3945_mod_params.disable_hw_scan) {
33006 IWL_DEBUG_INFO(priv, "Disabling hw_scan\n");
33007 - iwl3945_hw_ops.hw_scan = NULL;
33008 + pax_open_kernel();
33009 + *(void **)&iwl3945_hw_ops.hw_scan = NULL;
33010 + pax_close_kernel();
33011 }
33012
33013 IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
33014 diff -urNp linux-3.0.8/drivers/net/wireless/iwlwifi/iwl-agn-rs.c linux-3.0.8/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
33015 --- linux-3.0.8/drivers/net/wireless/iwlwifi/iwl-agn-rs.c 2011-07-21 22:17:23.000000000 -0400
33016 +++ linux-3.0.8/drivers/net/wireless/iwlwifi/iwl-agn-rs.c 2011-08-23 21:48:14.000000000 -0400
33017 @@ -910,6 +910,8 @@ static void rs_tx_status(void *priv_r, s
33018 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
33019 struct iwl_rxon_context *ctx = sta_priv->common.ctx;
33020
33021 + pax_track_stack();
33022 +
33023 IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n");
33024
33025 /* Treat uninitialized rate scaling data same as non-existing. */
33026 @@ -2918,6 +2920,8 @@ static void rs_fill_link_cmd(struct iwl_
33027 container_of(lq_sta, struct iwl_station_priv, lq_sta);
33028 struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;
33029
33030 + pax_track_stack();
33031 +
33032 /* Override starting rate (index 0) if needed for debug purposes */
33033 rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
33034
33035 diff -urNp linux-3.0.8/drivers/net/wireless/iwlwifi/iwl-debugfs.c linux-3.0.8/drivers/net/wireless/iwlwifi/iwl-debugfs.c
33036 --- linux-3.0.8/drivers/net/wireless/iwlwifi/iwl-debugfs.c 2011-07-21 22:17:23.000000000 -0400
33037 +++ linux-3.0.8/drivers/net/wireless/iwlwifi/iwl-debugfs.c 2011-08-23 21:48:14.000000000 -0400
33038 @@ -548,6 +548,8 @@ static ssize_t iwl_dbgfs_status_read(str
33039 int pos = 0;
33040 const size_t bufsz = sizeof(buf);
33041
33042 + pax_track_stack();
33043 +
33044 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
33045 test_bit(STATUS_HCMD_ACTIVE, &priv->status));
33046 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INT_ENABLED:\t %d\n",
33047 @@ -680,6 +682,8 @@ static ssize_t iwl_dbgfs_qos_read(struct
33048 char buf[256 * NUM_IWL_RXON_CTX];
33049 const size_t bufsz = sizeof(buf);
33050
33051 + pax_track_stack();
33052 +
33053 for_each_context(priv, ctx) {
33054 pos += scnprintf(buf + pos, bufsz - pos, "context %d:\n",
33055 ctx->ctxid);
33056 diff -urNp linux-3.0.8/drivers/net/wireless/iwlwifi/iwl-debug.h linux-3.0.8/drivers/net/wireless/iwlwifi/iwl-debug.h
33057 --- linux-3.0.8/drivers/net/wireless/iwlwifi/iwl-debug.h 2011-07-21 22:17:23.000000000 -0400
33058 +++ linux-3.0.8/drivers/net/wireless/iwlwifi/iwl-debug.h 2011-08-23 21:47:55.000000000 -0400
33059 @@ -68,8 +68,8 @@ do {
33060 } while (0)
33061
33062 #else
33063 -#define IWL_DEBUG(__priv, level, fmt, args...)
33064 -#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
33065 +#define IWL_DEBUG(__priv, level, fmt, args...) do {} while (0)
33066 +#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) do {} while (0)
33067 static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
33068 const void *p, u32 len)
33069 {}
33070 diff -urNp linux-3.0.8/drivers/net/wireless/iwmc3200wifi/debugfs.c linux-3.0.8/drivers/net/wireless/iwmc3200wifi/debugfs.c
33071 --- linux-3.0.8/drivers/net/wireless/iwmc3200wifi/debugfs.c 2011-07-21 22:17:23.000000000 -0400
33072 +++ linux-3.0.8/drivers/net/wireless/iwmc3200wifi/debugfs.c 2011-08-23 21:48:14.000000000 -0400
33073 @@ -327,6 +327,8 @@ static ssize_t iwm_debugfs_fw_err_read(s
33074 int buf_len = 512;
33075 size_t len = 0;
33076
33077 + pax_track_stack();
33078 +
33079 if (*ppos != 0)
33080 return 0;
33081 if (count < sizeof(buf))
33082 diff -urNp linux-3.0.8/drivers/net/wireless/mac80211_hwsim.c linux-3.0.8/drivers/net/wireless/mac80211_hwsim.c
33083 --- linux-3.0.8/drivers/net/wireless/mac80211_hwsim.c 2011-07-21 22:17:23.000000000 -0400
33084 +++ linux-3.0.8/drivers/net/wireless/mac80211_hwsim.c 2011-08-23 21:47:55.000000000 -0400
33085 @@ -1260,9 +1260,11 @@ static int __init init_mac80211_hwsim(vo
33086 return -EINVAL;
33087
33088 if (fake_hw_scan) {
33089 - mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
33090 - mac80211_hwsim_ops.sw_scan_start = NULL;
33091 - mac80211_hwsim_ops.sw_scan_complete = NULL;
33092 + pax_open_kernel();
33093 + *(void **)&mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
33094 + *(void **)&mac80211_hwsim_ops.sw_scan_start = NULL;
33095 + *(void **)&mac80211_hwsim_ops.sw_scan_complete = NULL;
33096 + pax_close_kernel();
33097 }
33098
33099 spin_lock_init(&hwsim_radio_lock);
33100 diff -urNp linux-3.0.8/drivers/net/wireless/rndis_wlan.c linux-3.0.8/drivers/net/wireless/rndis_wlan.c
33101 --- linux-3.0.8/drivers/net/wireless/rndis_wlan.c 2011-07-21 22:17:23.000000000 -0400
33102 +++ linux-3.0.8/drivers/net/wireless/rndis_wlan.c 2011-08-23 21:47:55.000000000 -0400
33103 @@ -1277,7 +1277,7 @@ static int set_rts_threshold(struct usbn
33104
33105 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
33106
33107 - if (rts_threshold < 0 || rts_threshold > 2347)
33108 + if (rts_threshold > 2347)
33109 rts_threshold = 2347;
33110
33111 tmp = cpu_to_le32(rts_threshold);
33112 diff -urNp linux-3.0.8/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c linux-3.0.8/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
33113 --- linux-3.0.8/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c 2011-07-21 22:17:23.000000000 -0400
33114 +++ linux-3.0.8/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c 2011-08-23 21:48:14.000000000 -0400
33115 @@ -837,6 +837,8 @@ bool _rtl92c_phy_sw_chnl_step_by_step(st
33116 u8 rfpath;
33117 u8 num_total_rfpath = rtlphy->num_total_rfpath;
33118
33119 + pax_track_stack();
33120 +
33121 precommoncmdcnt = 0;
33122 _rtl92c_phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++,
33123 MAX_PRECMD_CNT,
33124 diff -urNp linux-3.0.8/drivers/net/wireless/wl1251/wl1251.h linux-3.0.8/drivers/net/wireless/wl1251/wl1251.h
33125 --- linux-3.0.8/drivers/net/wireless/wl1251/wl1251.h 2011-07-21 22:17:23.000000000 -0400
33126 +++ linux-3.0.8/drivers/net/wireless/wl1251/wl1251.h 2011-08-23 21:47:55.000000000 -0400
33127 @@ -266,7 +266,7 @@ struct wl1251_if_operations {
33128 void (*reset)(struct wl1251 *wl);
33129 void (*enable_irq)(struct wl1251 *wl);
33130 void (*disable_irq)(struct wl1251 *wl);
33131 -};
33132 +} __no_const;
33133
33134 struct wl1251 {
33135 struct ieee80211_hw *hw;
33136 diff -urNp linux-3.0.8/drivers/net/wireless/wl12xx/spi.c linux-3.0.8/drivers/net/wireless/wl12xx/spi.c
33137 --- linux-3.0.8/drivers/net/wireless/wl12xx/spi.c 2011-07-21 22:17:23.000000000 -0400
33138 +++ linux-3.0.8/drivers/net/wireless/wl12xx/spi.c 2011-08-23 21:48:14.000000000 -0400
33139 @@ -280,6 +280,8 @@ static void wl1271_spi_raw_write(struct
33140 u32 chunk_len;
33141 int i;
33142
33143 + pax_track_stack();
33144 +
33145 WARN_ON(len > WL1271_AGGR_BUFFER_SIZE);
33146
33147 spi_message_init(&m);
33148 diff -urNp linux-3.0.8/drivers/oprofile/buffer_sync.c linux-3.0.8/drivers/oprofile/buffer_sync.c
33149 --- linux-3.0.8/drivers/oprofile/buffer_sync.c 2011-07-21 22:17:23.000000000 -0400
33150 +++ linux-3.0.8/drivers/oprofile/buffer_sync.c 2011-08-23 21:47:55.000000000 -0400
33151 @@ -343,7 +343,7 @@ static void add_data(struct op_entry *en
33152 if (cookie == NO_COOKIE)
33153 offset = pc;
33154 if (cookie == INVALID_COOKIE) {
33155 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
33156 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
33157 offset = pc;
33158 }
33159 if (cookie != last_cookie) {
33160 @@ -387,14 +387,14 @@ add_sample(struct mm_struct *mm, struct
33161 /* add userspace sample */
33162
33163 if (!mm) {
33164 - atomic_inc(&oprofile_stats.sample_lost_no_mm);
33165 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
33166 return 0;
33167 }
33168
33169 cookie = lookup_dcookie(mm, s->eip, &offset);
33170
33171 if (cookie == INVALID_COOKIE) {
33172 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
33173 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
33174 return 0;
33175 }
33176
33177 @@ -563,7 +563,7 @@ void sync_buffer(int cpu)
33178 /* ignore backtraces if failed to add a sample */
33179 if (state == sb_bt_start) {
33180 state = sb_bt_ignore;
33181 - atomic_inc(&oprofile_stats.bt_lost_no_mapping);
33182 + atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
33183 }
33184 }
33185 release_mm(mm);
33186 diff -urNp linux-3.0.8/drivers/oprofile/event_buffer.c linux-3.0.8/drivers/oprofile/event_buffer.c
33187 --- linux-3.0.8/drivers/oprofile/event_buffer.c 2011-07-21 22:17:23.000000000 -0400
33188 +++ linux-3.0.8/drivers/oprofile/event_buffer.c 2011-08-23 21:47:55.000000000 -0400
33189 @@ -53,7 +53,7 @@ void add_event_entry(unsigned long value
33190 }
33191
33192 if (buffer_pos == buffer_size) {
33193 - atomic_inc(&oprofile_stats.event_lost_overflow);
33194 + atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
33195 return;
33196 }
33197
33198 diff -urNp linux-3.0.8/drivers/oprofile/oprof.c linux-3.0.8/drivers/oprofile/oprof.c
33199 --- linux-3.0.8/drivers/oprofile/oprof.c 2011-07-21 22:17:23.000000000 -0400
33200 +++ linux-3.0.8/drivers/oprofile/oprof.c 2011-08-23 21:47:55.000000000 -0400
33201 @@ -110,7 +110,7 @@ static void switch_worker(struct work_st
33202 if (oprofile_ops.switch_events())
33203 return;
33204
33205 - atomic_inc(&oprofile_stats.multiplex_counter);
33206 + atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
33207 start_switch_worker();
33208 }
33209
33210 diff -urNp linux-3.0.8/drivers/oprofile/oprofilefs.c linux-3.0.8/drivers/oprofile/oprofilefs.c
33211 --- linux-3.0.8/drivers/oprofile/oprofilefs.c 2011-07-21 22:17:23.000000000 -0400
33212 +++ linux-3.0.8/drivers/oprofile/oprofilefs.c 2011-08-23 21:47:55.000000000 -0400
33213 @@ -186,7 +186,7 @@ static const struct file_operations atom
33214
33215
33216 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
33217 - char const *name, atomic_t *val)
33218 + char const *name, atomic_unchecked_t *val)
33219 {
33220 return __oprofilefs_create_file(sb, root, name,
33221 &atomic_ro_fops, 0444, val);
33222 diff -urNp linux-3.0.8/drivers/oprofile/oprofile_stats.c linux-3.0.8/drivers/oprofile/oprofile_stats.c
33223 --- linux-3.0.8/drivers/oprofile/oprofile_stats.c 2011-07-21 22:17:23.000000000 -0400
33224 +++ linux-3.0.8/drivers/oprofile/oprofile_stats.c 2011-08-23 21:47:55.000000000 -0400
33225 @@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
33226 cpu_buf->sample_invalid_eip = 0;
33227 }
33228
33229 - atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
33230 - atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
33231 - atomic_set(&oprofile_stats.event_lost_overflow, 0);
33232 - atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
33233 - atomic_set(&oprofile_stats.multiplex_counter, 0);
33234 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
33235 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
33236 + atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
33237 + atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
33238 + atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
33239 }
33240
33241
33242 diff -urNp linux-3.0.8/drivers/oprofile/oprofile_stats.h linux-3.0.8/drivers/oprofile/oprofile_stats.h
33243 --- linux-3.0.8/drivers/oprofile/oprofile_stats.h 2011-07-21 22:17:23.000000000 -0400
33244 +++ linux-3.0.8/drivers/oprofile/oprofile_stats.h 2011-08-23 21:47:55.000000000 -0400
33245 @@ -13,11 +13,11 @@
33246 #include <asm/atomic.h>
33247
33248 struct oprofile_stat_struct {
33249 - atomic_t sample_lost_no_mm;
33250 - atomic_t sample_lost_no_mapping;
33251 - atomic_t bt_lost_no_mapping;
33252 - atomic_t event_lost_overflow;
33253 - atomic_t multiplex_counter;
33254 + atomic_unchecked_t sample_lost_no_mm;
33255 + atomic_unchecked_t sample_lost_no_mapping;
33256 + atomic_unchecked_t bt_lost_no_mapping;
33257 + atomic_unchecked_t event_lost_overflow;
33258 + atomic_unchecked_t multiplex_counter;
33259 };
33260
33261 extern struct oprofile_stat_struct oprofile_stats;
33262 diff -urNp linux-3.0.8/drivers/parport/procfs.c linux-3.0.8/drivers/parport/procfs.c
33263 --- linux-3.0.8/drivers/parport/procfs.c 2011-07-21 22:17:23.000000000 -0400
33264 +++ linux-3.0.8/drivers/parport/procfs.c 2011-08-23 21:47:55.000000000 -0400
33265 @@ -64,7 +64,7 @@ static int do_active_device(ctl_table *t
33266
33267 *ppos += len;
33268
33269 - return copy_to_user(result, buffer, len) ? -EFAULT : 0;
33270 + return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
33271 }
33272
33273 #ifdef CONFIG_PARPORT_1284
33274 @@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table
33275
33276 *ppos += len;
33277
33278 - return copy_to_user (result, buffer, len) ? -EFAULT : 0;
33279 + return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
33280 }
33281 #endif /* IEEE1284.3 support. */
33282
33283 diff -urNp linux-3.0.8/drivers/pci/hotplug/cpci_hotplug.h linux-3.0.8/drivers/pci/hotplug/cpci_hotplug.h
33284 --- linux-3.0.8/drivers/pci/hotplug/cpci_hotplug.h 2011-07-21 22:17:23.000000000 -0400
33285 +++ linux-3.0.8/drivers/pci/hotplug/cpci_hotplug.h 2011-08-23 21:47:55.000000000 -0400
33286 @@ -59,7 +59,7 @@ struct cpci_hp_controller_ops {
33287 int (*hardware_test) (struct slot* slot, u32 value);
33288 u8 (*get_power) (struct slot* slot);
33289 int (*set_power) (struct slot* slot, int value);
33290 -};
33291 +} __no_const;
33292
33293 struct cpci_hp_controller {
33294 unsigned int irq;
33295 diff -urNp linux-3.0.8/drivers/pci/hotplug/cpqphp_nvram.c linux-3.0.8/drivers/pci/hotplug/cpqphp_nvram.c
33296 --- linux-3.0.8/drivers/pci/hotplug/cpqphp_nvram.c 2011-07-21 22:17:23.000000000 -0400
33297 +++ linux-3.0.8/drivers/pci/hotplug/cpqphp_nvram.c 2011-08-23 21:47:55.000000000 -0400
33298 @@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_
33299
33300 void compaq_nvram_init (void __iomem *rom_start)
33301 {
33302 +
33303 +#ifndef CONFIG_PAX_KERNEXEC
33304 if (rom_start) {
33305 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
33306 }
33307 +#endif
33308 +
33309 dbg("int15 entry = %p\n", compaq_int15_entry_point);
33310
33311 /* initialize our int15 lock */
33312 diff -urNp linux-3.0.8/drivers/pci/pcie/aspm.c linux-3.0.8/drivers/pci/pcie/aspm.c
33313 --- linux-3.0.8/drivers/pci/pcie/aspm.c 2011-07-21 22:17:23.000000000 -0400
33314 +++ linux-3.0.8/drivers/pci/pcie/aspm.c 2011-08-23 21:47:55.000000000 -0400
33315 @@ -27,9 +27,9 @@
33316 #define MODULE_PARAM_PREFIX "pcie_aspm."
33317
33318 /* Note: those are not register definitions */
33319 -#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
33320 -#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
33321 -#define ASPM_STATE_L1 (4) /* L1 state */
33322 +#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
33323 +#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
33324 +#define ASPM_STATE_L1 (4U) /* L1 state */
33325 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
33326 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
33327
33328 diff -urNp linux-3.0.8/drivers/pci/probe.c linux-3.0.8/drivers/pci/probe.c
33329 --- linux-3.0.8/drivers/pci/probe.c 2011-07-21 22:17:23.000000000 -0400
33330 +++ linux-3.0.8/drivers/pci/probe.c 2011-08-23 21:47:55.000000000 -0400
33331 @@ -129,7 +129,7 @@ int __pci_read_base(struct pci_dev *dev,
33332 u32 l, sz, mask;
33333 u16 orig_cmd;
33334
33335 - mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
33336 + mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
33337
33338 if (!dev->mmio_always_on) {
33339 pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
33340 diff -urNp linux-3.0.8/drivers/pci/proc.c linux-3.0.8/drivers/pci/proc.c
33341 --- linux-3.0.8/drivers/pci/proc.c 2011-07-21 22:17:23.000000000 -0400
33342 +++ linux-3.0.8/drivers/pci/proc.c 2011-08-23 21:48:14.000000000 -0400
33343 @@ -476,7 +476,16 @@ static const struct file_operations proc
33344 static int __init pci_proc_init(void)
33345 {
33346 struct pci_dev *dev = NULL;
33347 +
33348 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
33349 +#ifdef CONFIG_GRKERNSEC_PROC_USER
33350 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
33351 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
33352 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
33353 +#endif
33354 +#else
33355 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
33356 +#endif
33357 proc_create("devices", 0, proc_bus_pci_dir,
33358 &proc_bus_pci_dev_operations);
33359 proc_initialized = 1;
33360 diff -urNp linux-3.0.8/drivers/pci/xen-pcifront.c linux-3.0.8/drivers/pci/xen-pcifront.c
33361 --- linux-3.0.8/drivers/pci/xen-pcifront.c 2011-07-21 22:17:23.000000000 -0400
33362 +++ linux-3.0.8/drivers/pci/xen-pcifront.c 2011-08-23 21:48:14.000000000 -0400
33363 @@ -187,6 +187,8 @@ static int pcifront_bus_read(struct pci_
33364 struct pcifront_sd *sd = bus->sysdata;
33365 struct pcifront_device *pdev = pcifront_get_pdev(sd);
33366
33367 + pax_track_stack();
33368 +
33369 if (verbose_request)
33370 dev_info(&pdev->xdev->dev,
33371 "read dev=%04x:%02x:%02x.%01x - offset %x size %d\n",
33372 @@ -226,6 +228,8 @@ static int pcifront_bus_write(struct pci
33373 struct pcifront_sd *sd = bus->sysdata;
33374 struct pcifront_device *pdev = pcifront_get_pdev(sd);
33375
33376 + pax_track_stack();
33377 +
33378 if (verbose_request)
33379 dev_info(&pdev->xdev->dev,
33380 "write dev=%04x:%02x:%02x.%01x - "
33381 @@ -258,6 +262,8 @@ static int pci_frontend_enable_msix(stru
33382 struct pcifront_device *pdev = pcifront_get_pdev(sd);
33383 struct msi_desc *entry;
33384
33385 + pax_track_stack();
33386 +
33387 if (nvec > SH_INFO_MAX_VEC) {
33388 dev_err(&dev->dev, "too much vector for pci frontend: %x."
33389 " Increase SH_INFO_MAX_VEC.\n", nvec);
33390 @@ -309,6 +315,8 @@ static void pci_frontend_disable_msix(st
33391 struct pcifront_sd *sd = dev->bus->sysdata;
33392 struct pcifront_device *pdev = pcifront_get_pdev(sd);
33393
33394 + pax_track_stack();
33395 +
33396 err = do_pci_op(pdev, &op);
33397
33398 /* What should do for error ? */
33399 @@ -328,6 +336,8 @@ static int pci_frontend_enable_msi(struc
33400 struct pcifront_sd *sd = dev->bus->sysdata;
33401 struct pcifront_device *pdev = pcifront_get_pdev(sd);
33402
33403 + pax_track_stack();
33404 +
33405 err = do_pci_op(pdev, &op);
33406 if (likely(!err)) {
33407 vector[0] = op.value;
33408 diff -urNp linux-3.0.8/drivers/platform/x86/thinkpad_acpi.c linux-3.0.8/drivers/platform/x86/thinkpad_acpi.c
33409 --- linux-3.0.8/drivers/platform/x86/thinkpad_acpi.c 2011-07-21 22:17:23.000000000 -0400
33410 +++ linux-3.0.8/drivers/platform/x86/thinkpad_acpi.c 2011-08-23 21:47:55.000000000 -0400
33411 @@ -2094,7 +2094,7 @@ static int hotkey_mask_get(void)
33412 return 0;
33413 }
33414
33415 -void static hotkey_mask_warn_incomplete_mask(void)
33416 +static void hotkey_mask_warn_incomplete_mask(void)
33417 {
33418 /* log only what the user can fix... */
33419 const u32 wantedmask = hotkey_driver_mask &
33420 diff -urNp linux-3.0.8/drivers/pnp/pnpbios/bioscalls.c linux-3.0.8/drivers/pnp/pnpbios/bioscalls.c
33421 --- linux-3.0.8/drivers/pnp/pnpbios/bioscalls.c 2011-07-21 22:17:23.000000000 -0400
33422 +++ linux-3.0.8/drivers/pnp/pnpbios/bioscalls.c 2011-08-23 21:47:55.000000000 -0400
33423 @@ -59,7 +59,7 @@ do { \
33424 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
33425 } while(0)
33426
33427 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
33428 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
33429 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
33430
33431 /*
33432 @@ -96,7 +96,10 @@ static inline u16 call_pnp_bios(u16 func
33433
33434 cpu = get_cpu();
33435 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
33436 +
33437 + pax_open_kernel();
33438 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
33439 + pax_close_kernel();
33440
33441 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
33442 spin_lock_irqsave(&pnp_bios_lock, flags);
33443 @@ -134,7 +137,10 @@ static inline u16 call_pnp_bios(u16 func
33444 :"memory");
33445 spin_unlock_irqrestore(&pnp_bios_lock, flags);
33446
33447 + pax_open_kernel();
33448 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
33449 + pax_close_kernel();
33450 +
33451 put_cpu();
33452
33453 /* If we get here and this is set then the PnP BIOS faulted on us. */
33454 @@ -468,7 +474,7 @@ int pnp_bios_read_escd(char *data, u32 n
33455 return status;
33456 }
33457
33458 -void pnpbios_calls_init(union pnp_bios_install_struct *header)
33459 +void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
33460 {
33461 int i;
33462
33463 @@ -476,6 +482,8 @@ void pnpbios_calls_init(union pnp_bios_i
33464 pnp_bios_callpoint.offset = header->fields.pm16offset;
33465 pnp_bios_callpoint.segment = PNP_CS16;
33466
33467 + pax_open_kernel();
33468 +
33469 for_each_possible_cpu(i) {
33470 struct desc_struct *gdt = get_cpu_gdt_table(i);
33471 if (!gdt)
33472 @@ -487,4 +495,6 @@ void pnpbios_calls_init(union pnp_bios_i
33473 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
33474 (unsigned long)__va(header->fields.pm16dseg));
33475 }
33476 +
33477 + pax_close_kernel();
33478 }
33479 diff -urNp linux-3.0.8/drivers/pnp/resource.c linux-3.0.8/drivers/pnp/resource.c
33480 --- linux-3.0.8/drivers/pnp/resource.c 2011-07-21 22:17:23.000000000 -0400
33481 +++ linux-3.0.8/drivers/pnp/resource.c 2011-08-23 21:47:55.000000000 -0400
33482 @@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, s
33483 return 1;
33484
33485 /* check if the resource is valid */
33486 - if (*irq < 0 || *irq > 15)
33487 + if (*irq > 15)
33488 return 0;
33489
33490 /* check if the resource is reserved */
33491 @@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, s
33492 return 1;
33493
33494 /* check if the resource is valid */
33495 - if (*dma < 0 || *dma == 4 || *dma > 7)
33496 + if (*dma == 4 || *dma > 7)
33497 return 0;
33498
33499 /* check if the resource is reserved */
33500 diff -urNp linux-3.0.8/drivers/power/bq27x00_battery.c linux-3.0.8/drivers/power/bq27x00_battery.c
33501 --- linux-3.0.8/drivers/power/bq27x00_battery.c 2011-07-21 22:17:23.000000000 -0400
33502 +++ linux-3.0.8/drivers/power/bq27x00_battery.c 2011-08-23 21:47:55.000000000 -0400
33503 @@ -67,7 +67,7 @@
33504 struct bq27x00_device_info;
33505 struct bq27x00_access_methods {
33506 int (*read)(struct bq27x00_device_info *di, u8 reg, bool single);
33507 -};
33508 +} __no_const;
33509
33510 enum bq27x00_chip { BQ27000, BQ27500 };
33511
33512 diff -urNp linux-3.0.8/drivers/regulator/max8660.c linux-3.0.8/drivers/regulator/max8660.c
33513 --- linux-3.0.8/drivers/regulator/max8660.c 2011-07-21 22:17:23.000000000 -0400
33514 +++ linux-3.0.8/drivers/regulator/max8660.c 2011-08-23 21:47:55.000000000 -0400
33515 @@ -383,8 +383,10 @@ static int __devinit max8660_probe(struc
33516 max8660->shadow_regs[MAX8660_OVER1] = 5;
33517 } else {
33518 /* Otherwise devices can be toggled via software */
33519 - max8660_dcdc_ops.enable = max8660_dcdc_enable;
33520 - max8660_dcdc_ops.disable = max8660_dcdc_disable;
33521 + pax_open_kernel();
33522 + *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
33523 + *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
33524 + pax_close_kernel();
33525 }
33526
33527 /*
33528 diff -urNp linux-3.0.8/drivers/regulator/mc13892-regulator.c linux-3.0.8/drivers/regulator/mc13892-regulator.c
33529 --- linux-3.0.8/drivers/regulator/mc13892-regulator.c 2011-07-21 22:17:23.000000000 -0400
33530 +++ linux-3.0.8/drivers/regulator/mc13892-regulator.c 2011-08-23 21:47:55.000000000 -0400
33531 @@ -564,10 +564,12 @@ static int __devinit mc13892_regulator_p
33532 }
33533 mc13xxx_unlock(mc13892);
33534
33535 - mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
33536 + pax_open_kernel();
33537 + *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
33538 = mc13892_vcam_set_mode;
33539 - mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
33540 + *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
33541 = mc13892_vcam_get_mode;
33542 + pax_close_kernel();
33543 for (i = 0; i < pdata->num_regulators; i++) {
33544 init_data = &pdata->regulators[i];
33545 priv->regulators[i] = regulator_register(
33546 diff -urNp linux-3.0.8/drivers/rtc/rtc-dev.c linux-3.0.8/drivers/rtc/rtc-dev.c
33547 --- linux-3.0.8/drivers/rtc/rtc-dev.c 2011-07-21 22:17:23.000000000 -0400
33548 +++ linux-3.0.8/drivers/rtc/rtc-dev.c 2011-08-23 21:48:14.000000000 -0400
33549 @@ -14,6 +14,7 @@
33550 #include <linux/module.h>
33551 #include <linux/rtc.h>
33552 #include <linux/sched.h>
33553 +#include <linux/grsecurity.h>
33554 #include "rtc-core.h"
33555
33556 static dev_t rtc_devt;
33557 @@ -345,6 +346,8 @@ static long rtc_dev_ioctl(struct file *f
33558 if (copy_from_user(&tm, uarg, sizeof(tm)))
33559 return -EFAULT;
33560
33561 + gr_log_timechange();
33562 +
33563 return rtc_set_time(rtc, &tm);
33564
33565 case RTC_PIE_ON:
33566 diff -urNp linux-3.0.8/drivers/scsi/aacraid/aacraid.h linux-3.0.8/drivers/scsi/aacraid/aacraid.h
33567 --- linux-3.0.8/drivers/scsi/aacraid/aacraid.h 2011-07-21 22:17:23.000000000 -0400
33568 +++ linux-3.0.8/drivers/scsi/aacraid/aacraid.h 2011-08-23 21:47:55.000000000 -0400
33569 @@ -492,7 +492,7 @@ struct adapter_ops
33570 int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
33571 /* Administrative operations */
33572 int (*adapter_comm)(struct aac_dev * dev, int comm);
33573 -};
33574 +} __no_const;
33575
33576 /*
33577 * Define which interrupt handler needs to be installed
33578 diff -urNp linux-3.0.8/drivers/scsi/aacraid/commctrl.c linux-3.0.8/drivers/scsi/aacraid/commctrl.c
33579 --- linux-3.0.8/drivers/scsi/aacraid/commctrl.c 2011-07-21 22:17:23.000000000 -0400
33580 +++ linux-3.0.8/drivers/scsi/aacraid/commctrl.c 2011-08-23 21:48:14.000000000 -0400
33581 @@ -482,6 +482,7 @@ static int aac_send_raw_srb(struct aac_d
33582 u32 actual_fibsize64, actual_fibsize = 0;
33583 int i;
33584
33585 + pax_track_stack();
33586
33587 if (dev->in_reset) {
33588 dprintk((KERN_DEBUG"aacraid: send raw srb -EBUSY\n"));
33589 diff -urNp linux-3.0.8/drivers/scsi/aacraid/linit.c linux-3.0.8/drivers/scsi/aacraid/linit.c
33590 --- linux-3.0.8/drivers/scsi/aacraid/linit.c 2011-07-21 22:17:23.000000000 -0400
33591 +++ linux-3.0.8/drivers/scsi/aacraid/linit.c 2011-10-11 10:44:33.000000000 -0400
33592 @@ -92,7 +92,7 @@ static DECLARE_PCI_DEVICE_TABLE(aac_pci_
33593 #elif defined(__devinitconst)
33594 static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
33595 #else
33596 -static const struct pci_device_id aac_pci_tbl[] __devinitdata = {
33597 +static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
33598 #endif
33599 { 0x1028, 0x0001, 0x1028, 0x0001, 0, 0, 0 }, /* PERC 2/Si (Iguana/PERC2Si) */
33600 { 0x1028, 0x0002, 0x1028, 0x0002, 0, 0, 1 }, /* PERC 3/Di (Opal/PERC3Di) */
33601 diff -urNp linux-3.0.8/drivers/scsi/aic94xx/aic94xx_init.c linux-3.0.8/drivers/scsi/aic94xx/aic94xx_init.c
33602 --- linux-3.0.8/drivers/scsi/aic94xx/aic94xx_init.c 2011-07-21 22:17:23.000000000 -0400
33603 +++ linux-3.0.8/drivers/scsi/aic94xx/aic94xx_init.c 2011-10-11 10:44:33.000000000 -0400
33604 @@ -1012,7 +1012,7 @@ static struct sas_domain_function_templa
33605 .lldd_control_phy = asd_control_phy,
33606 };
33607
33608 -static const struct pci_device_id aic94xx_pci_table[] __devinitdata = {
33609 +static const struct pci_device_id aic94xx_pci_table[] __devinitconst = {
33610 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x410),0, 0, 1},
33611 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x412),0, 0, 1},
33612 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x416),0, 0, 1},
33613 diff -urNp linux-3.0.8/drivers/scsi/bfa/bfad.c linux-3.0.8/drivers/scsi/bfa/bfad.c
33614 --- linux-3.0.8/drivers/scsi/bfa/bfad.c 2011-07-21 22:17:23.000000000 -0400
33615 +++ linux-3.0.8/drivers/scsi/bfa/bfad.c 2011-08-23 21:48:14.000000000 -0400
33616 @@ -1032,6 +1032,8 @@ bfad_start_ops(struct bfad_s *bfad) {
33617 struct bfad_vport_s *vport, *vport_new;
33618 struct bfa_fcs_driver_info_s driver_info;
33619
33620 + pax_track_stack();
33621 +
33622 /* Fill the driver_info info to fcs*/
33623 memset(&driver_info, 0, sizeof(driver_info));
33624 strncpy(driver_info.version, BFAD_DRIVER_VERSION,
33625 diff -urNp linux-3.0.8/drivers/scsi/bfa/bfa_fcs_lport.c linux-3.0.8/drivers/scsi/bfa/bfa_fcs_lport.c
33626 --- linux-3.0.8/drivers/scsi/bfa/bfa_fcs_lport.c 2011-07-21 22:17:23.000000000 -0400
33627 +++ linux-3.0.8/drivers/scsi/bfa/bfa_fcs_lport.c 2011-08-23 21:48:14.000000000 -0400
33628 @@ -1559,6 +1559,8 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struc
33629 u16 len, count;
33630 u16 templen;
33631
33632 + pax_track_stack();
33633 +
33634 /*
33635 * get hba attributes
33636 */
33637 @@ -1836,6 +1838,8 @@ bfa_fcs_lport_fdmi_build_portattr_block(
33638 u8 count = 0;
33639 u16 templen;
33640
33641 + pax_track_stack();
33642 +
33643 /*
33644 * get port attributes
33645 */
33646 diff -urNp linux-3.0.8/drivers/scsi/bfa/bfa_fcs_rport.c linux-3.0.8/drivers/scsi/bfa/bfa_fcs_rport.c
33647 --- linux-3.0.8/drivers/scsi/bfa/bfa_fcs_rport.c 2011-07-21 22:17:23.000000000 -0400
33648 +++ linux-3.0.8/drivers/scsi/bfa/bfa_fcs_rport.c 2011-08-23 21:48:14.000000000 -0400
33649 @@ -1844,6 +1844,8 @@ bfa_fcs_rport_process_rpsc(struct bfa_fc
33650 struct fc_rpsc_speed_info_s speeds;
33651 struct bfa_port_attr_s pport_attr;
33652
33653 + pax_track_stack();
33654 +
33655 bfa_trc(port->fcs, rx_fchs->s_id);
33656 bfa_trc(port->fcs, rx_fchs->d_id);
33657
33658 diff -urNp linux-3.0.8/drivers/scsi/bfa/bfa.h linux-3.0.8/drivers/scsi/bfa/bfa.h
33659 --- linux-3.0.8/drivers/scsi/bfa/bfa.h 2011-07-21 22:17:23.000000000 -0400
33660 +++ linux-3.0.8/drivers/scsi/bfa/bfa.h 2011-08-23 21:47:55.000000000 -0400
33661 @@ -238,7 +238,7 @@ struct bfa_hwif_s {
33662 u32 *nvecs, u32 *maxvec);
33663 void (*hw_msix_get_rme_range) (struct bfa_s *bfa, u32 *start,
33664 u32 *end);
33665 -};
33666 +} __no_const;
33667 typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
33668
33669 struct bfa_iocfc_s {
33670 diff -urNp linux-3.0.8/drivers/scsi/bfa/bfa_ioc.h linux-3.0.8/drivers/scsi/bfa/bfa_ioc.h
33671 --- linux-3.0.8/drivers/scsi/bfa/bfa_ioc.h 2011-07-21 22:17:23.000000000 -0400
33672 +++ linux-3.0.8/drivers/scsi/bfa/bfa_ioc.h 2011-08-23 21:47:55.000000000 -0400
33673 @@ -196,7 +196,7 @@ struct bfa_ioc_cbfn_s {
33674 bfa_ioc_disable_cbfn_t disable_cbfn;
33675 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
33676 bfa_ioc_reset_cbfn_t reset_cbfn;
33677 -};
33678 +} __no_const;
33679
33680 /*
33681 * Heartbeat failure notification queue element.
33682 @@ -268,7 +268,7 @@ struct bfa_ioc_hwif_s {
33683 void (*ioc_sync_leave) (struct bfa_ioc_s *ioc);
33684 void (*ioc_sync_ack) (struct bfa_ioc_s *ioc);
33685 bfa_boolean_t (*ioc_sync_complete) (struct bfa_ioc_s *ioc);
33686 -};
33687 +} __no_const;
33688
33689 #define bfa_ioc_pcifn(__ioc) ((__ioc)->pcidev.pci_func)
33690 #define bfa_ioc_devid(__ioc) ((__ioc)->pcidev.device_id)
33691 diff -urNp linux-3.0.8/drivers/scsi/BusLogic.c linux-3.0.8/drivers/scsi/BusLogic.c
33692 --- linux-3.0.8/drivers/scsi/BusLogic.c 2011-07-21 22:17:23.000000000 -0400
33693 +++ linux-3.0.8/drivers/scsi/BusLogic.c 2011-08-23 21:48:14.000000000 -0400
33694 @@ -962,6 +962,8 @@ static int __init BusLogic_InitializeFla
33695 static void __init BusLogic_InitializeProbeInfoList(struct BusLogic_HostAdapter
33696 *PrototypeHostAdapter)
33697 {
33698 + pax_track_stack();
33699 +
33700 /*
33701 If a PCI BIOS is present, interrogate it for MultiMaster and FlashPoint
33702 Host Adapters; otherwise, default to the standard ISA MultiMaster probe.
33703 diff -urNp linux-3.0.8/drivers/scsi/dpt_i2o.c linux-3.0.8/drivers/scsi/dpt_i2o.c
33704 --- linux-3.0.8/drivers/scsi/dpt_i2o.c 2011-07-21 22:17:23.000000000 -0400
33705 +++ linux-3.0.8/drivers/scsi/dpt_i2o.c 2011-08-23 21:48:14.000000000 -0400
33706 @@ -1811,6 +1811,8 @@ static int adpt_i2o_passthru(adpt_hba* p
33707 dma_addr_t addr;
33708 ulong flags = 0;
33709
33710 + pax_track_stack();
33711 +
33712 memset(&msg, 0, MAX_MESSAGE_SIZE*4);
33713 // get user msg size in u32s
33714 if(get_user(size, &user_msg[0])){
33715 @@ -2317,6 +2319,8 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pH
33716 s32 rcode;
33717 dma_addr_t addr;
33718
33719 + pax_track_stack();
33720 +
33721 memset(msg, 0 , sizeof(msg));
33722 len = scsi_bufflen(cmd);
33723 direction = 0x00000000;
33724 diff -urNp linux-3.0.8/drivers/scsi/eata.c linux-3.0.8/drivers/scsi/eata.c
33725 --- linux-3.0.8/drivers/scsi/eata.c 2011-07-21 22:17:23.000000000 -0400
33726 +++ linux-3.0.8/drivers/scsi/eata.c 2011-08-23 21:48:14.000000000 -0400
33727 @@ -1087,6 +1087,8 @@ static int port_detect(unsigned long por
33728 struct hostdata *ha;
33729 char name[16];
33730
33731 + pax_track_stack();
33732 +
33733 sprintf(name, "%s%d", driver_name, j);
33734
33735 if (!request_region(port_base, REGION_SIZE, driver_name)) {
33736 diff -urNp linux-3.0.8/drivers/scsi/fcoe/fcoe_ctlr.c linux-3.0.8/drivers/scsi/fcoe/fcoe_ctlr.c
33737 --- linux-3.0.8/drivers/scsi/fcoe/fcoe_ctlr.c 2011-07-21 22:17:23.000000000 -0400
33738 +++ linux-3.0.8/drivers/scsi/fcoe/fcoe_ctlr.c 2011-08-23 21:48:14.000000000 -0400
33739 @@ -2503,6 +2503,8 @@ static int fcoe_ctlr_vn_recv(struct fcoe
33740 } buf;
33741 int rc;
33742
33743 + pax_track_stack();
33744 +
33745 fiph = (struct fip_header *)skb->data;
33746 sub = fiph->fip_subcode;
33747
33748 diff -urNp linux-3.0.8/drivers/scsi/gdth.c linux-3.0.8/drivers/scsi/gdth.c
33749 --- linux-3.0.8/drivers/scsi/gdth.c 2011-07-21 22:17:23.000000000 -0400
33750 +++ linux-3.0.8/drivers/scsi/gdth.c 2011-08-23 21:48:14.000000000 -0400
33751 @@ -4107,6 +4107,8 @@ static int ioc_lockdrv(void __user *arg)
33752 unsigned long flags;
33753 gdth_ha_str *ha;
33754
33755 + pax_track_stack();
33756 +
33757 if (copy_from_user(&ldrv, arg, sizeof(gdth_ioctl_lockdrv)))
33758 return -EFAULT;
33759 ha = gdth_find_ha(ldrv.ionode);
33760 @@ -4139,6 +4141,8 @@ static int ioc_resetdrv(void __user *arg
33761 gdth_ha_str *ha;
33762 int rval;
33763
33764 + pax_track_stack();
33765 +
33766 if (copy_from_user(&res, arg, sizeof(gdth_ioctl_reset)) ||
33767 res.number >= MAX_HDRIVES)
33768 return -EFAULT;
33769 @@ -4174,6 +4178,8 @@ static int ioc_general(void __user *arg,
33770 gdth_ha_str *ha;
33771 int rval;
33772
33773 + pax_track_stack();
33774 +
33775 if (copy_from_user(&gen, arg, sizeof(gdth_ioctl_general)))
33776 return -EFAULT;
33777 ha = gdth_find_ha(gen.ionode);
33778 @@ -4642,6 +4648,9 @@ static void gdth_flush(gdth_ha_str *ha)
33779 int i;
33780 gdth_cmd_str gdtcmd;
33781 char cmnd[MAX_COMMAND_SIZE];
33782 +
33783 + pax_track_stack();
33784 +
33785 memset(cmnd, 0xff, MAX_COMMAND_SIZE);
33786
33787 TRACE2(("gdth_flush() hanum %d\n", ha->hanum));
33788 diff -urNp linux-3.0.8/drivers/scsi/gdth_proc.c linux-3.0.8/drivers/scsi/gdth_proc.c
33789 --- linux-3.0.8/drivers/scsi/gdth_proc.c 2011-07-21 22:17:23.000000000 -0400
33790 +++ linux-3.0.8/drivers/scsi/gdth_proc.c 2011-08-23 21:48:14.000000000 -0400
33791 @@ -47,6 +47,9 @@ static int gdth_set_asc_info(struct Scsi
33792 u64 paddr;
33793
33794 char cmnd[MAX_COMMAND_SIZE];
33795 +
33796 + pax_track_stack();
33797 +
33798 memset(cmnd, 0xff, 12);
33799 memset(&gdtcmd, 0, sizeof(gdth_cmd_str));
33800
33801 @@ -175,6 +178,8 @@ static int gdth_get_info(char *buffer,ch
33802 gdth_hget_str *phg;
33803 char cmnd[MAX_COMMAND_SIZE];
33804
33805 + pax_track_stack();
33806 +
33807 gdtcmd = kmalloc(sizeof(*gdtcmd), GFP_KERNEL);
33808 estr = kmalloc(sizeof(*estr), GFP_KERNEL);
33809 if (!gdtcmd || !estr)
33810 diff -urNp linux-3.0.8/drivers/scsi/hosts.c linux-3.0.8/drivers/scsi/hosts.c
33811 --- linux-3.0.8/drivers/scsi/hosts.c 2011-07-21 22:17:23.000000000 -0400
33812 +++ linux-3.0.8/drivers/scsi/hosts.c 2011-08-23 21:47:55.000000000 -0400
33813 @@ -42,7 +42,7 @@
33814 #include "scsi_logging.h"
33815
33816
33817 -static atomic_t scsi_host_next_hn; /* host_no for next new host */
33818 +static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
33819
33820
33821 static void scsi_host_cls_release(struct device *dev)
33822 @@ -354,7 +354,7 @@ struct Scsi_Host *scsi_host_alloc(struct
33823 * subtract one because we increment first then return, but we need to
33824 * know what the next host number was before increment
33825 */
33826 - shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
33827 + shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
33828 shost->dma_channel = 0xff;
33829
33830 /* These three are default values which can be overridden */
33831 diff -urNp linux-3.0.8/drivers/scsi/hpsa.c linux-3.0.8/drivers/scsi/hpsa.c
33832 --- linux-3.0.8/drivers/scsi/hpsa.c 2011-10-24 08:05:30.000000000 -0400
33833 +++ linux-3.0.8/drivers/scsi/hpsa.c 2011-10-16 21:55:27.000000000 -0400
33834 @@ -498,7 +498,7 @@ static inline u32 next_command(struct ct
33835 u32 a;
33836
33837 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
33838 - return h->access.command_completed(h);
33839 + return h->access->command_completed(h);
33840
33841 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
33842 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
33843 @@ -2955,7 +2955,7 @@ static void start_io(struct ctlr_info *h
33844 while (!list_empty(&h->reqQ)) {
33845 c = list_entry(h->reqQ.next, struct CommandList, list);
33846 /* can't do anything if fifo is full */
33847 - if ((h->access.fifo_full(h))) {
33848 + if ((h->access->fifo_full(h))) {
33849 dev_warn(&h->pdev->dev, "fifo full\n");
33850 break;
33851 }
33852 @@ -2965,7 +2965,7 @@ static void start_io(struct ctlr_info *h
33853 h->Qdepth--;
33854
33855 /* Tell the controller execute command */
33856 - h->access.submit_command(h, c);
33857 + h->access->submit_command(h, c);
33858
33859 /* Put job onto the completed Q */
33860 addQ(&h->cmpQ, c);
33861 @@ -2974,17 +2974,17 @@ static void start_io(struct ctlr_info *h
33862
33863 static inline unsigned long get_next_completion(struct ctlr_info *h)
33864 {
33865 - return h->access.command_completed(h);
33866 + return h->access->command_completed(h);
33867 }
33868
33869 static inline bool interrupt_pending(struct ctlr_info *h)
33870 {
33871 - return h->access.intr_pending(h);
33872 + return h->access->intr_pending(h);
33873 }
33874
33875 static inline long interrupt_not_for_us(struct ctlr_info *h)
33876 {
33877 - return (h->access.intr_pending(h) == 0) ||
33878 + return (h->access->intr_pending(h) == 0) ||
33879 (h->interrupts_enabled == 0);
33880 }
33881
33882 @@ -3874,7 +3874,7 @@ static int __devinit hpsa_pci_init(struc
33883 if (prod_index < 0)
33884 return -ENODEV;
33885 h->product_name = products[prod_index].product_name;
33886 - h->access = *(products[prod_index].access);
33887 + h->access = products[prod_index].access;
33888
33889 if (hpsa_board_disabled(h->pdev)) {
33890 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
33891 @@ -4151,7 +4151,7 @@ reinit_after_soft_reset:
33892 }
33893
33894 /* make sure the board interrupts are off */
33895 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
33896 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
33897
33898 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
33899 goto clean2;
33900 @@ -4185,7 +4185,7 @@ reinit_after_soft_reset:
33901 * fake ones to scoop up any residual completions.
33902 */
33903 spin_lock_irqsave(&h->lock, flags);
33904 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
33905 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
33906 spin_unlock_irqrestore(&h->lock, flags);
33907 free_irq(h->intr[h->intr_mode], h);
33908 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
33909 @@ -4204,9 +4204,9 @@ reinit_after_soft_reset:
33910 dev_info(&h->pdev->dev, "Board READY.\n");
33911 dev_info(&h->pdev->dev,
33912 "Waiting for stale completions to drain.\n");
33913 - h->access.set_intr_mask(h, HPSA_INTR_ON);
33914 + h->access->set_intr_mask(h, HPSA_INTR_ON);
33915 msleep(10000);
33916 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
33917 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
33918
33919 rc = controller_reset_failed(h->cfgtable);
33920 if (rc)
33921 @@ -4227,7 +4227,7 @@ reinit_after_soft_reset:
33922 }
33923
33924 /* Turn the interrupts on so we can service requests */
33925 - h->access.set_intr_mask(h, HPSA_INTR_ON);
33926 + h->access->set_intr_mask(h, HPSA_INTR_ON);
33927
33928 hpsa_hba_inquiry(h);
33929 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
33930 @@ -4280,7 +4280,7 @@ static void hpsa_shutdown(struct pci_dev
33931 * To write all data in the battery backed cache to disks
33932 */
33933 hpsa_flush_cache(h);
33934 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
33935 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
33936 free_irq(h->intr[h->intr_mode], h);
33937 #ifdef CONFIG_PCI_MSI
33938 if (h->msix_vector)
33939 @@ -4443,7 +4443,7 @@ static __devinit void hpsa_enter_perform
33940 return;
33941 }
33942 /* Change the access methods to the performant access methods */
33943 - h->access = SA5_performant_access;
33944 + h->access = &SA5_performant_access;
33945 h->transMethod = CFGTBL_Trans_Performant;
33946 }
33947
33948 diff -urNp linux-3.0.8/drivers/scsi/hpsa.h linux-3.0.8/drivers/scsi/hpsa.h
33949 --- linux-3.0.8/drivers/scsi/hpsa.h 2011-10-24 08:05:21.000000000 -0400
33950 +++ linux-3.0.8/drivers/scsi/hpsa.h 2011-08-23 21:47:55.000000000 -0400
33951 @@ -73,7 +73,7 @@ struct ctlr_info {
33952 unsigned int msix_vector;
33953 unsigned int msi_vector;
33954 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
33955 - struct access_method access;
33956 + struct access_method *access;
33957
33958 /* queue and queue Info */
33959 struct list_head reqQ;
33960 diff -urNp linux-3.0.8/drivers/scsi/ips.h linux-3.0.8/drivers/scsi/ips.h
33961 --- linux-3.0.8/drivers/scsi/ips.h 2011-07-21 22:17:23.000000000 -0400
33962 +++ linux-3.0.8/drivers/scsi/ips.h 2011-08-23 21:47:55.000000000 -0400
33963 @@ -1027,7 +1027,7 @@ typedef struct {
33964 int (*intr)(struct ips_ha *);
33965 void (*enableint)(struct ips_ha *);
33966 uint32_t (*statupd)(struct ips_ha *);
33967 -} ips_hw_func_t;
33968 +} __no_const ips_hw_func_t;
33969
33970 typedef struct ips_ha {
33971 uint8_t ha_id[IPS_MAX_CHANNELS+1];
33972 diff -urNp linux-3.0.8/drivers/scsi/libfc/fc_exch.c linux-3.0.8/drivers/scsi/libfc/fc_exch.c
33973 --- linux-3.0.8/drivers/scsi/libfc/fc_exch.c 2011-07-21 22:17:23.000000000 -0400
33974 +++ linux-3.0.8/drivers/scsi/libfc/fc_exch.c 2011-08-23 21:47:55.000000000 -0400
33975 @@ -105,12 +105,12 @@ struct fc_exch_mgr {
33976 * all together if not used XXX
33977 */
33978 struct {
33979 - atomic_t no_free_exch;
33980 - atomic_t no_free_exch_xid;
33981 - atomic_t xid_not_found;
33982 - atomic_t xid_busy;
33983 - atomic_t seq_not_found;
33984 - atomic_t non_bls_resp;
33985 + atomic_unchecked_t no_free_exch;
33986 + atomic_unchecked_t no_free_exch_xid;
33987 + atomic_unchecked_t xid_not_found;
33988 + atomic_unchecked_t xid_busy;
33989 + atomic_unchecked_t seq_not_found;
33990 + atomic_unchecked_t non_bls_resp;
33991 } stats;
33992 };
33993
33994 @@ -700,7 +700,7 @@ static struct fc_exch *fc_exch_em_alloc(
33995 /* allocate memory for exchange */
33996 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
33997 if (!ep) {
33998 - atomic_inc(&mp->stats.no_free_exch);
33999 + atomic_inc_unchecked(&mp->stats.no_free_exch);
34000 goto out;
34001 }
34002 memset(ep, 0, sizeof(*ep));
34003 @@ -761,7 +761,7 @@ out:
34004 return ep;
34005 err:
34006 spin_unlock_bh(&pool->lock);
34007 - atomic_inc(&mp->stats.no_free_exch_xid);
34008 + atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
34009 mempool_free(ep, mp->ep_pool);
34010 return NULL;
34011 }
34012 @@ -906,7 +906,7 @@ static enum fc_pf_rjt_reason fc_seq_look
34013 xid = ntohs(fh->fh_ox_id); /* we originated exch */
34014 ep = fc_exch_find(mp, xid);
34015 if (!ep) {
34016 - atomic_inc(&mp->stats.xid_not_found);
34017 + atomic_inc_unchecked(&mp->stats.xid_not_found);
34018 reject = FC_RJT_OX_ID;
34019 goto out;
34020 }
34021 @@ -936,7 +936,7 @@ static enum fc_pf_rjt_reason fc_seq_look
34022 ep = fc_exch_find(mp, xid);
34023 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
34024 if (ep) {
34025 - atomic_inc(&mp->stats.xid_busy);
34026 + atomic_inc_unchecked(&mp->stats.xid_busy);
34027 reject = FC_RJT_RX_ID;
34028 goto rel;
34029 }
34030 @@ -947,7 +947,7 @@ static enum fc_pf_rjt_reason fc_seq_look
34031 }
34032 xid = ep->xid; /* get our XID */
34033 } else if (!ep) {
34034 - atomic_inc(&mp->stats.xid_not_found);
34035 + atomic_inc_unchecked(&mp->stats.xid_not_found);
34036 reject = FC_RJT_RX_ID; /* XID not found */
34037 goto out;
34038 }
34039 @@ -964,7 +964,7 @@ static enum fc_pf_rjt_reason fc_seq_look
34040 } else {
34041 sp = &ep->seq;
34042 if (sp->id != fh->fh_seq_id) {
34043 - atomic_inc(&mp->stats.seq_not_found);
34044 + atomic_inc_unchecked(&mp->stats.seq_not_found);
34045 reject = FC_RJT_SEQ_ID; /* sequence/exch should exist */
34046 goto rel;
34047 }
34048 @@ -1392,22 +1392,22 @@ static void fc_exch_recv_seq_resp(struct
34049
34050 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
34051 if (!ep) {
34052 - atomic_inc(&mp->stats.xid_not_found);
34053 + atomic_inc_unchecked(&mp->stats.xid_not_found);
34054 goto out;
34055 }
34056 if (ep->esb_stat & ESB_ST_COMPLETE) {
34057 - atomic_inc(&mp->stats.xid_not_found);
34058 + atomic_inc_unchecked(&mp->stats.xid_not_found);
34059 goto rel;
34060 }
34061 if (ep->rxid == FC_XID_UNKNOWN)
34062 ep->rxid = ntohs(fh->fh_rx_id);
34063 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
34064 - atomic_inc(&mp->stats.xid_not_found);
34065 + atomic_inc_unchecked(&mp->stats.xid_not_found);
34066 goto rel;
34067 }
34068 if (ep->did != ntoh24(fh->fh_s_id) &&
34069 ep->did != FC_FID_FLOGI) {
34070 - atomic_inc(&mp->stats.xid_not_found);
34071 + atomic_inc_unchecked(&mp->stats.xid_not_found);
34072 goto rel;
34073 }
34074 sof = fr_sof(fp);
34075 @@ -1416,7 +1416,7 @@ static void fc_exch_recv_seq_resp(struct
34076 sp->ssb_stat |= SSB_ST_RESP;
34077 sp->id = fh->fh_seq_id;
34078 } else if (sp->id != fh->fh_seq_id) {
34079 - atomic_inc(&mp->stats.seq_not_found);
34080 + atomic_inc_unchecked(&mp->stats.seq_not_found);
34081 goto rel;
34082 }
34083
34084 @@ -1480,9 +1480,9 @@ static void fc_exch_recv_resp(struct fc_
34085 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
34086
34087 if (!sp)
34088 - atomic_inc(&mp->stats.xid_not_found);
34089 + atomic_inc_unchecked(&mp->stats.xid_not_found);
34090 else
34091 - atomic_inc(&mp->stats.non_bls_resp);
34092 + atomic_inc_unchecked(&mp->stats.non_bls_resp);
34093
34094 fc_frame_free(fp);
34095 }
34096 diff -urNp linux-3.0.8/drivers/scsi/libsas/sas_ata.c linux-3.0.8/drivers/scsi/libsas/sas_ata.c
34097 --- linux-3.0.8/drivers/scsi/libsas/sas_ata.c 2011-07-21 22:17:23.000000000 -0400
34098 +++ linux-3.0.8/drivers/scsi/libsas/sas_ata.c 2011-08-23 21:47:55.000000000 -0400
34099 @@ -368,7 +368,7 @@ static struct ata_port_operations sas_sa
34100 .postreset = ata_std_postreset,
34101 .error_handler = ata_std_error_handler,
34102 .post_internal_cmd = sas_ata_post_internal,
34103 - .qc_defer = ata_std_qc_defer,
34104 + .qc_defer = ata_std_qc_defer,
34105 .qc_prep = ata_noop_qc_prep,
34106 .qc_issue = sas_ata_qc_issue,
34107 .qc_fill_rtf = sas_ata_qc_fill_rtf,
34108 diff -urNp linux-3.0.8/drivers/scsi/lpfc/lpfc_debugfs.c linux-3.0.8/drivers/scsi/lpfc/lpfc_debugfs.c
34109 --- linux-3.0.8/drivers/scsi/lpfc/lpfc_debugfs.c 2011-07-21 22:17:23.000000000 -0400
34110 +++ linux-3.0.8/drivers/scsi/lpfc/lpfc_debugfs.c 2011-08-23 21:48:14.000000000 -0400
34111 @@ -104,7 +104,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_
34112
34113 #include <linux/debugfs.h>
34114
34115 -static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
34116 +static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
34117 static unsigned long lpfc_debugfs_start_time = 0L;
34118
34119 /* iDiag */
34120 @@ -141,7 +141,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_v
34121 lpfc_debugfs_enable = 0;
34122
34123 len = 0;
34124 - index = (atomic_read(&vport->disc_trc_cnt) + 1) &
34125 + index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
34126 (lpfc_debugfs_max_disc_trc - 1);
34127 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
34128 dtp = vport->disc_trc + i;
34129 @@ -202,7 +202,7 @@ lpfc_debugfs_slow_ring_trc_data(struct l
34130 lpfc_debugfs_enable = 0;
34131
34132 len = 0;
34133 - index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
34134 + index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
34135 (lpfc_debugfs_max_slow_ring_trc - 1);
34136 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
34137 dtp = phba->slow_ring_trc + i;
34138 @@ -380,6 +380,8 @@ lpfc_debugfs_dumpHBASlim_data(struct lpf
34139 uint32_t *ptr;
34140 char buffer[1024];
34141
34142 + pax_track_stack();
34143 +
34144 off = 0;
34145 spin_lock_irq(&phba->hbalock);
34146
34147 @@ -617,14 +619,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport
34148 !vport || !vport->disc_trc)
34149 return;
34150
34151 - index = atomic_inc_return(&vport->disc_trc_cnt) &
34152 + index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
34153 (lpfc_debugfs_max_disc_trc - 1);
34154 dtp = vport->disc_trc + index;
34155 dtp->fmt = fmt;
34156 dtp->data1 = data1;
34157 dtp->data2 = data2;
34158 dtp->data3 = data3;
34159 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
34160 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
34161 dtp->jif = jiffies;
34162 #endif
34163 return;
34164 @@ -655,14 +657,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_h
34165 !phba || !phba->slow_ring_trc)
34166 return;
34167
34168 - index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
34169 + index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
34170 (lpfc_debugfs_max_slow_ring_trc - 1);
34171 dtp = phba->slow_ring_trc + index;
34172 dtp->fmt = fmt;
34173 dtp->data1 = data1;
34174 dtp->data2 = data2;
34175 dtp->data3 = data3;
34176 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
34177 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
34178 dtp->jif = jiffies;
34179 #endif
34180 return;
34181 @@ -2606,7 +2608,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
34182 "slow_ring buffer\n");
34183 goto debug_failed;
34184 }
34185 - atomic_set(&phba->slow_ring_trc_cnt, 0);
34186 + atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
34187 memset(phba->slow_ring_trc, 0,
34188 (sizeof(struct lpfc_debugfs_trc) *
34189 lpfc_debugfs_max_slow_ring_trc));
34190 @@ -2652,7 +2654,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
34191 "buffer\n");
34192 goto debug_failed;
34193 }
34194 - atomic_set(&vport->disc_trc_cnt, 0);
34195 + atomic_set_unchecked(&vport->disc_trc_cnt, 0);
34196
34197 snprintf(name, sizeof(name), "discovery_trace");
34198 vport->debug_disc_trc =
34199 diff -urNp linux-3.0.8/drivers/scsi/lpfc/lpfc.h linux-3.0.8/drivers/scsi/lpfc/lpfc.h
34200 --- linux-3.0.8/drivers/scsi/lpfc/lpfc.h 2011-10-24 08:05:30.000000000 -0400
34201 +++ linux-3.0.8/drivers/scsi/lpfc/lpfc.h 2011-10-16 21:55:27.000000000 -0400
34202 @@ -425,7 +425,7 @@ struct lpfc_vport {
34203 struct dentry *debug_nodelist;
34204 struct dentry *vport_debugfs_root;
34205 struct lpfc_debugfs_trc *disc_trc;
34206 - atomic_t disc_trc_cnt;
34207 + atomic_unchecked_t disc_trc_cnt;
34208 #endif
34209 uint8_t stat_data_enabled;
34210 uint8_t stat_data_blocked;
34211 @@ -832,8 +832,8 @@ struct lpfc_hba {
34212 struct timer_list fabric_block_timer;
34213 unsigned long bit_flags;
34214 #define FABRIC_COMANDS_BLOCKED 0
34215 - atomic_t num_rsrc_err;
34216 - atomic_t num_cmd_success;
34217 + atomic_unchecked_t num_rsrc_err;
34218 + atomic_unchecked_t num_cmd_success;
34219 unsigned long last_rsrc_error_time;
34220 unsigned long last_ramp_down_time;
34221 unsigned long last_ramp_up_time;
34222 @@ -847,7 +847,7 @@ struct lpfc_hba {
34223 struct dentry *debug_dumpDif; /* BlockGuard BPL*/
34224 struct dentry *debug_slow_ring_trc;
34225 struct lpfc_debugfs_trc *slow_ring_trc;
34226 - atomic_t slow_ring_trc_cnt;
34227 + atomic_unchecked_t slow_ring_trc_cnt;
34228 /* iDiag debugfs sub-directory */
34229 struct dentry *idiag_root;
34230 struct dentry *idiag_pci_cfg;
34231 diff -urNp linux-3.0.8/drivers/scsi/lpfc/lpfc_init.c linux-3.0.8/drivers/scsi/lpfc/lpfc_init.c
34232 --- linux-3.0.8/drivers/scsi/lpfc/lpfc_init.c 2011-10-24 08:05:30.000000000 -0400
34233 +++ linux-3.0.8/drivers/scsi/lpfc/lpfc_init.c 2011-10-16 21:55:27.000000000 -0400
34234 @@ -9971,8 +9971,10 @@ lpfc_init(void)
34235 printk(LPFC_COPYRIGHT "\n");
34236
34237 if (lpfc_enable_npiv) {
34238 - lpfc_transport_functions.vport_create = lpfc_vport_create;
34239 - lpfc_transport_functions.vport_delete = lpfc_vport_delete;
34240 + pax_open_kernel();
34241 + *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
34242 + *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
34243 + pax_close_kernel();
34244 }
34245 lpfc_transport_template =
34246 fc_attach_transport(&lpfc_transport_functions);
34247 diff -urNp linux-3.0.8/drivers/scsi/lpfc/lpfc_scsi.c linux-3.0.8/drivers/scsi/lpfc/lpfc_scsi.c
34248 --- linux-3.0.8/drivers/scsi/lpfc/lpfc_scsi.c 2011-10-24 08:05:30.000000000 -0400
34249 +++ linux-3.0.8/drivers/scsi/lpfc/lpfc_scsi.c 2011-10-16 21:55:27.000000000 -0400
34250 @@ -297,7 +297,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hb
34251 uint32_t evt_posted;
34252
34253 spin_lock_irqsave(&phba->hbalock, flags);
34254 - atomic_inc(&phba->num_rsrc_err);
34255 + atomic_inc_unchecked(&phba->num_rsrc_err);
34256 phba->last_rsrc_error_time = jiffies;
34257
34258 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
34259 @@ -338,7 +338,7 @@ lpfc_rampup_queue_depth(struct lpfc_vpor
34260 unsigned long flags;
34261 struct lpfc_hba *phba = vport->phba;
34262 uint32_t evt_posted;
34263 - atomic_inc(&phba->num_cmd_success);
34264 + atomic_inc_unchecked(&phba->num_cmd_success);
34265
34266 if (vport->cfg_lun_queue_depth <= queue_depth)
34267 return;
34268 @@ -382,8 +382,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
34269 unsigned long num_rsrc_err, num_cmd_success;
34270 int i;
34271
34272 - num_rsrc_err = atomic_read(&phba->num_rsrc_err);
34273 - num_cmd_success = atomic_read(&phba->num_cmd_success);
34274 + num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
34275 + num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
34276
34277 vports = lpfc_create_vport_work_array(phba);
34278 if (vports != NULL)
34279 @@ -403,8 +403,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
34280 }
34281 }
34282 lpfc_destroy_vport_work_array(phba, vports);
34283 - atomic_set(&phba->num_rsrc_err, 0);
34284 - atomic_set(&phba->num_cmd_success, 0);
34285 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
34286 + atomic_set_unchecked(&phba->num_cmd_success, 0);
34287 }
34288
34289 /**
34290 @@ -438,8 +438,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_h
34291 }
34292 }
34293 lpfc_destroy_vport_work_array(phba, vports);
34294 - atomic_set(&phba->num_rsrc_err, 0);
34295 - atomic_set(&phba->num_cmd_success, 0);
34296 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
34297 + atomic_set_unchecked(&phba->num_cmd_success, 0);
34298 }
34299
34300 /**
34301 diff -urNp linux-3.0.8/drivers/scsi/megaraid/megaraid_mbox.c linux-3.0.8/drivers/scsi/megaraid/megaraid_mbox.c
34302 --- linux-3.0.8/drivers/scsi/megaraid/megaraid_mbox.c 2011-07-21 22:17:23.000000000 -0400
34303 +++ linux-3.0.8/drivers/scsi/megaraid/megaraid_mbox.c 2011-08-23 21:48:14.000000000 -0400
34304 @@ -3503,6 +3503,8 @@ megaraid_cmm_register(adapter_t *adapter
34305 int rval;
34306 int i;
34307
34308 + pax_track_stack();
34309 +
34310 // Allocate memory for the base list of scb for management module.
34311 adapter->uscb_list = kcalloc(MBOX_MAX_USER_CMDS, sizeof(scb_t), GFP_KERNEL);
34312
34313 diff -urNp linux-3.0.8/drivers/scsi/osd/osd_initiator.c linux-3.0.8/drivers/scsi/osd/osd_initiator.c
34314 --- linux-3.0.8/drivers/scsi/osd/osd_initiator.c 2011-07-21 22:17:23.000000000 -0400
34315 +++ linux-3.0.8/drivers/scsi/osd/osd_initiator.c 2011-08-23 21:48:14.000000000 -0400
34316 @@ -97,6 +97,8 @@ static int _osd_get_print_system_info(st
34317 int nelem = ARRAY_SIZE(get_attrs), a = 0;
34318 int ret;
34319
34320 + pax_track_stack();
34321 +
34322 or = osd_start_request(od, GFP_KERNEL);
34323 if (!or)
34324 return -ENOMEM;
34325 diff -urNp linux-3.0.8/drivers/scsi/pmcraid.c linux-3.0.8/drivers/scsi/pmcraid.c
34326 --- linux-3.0.8/drivers/scsi/pmcraid.c 2011-10-24 08:05:21.000000000 -0400
34327 +++ linux-3.0.8/drivers/scsi/pmcraid.c 2011-08-23 21:47:56.000000000 -0400
34328 @@ -201,8 +201,8 @@ static int pmcraid_slave_alloc(struct sc
34329 res->scsi_dev = scsi_dev;
34330 scsi_dev->hostdata = res;
34331 res->change_detected = 0;
34332 - atomic_set(&res->read_failures, 0);
34333 - atomic_set(&res->write_failures, 0);
34334 + atomic_set_unchecked(&res->read_failures, 0);
34335 + atomic_set_unchecked(&res->write_failures, 0);
34336 rc = 0;
34337 }
34338 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
34339 @@ -2677,9 +2677,9 @@ static int pmcraid_error_handler(struct
34340
34341 /* If this was a SCSI read/write command keep count of errors */
34342 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
34343 - atomic_inc(&res->read_failures);
34344 + atomic_inc_unchecked(&res->read_failures);
34345 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
34346 - atomic_inc(&res->write_failures);
34347 + atomic_inc_unchecked(&res->write_failures);
34348
34349 if (!RES_IS_GSCSI(res->cfg_entry) &&
34350 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
34351 @@ -3535,7 +3535,7 @@ static int pmcraid_queuecommand_lck(
34352 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
34353 * hrrq_id assigned here in queuecommand
34354 */
34355 - ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
34356 + ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
34357 pinstance->num_hrrq;
34358 cmd->cmd_done = pmcraid_io_done;
34359
34360 @@ -3860,7 +3860,7 @@ static long pmcraid_ioctl_passthrough(
34361 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
34362 * hrrq_id assigned here in queuecommand
34363 */
34364 - ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
34365 + ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
34366 pinstance->num_hrrq;
34367
34368 if (request_size) {
34369 @@ -4498,7 +4498,7 @@ static void pmcraid_worker_function(stru
34370
34371 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
34372 /* add resources only after host is added into system */
34373 - if (!atomic_read(&pinstance->expose_resources))
34374 + if (!atomic_read_unchecked(&pinstance->expose_resources))
34375 return;
34376
34377 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
34378 @@ -5332,8 +5332,8 @@ static int __devinit pmcraid_init_instan
34379 init_waitqueue_head(&pinstance->reset_wait_q);
34380
34381 atomic_set(&pinstance->outstanding_cmds, 0);
34382 - atomic_set(&pinstance->last_message_id, 0);
34383 - atomic_set(&pinstance->expose_resources, 0);
34384 + atomic_set_unchecked(&pinstance->last_message_id, 0);
34385 + atomic_set_unchecked(&pinstance->expose_resources, 0);
34386
34387 INIT_LIST_HEAD(&pinstance->free_res_q);
34388 INIT_LIST_HEAD(&pinstance->used_res_q);
34389 @@ -6048,7 +6048,7 @@ static int __devinit pmcraid_probe(
34390 /* Schedule worker thread to handle CCN and take care of adding and
34391 * removing devices to OS
34392 */
34393 - atomic_set(&pinstance->expose_resources, 1);
34394 + atomic_set_unchecked(&pinstance->expose_resources, 1);
34395 schedule_work(&pinstance->worker_q);
34396 return rc;
34397
34398 diff -urNp linux-3.0.8/drivers/scsi/pmcraid.h linux-3.0.8/drivers/scsi/pmcraid.h
34399 --- linux-3.0.8/drivers/scsi/pmcraid.h 2011-07-21 22:17:23.000000000 -0400
34400 +++ linux-3.0.8/drivers/scsi/pmcraid.h 2011-08-23 21:47:56.000000000 -0400
34401 @@ -749,7 +749,7 @@ struct pmcraid_instance {
34402 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
34403
34404 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
34405 - atomic_t last_message_id;
34406 + atomic_unchecked_t last_message_id;
34407
34408 /* configuration table */
34409 struct pmcraid_config_table *cfg_table;
34410 @@ -778,7 +778,7 @@ struct pmcraid_instance {
34411 atomic_t outstanding_cmds;
34412
34413 /* should add/delete resources to mid-layer now ?*/
34414 - atomic_t expose_resources;
34415 + atomic_unchecked_t expose_resources;
34416
34417
34418
34419 @@ -814,8 +814,8 @@ struct pmcraid_resource_entry {
34420 struct pmcraid_config_table_entry_ext cfg_entry_ext;
34421 };
34422 struct scsi_device *scsi_dev; /* Link scsi_device structure */
34423 - atomic_t read_failures; /* count of failed READ commands */
34424 - atomic_t write_failures; /* count of failed WRITE commands */
34425 + atomic_unchecked_t read_failures; /* count of failed READ commands */
34426 + atomic_unchecked_t write_failures; /* count of failed WRITE commands */
34427
34428 /* To indicate add/delete/modify during CCN */
34429 u8 change_detected;
34430 diff -urNp linux-3.0.8/drivers/scsi/qla2xxx/qla_def.h linux-3.0.8/drivers/scsi/qla2xxx/qla_def.h
34431 --- linux-3.0.8/drivers/scsi/qla2xxx/qla_def.h 2011-07-21 22:17:23.000000000 -0400
34432 +++ linux-3.0.8/drivers/scsi/qla2xxx/qla_def.h 2011-08-23 21:47:56.000000000 -0400
34433 @@ -2244,7 +2244,7 @@ struct isp_operations {
34434 int (*get_flash_version) (struct scsi_qla_host *, void *);
34435 int (*start_scsi) (srb_t *);
34436 int (*abort_isp) (struct scsi_qla_host *);
34437 -};
34438 +} __no_const;
34439
34440 /* MSI-X Support *************************************************************/
34441
34442 diff -urNp linux-3.0.8/drivers/scsi/qla4xxx/ql4_def.h linux-3.0.8/drivers/scsi/qla4xxx/ql4_def.h
34443 --- linux-3.0.8/drivers/scsi/qla4xxx/ql4_def.h 2011-07-21 22:17:23.000000000 -0400
34444 +++ linux-3.0.8/drivers/scsi/qla4xxx/ql4_def.h 2011-08-23 21:47:56.000000000 -0400
34445 @@ -256,7 +256,7 @@ struct ddb_entry {
34446 atomic_t retry_relogin_timer; /* Min Time between relogins
34447 * (4000 only) */
34448 atomic_t relogin_timer; /* Max Time to wait for relogin to complete */
34449 - atomic_t relogin_retry_count; /* Num of times relogin has been
34450 + atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
34451 * retried */
34452
34453 uint16_t port;
34454 diff -urNp linux-3.0.8/drivers/scsi/qla4xxx/ql4_init.c linux-3.0.8/drivers/scsi/qla4xxx/ql4_init.c
34455 --- linux-3.0.8/drivers/scsi/qla4xxx/ql4_init.c 2011-07-21 22:17:23.000000000 -0400
34456 +++ linux-3.0.8/drivers/scsi/qla4xxx/ql4_init.c 2011-08-23 21:47:56.000000000 -0400
34457 @@ -680,7 +680,7 @@ static struct ddb_entry * qla4xxx_alloc_
34458 ddb_entry->fw_ddb_index = fw_ddb_index;
34459 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
34460 atomic_set(&ddb_entry->relogin_timer, 0);
34461 - atomic_set(&ddb_entry->relogin_retry_count, 0);
34462 + atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
34463 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
34464 list_add_tail(&ddb_entry->list, &ha->ddb_list);
34465 ha->fw_ddb_index_map[fw_ddb_index] = ddb_entry;
34466 @@ -1433,7 +1433,7 @@ int qla4xxx_process_ddb_changed(struct s
34467 if ((ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) &&
34468 (atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE)) {
34469 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
34470 - atomic_set(&ddb_entry->relogin_retry_count, 0);
34471 + atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
34472 atomic_set(&ddb_entry->relogin_timer, 0);
34473 clear_bit(DF_RELOGIN, &ddb_entry->flags);
34474 iscsi_unblock_session(ddb_entry->sess);
34475 diff -urNp linux-3.0.8/drivers/scsi/qla4xxx/ql4_os.c linux-3.0.8/drivers/scsi/qla4xxx/ql4_os.c
34476 --- linux-3.0.8/drivers/scsi/qla4xxx/ql4_os.c 2011-07-21 22:17:23.000000000 -0400
34477 +++ linux-3.0.8/drivers/scsi/qla4xxx/ql4_os.c 2011-08-23 21:47:56.000000000 -0400
34478 @@ -811,13 +811,13 @@ static void qla4xxx_timer(struct scsi_ql
34479 ddb_entry->fw_ddb_device_state ==
34480 DDB_DS_SESSION_FAILED) {
34481 /* Reset retry relogin timer */
34482 - atomic_inc(&ddb_entry->relogin_retry_count);
34483 + atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
34484 DEBUG2(printk("scsi%ld: ddb [%d] relogin"
34485 " timed out-retrying"
34486 " relogin (%d)\n",
34487 ha->host_no,
34488 ddb_entry->fw_ddb_index,
34489 - atomic_read(&ddb_entry->
34490 + atomic_read_unchecked(&ddb_entry->
34491 relogin_retry_count))
34492 );
34493 start_dpc++;
34494 diff -urNp linux-3.0.8/drivers/scsi/scsi.c linux-3.0.8/drivers/scsi/scsi.c
34495 --- linux-3.0.8/drivers/scsi/scsi.c 2011-07-21 22:17:23.000000000 -0400
34496 +++ linux-3.0.8/drivers/scsi/scsi.c 2011-08-23 21:47:56.000000000 -0400
34497 @@ -655,7 +655,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *
34498 unsigned long timeout;
34499 int rtn = 0;
34500
34501 - atomic_inc(&cmd->device->iorequest_cnt);
34502 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
34503
34504 /* check if the device is still usable */
34505 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
34506 diff -urNp linux-3.0.8/drivers/scsi/scsi_debug.c linux-3.0.8/drivers/scsi/scsi_debug.c
34507 --- linux-3.0.8/drivers/scsi/scsi_debug.c 2011-07-21 22:17:23.000000000 -0400
34508 +++ linux-3.0.8/drivers/scsi/scsi_debug.c 2011-08-23 21:48:14.000000000 -0400
34509 @@ -1493,6 +1493,8 @@ static int resp_mode_select(struct scsi_
34510 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
34511 unsigned char *cmd = (unsigned char *)scp->cmnd;
34512
34513 + pax_track_stack();
34514 +
34515 if ((errsts = check_readiness(scp, 1, devip)))
34516 return errsts;
34517 memset(arr, 0, sizeof(arr));
34518 @@ -1590,6 +1592,8 @@ static int resp_log_sense(struct scsi_cm
34519 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
34520 unsigned char *cmd = (unsigned char *)scp->cmnd;
34521
34522 + pax_track_stack();
34523 +
34524 if ((errsts = check_readiness(scp, 1, devip)))
34525 return errsts;
34526 memset(arr, 0, sizeof(arr));
34527 diff -urNp linux-3.0.8/drivers/scsi/scsi_lib.c linux-3.0.8/drivers/scsi/scsi_lib.c
34528 --- linux-3.0.8/drivers/scsi/scsi_lib.c 2011-10-24 08:05:21.000000000 -0400
34529 +++ linux-3.0.8/drivers/scsi/scsi_lib.c 2011-08-23 21:47:56.000000000 -0400
34530 @@ -1412,7 +1412,7 @@ static void scsi_kill_request(struct req
34531 shost = sdev->host;
34532 scsi_init_cmd_errh(cmd);
34533 cmd->result = DID_NO_CONNECT << 16;
34534 - atomic_inc(&cmd->device->iorequest_cnt);
34535 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
34536
34537 /*
34538 * SCSI request completion path will do scsi_device_unbusy(),
34539 @@ -1438,9 +1438,9 @@ static void scsi_softirq_done(struct req
34540
34541 INIT_LIST_HEAD(&cmd->eh_entry);
34542
34543 - atomic_inc(&cmd->device->iodone_cnt);
34544 + atomic_inc_unchecked(&cmd->device->iodone_cnt);
34545 if (cmd->result)
34546 - atomic_inc(&cmd->device->ioerr_cnt);
34547 + atomic_inc_unchecked(&cmd->device->ioerr_cnt);
34548
34549 disposition = scsi_decide_disposition(cmd);
34550 if (disposition != SUCCESS &&
34551 diff -urNp linux-3.0.8/drivers/scsi/scsi_sysfs.c linux-3.0.8/drivers/scsi/scsi_sysfs.c
34552 --- linux-3.0.8/drivers/scsi/scsi_sysfs.c 2011-07-21 22:17:23.000000000 -0400
34553 +++ linux-3.0.8/drivers/scsi/scsi_sysfs.c 2011-08-23 21:47:56.000000000 -0400
34554 @@ -622,7 +622,7 @@ show_iostat_##field(struct device *dev,
34555 char *buf) \
34556 { \
34557 struct scsi_device *sdev = to_scsi_device(dev); \
34558 - unsigned long long count = atomic_read(&sdev->field); \
34559 + unsigned long long count = atomic_read_unchecked(&sdev->field); \
34560 return snprintf(buf, 20, "0x%llx\n", count); \
34561 } \
34562 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
34563 diff -urNp linux-3.0.8/drivers/scsi/scsi_tgt_lib.c linux-3.0.8/drivers/scsi/scsi_tgt_lib.c
34564 --- linux-3.0.8/drivers/scsi/scsi_tgt_lib.c 2011-07-21 22:17:23.000000000 -0400
34565 +++ linux-3.0.8/drivers/scsi/scsi_tgt_lib.c 2011-10-06 04:17:55.000000000 -0400
34566 @@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct sc
34567 int err;
34568
34569 dprintk("%lx %u\n", uaddr, len);
34570 - err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
34571 + err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL);
34572 if (err) {
34573 /*
34574 * TODO: need to fixup sg_tablesize, max_segment_size,
34575 diff -urNp linux-3.0.8/drivers/scsi/scsi_transport_fc.c linux-3.0.8/drivers/scsi/scsi_transport_fc.c
34576 --- linux-3.0.8/drivers/scsi/scsi_transport_fc.c 2011-07-21 22:17:23.000000000 -0400
34577 +++ linux-3.0.8/drivers/scsi/scsi_transport_fc.c 2011-08-23 21:47:56.000000000 -0400
34578 @@ -484,7 +484,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_
34579 * Netlink Infrastructure
34580 */
34581
34582 -static atomic_t fc_event_seq;
34583 +static atomic_unchecked_t fc_event_seq;
34584
34585 /**
34586 * fc_get_event_number - Obtain the next sequential FC event number
34587 @@ -497,7 +497,7 @@ static atomic_t fc_event_seq;
34588 u32
34589 fc_get_event_number(void)
34590 {
34591 - return atomic_add_return(1, &fc_event_seq);
34592 + return atomic_add_return_unchecked(1, &fc_event_seq);
34593 }
34594 EXPORT_SYMBOL(fc_get_event_number);
34595
34596 @@ -645,7 +645,7 @@ static __init int fc_transport_init(void
34597 {
34598 int error;
34599
34600 - atomic_set(&fc_event_seq, 0);
34601 + atomic_set_unchecked(&fc_event_seq, 0);
34602
34603 error = transport_class_register(&fc_host_class);
34604 if (error)
34605 @@ -835,7 +835,7 @@ static int fc_str_to_dev_loss(const char
34606 char *cp;
34607
34608 *val = simple_strtoul(buf, &cp, 0);
34609 - if ((*cp && (*cp != '\n')) || (*val < 0))
34610 + if (*cp && (*cp != '\n'))
34611 return -EINVAL;
34612 /*
34613 * Check for overflow; dev_loss_tmo is u32
34614 diff -urNp linux-3.0.8/drivers/scsi/scsi_transport_iscsi.c linux-3.0.8/drivers/scsi/scsi_transport_iscsi.c
34615 --- linux-3.0.8/drivers/scsi/scsi_transport_iscsi.c 2011-07-21 22:17:23.000000000 -0400
34616 +++ linux-3.0.8/drivers/scsi/scsi_transport_iscsi.c 2011-08-23 21:47:56.000000000 -0400
34617 @@ -83,7 +83,7 @@ struct iscsi_internal {
34618 struct device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1];
34619 };
34620
34621 -static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
34622 +static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
34623 static struct workqueue_struct *iscsi_eh_timer_workq;
34624
34625 /*
34626 @@ -761,7 +761,7 @@ int iscsi_add_session(struct iscsi_cls_s
34627 int err;
34628
34629 ihost = shost->shost_data;
34630 - session->sid = atomic_add_return(1, &iscsi_session_nr);
34631 + session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
34632
34633 if (id == ISCSI_MAX_TARGET) {
34634 for (id = 0; id < ISCSI_MAX_TARGET; id++) {
34635 @@ -2200,7 +2200,7 @@ static __init int iscsi_transport_init(v
34636 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
34637 ISCSI_TRANSPORT_VERSION);
34638
34639 - atomic_set(&iscsi_session_nr, 0);
34640 + atomic_set_unchecked(&iscsi_session_nr, 0);
34641
34642 err = class_register(&iscsi_transport_class);
34643 if (err)
34644 diff -urNp linux-3.0.8/drivers/scsi/scsi_transport_srp.c linux-3.0.8/drivers/scsi/scsi_transport_srp.c
34645 --- linux-3.0.8/drivers/scsi/scsi_transport_srp.c 2011-07-21 22:17:23.000000000 -0400
34646 +++ linux-3.0.8/drivers/scsi/scsi_transport_srp.c 2011-08-23 21:47:56.000000000 -0400
34647 @@ -33,7 +33,7 @@
34648 #include "scsi_transport_srp_internal.h"
34649
34650 struct srp_host_attrs {
34651 - atomic_t next_port_id;
34652 + atomic_unchecked_t next_port_id;
34653 };
34654 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
34655
34656 @@ -62,7 +62,7 @@ static int srp_host_setup(struct transpo
34657 struct Scsi_Host *shost = dev_to_shost(dev);
34658 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
34659
34660 - atomic_set(&srp_host->next_port_id, 0);
34661 + atomic_set_unchecked(&srp_host->next_port_id, 0);
34662 return 0;
34663 }
34664
34665 @@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct S
34666 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
34667 rport->roles = ids->roles;
34668
34669 - id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
34670 + id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
34671 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
34672
34673 transport_setup_device(&rport->dev);
34674 diff -urNp linux-3.0.8/drivers/scsi/sg.c linux-3.0.8/drivers/scsi/sg.c
34675 --- linux-3.0.8/drivers/scsi/sg.c 2011-07-21 22:17:23.000000000 -0400
34676 +++ linux-3.0.8/drivers/scsi/sg.c 2011-10-06 04:17:55.000000000 -0400
34677 @@ -1075,7 +1075,7 @@ sg_ioctl(struct file *filp, unsigned int
34678 sdp->disk->disk_name,
34679 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
34680 NULL,
34681 - (char *)arg);
34682 + (char __user *)arg);
34683 case BLKTRACESTART:
34684 return blk_trace_startstop(sdp->device->request_queue, 1);
34685 case BLKTRACESTOP:
34686 @@ -2310,7 +2310,7 @@ struct sg_proc_leaf {
34687 const struct file_operations * fops;
34688 };
34689
34690 -static struct sg_proc_leaf sg_proc_leaf_arr[] = {
34691 +static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
34692 {"allow_dio", &adio_fops},
34693 {"debug", &debug_fops},
34694 {"def_reserved_size", &dressz_fops},
34695 @@ -2325,7 +2325,7 @@ sg_proc_init(void)
34696 {
34697 int k, mask;
34698 int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
34699 - struct sg_proc_leaf * leaf;
34700 + const struct sg_proc_leaf * leaf;
34701
34702 sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
34703 if (!sg_proc_sgp)
34704 diff -urNp linux-3.0.8/drivers/scsi/sym53c8xx_2/sym_glue.c linux-3.0.8/drivers/scsi/sym53c8xx_2/sym_glue.c
34705 --- linux-3.0.8/drivers/scsi/sym53c8xx_2/sym_glue.c 2011-07-21 22:17:23.000000000 -0400
34706 +++ linux-3.0.8/drivers/scsi/sym53c8xx_2/sym_glue.c 2011-08-23 21:48:14.000000000 -0400
34707 @@ -1756,6 +1756,8 @@ static int __devinit sym2_probe(struct p
34708 int do_iounmap = 0;
34709 int do_disable_device = 1;
34710
34711 + pax_track_stack();
34712 +
34713 memset(&sym_dev, 0, sizeof(sym_dev));
34714 memset(&nvram, 0, sizeof(nvram));
34715 sym_dev.pdev = pdev;
34716 diff -urNp linux-3.0.8/drivers/scsi/vmw_pvscsi.c linux-3.0.8/drivers/scsi/vmw_pvscsi.c
34717 --- linux-3.0.8/drivers/scsi/vmw_pvscsi.c 2011-07-21 22:17:23.000000000 -0400
34718 +++ linux-3.0.8/drivers/scsi/vmw_pvscsi.c 2011-08-23 21:48:14.000000000 -0400
34719 @@ -447,6 +447,8 @@ static void pvscsi_setup_all_rings(const
34720 dma_addr_t base;
34721 unsigned i;
34722
34723 + pax_track_stack();
34724 +
34725 cmd.ringsStatePPN = adapter->ringStatePA >> PAGE_SHIFT;
34726 cmd.reqRingNumPages = adapter->req_pages;
34727 cmd.cmpRingNumPages = adapter->cmp_pages;
34728 diff -urNp linux-3.0.8/drivers/spi/dw_spi_pci.c linux-3.0.8/drivers/spi/dw_spi_pci.c
34729 --- linux-3.0.8/drivers/spi/dw_spi_pci.c 2011-07-21 22:17:23.000000000 -0400
34730 +++ linux-3.0.8/drivers/spi/dw_spi_pci.c 2011-10-11 10:44:33.000000000 -0400
34731 @@ -148,7 +148,7 @@ static int spi_resume(struct pci_dev *pd
34732 #define spi_resume NULL
34733 #endif
34734
34735 -static const struct pci_device_id pci_ids[] __devinitdata = {
34736 +static const struct pci_device_id pci_ids[] __devinitconst = {
34737 /* Intel MID platform SPI controller 0 */
34738 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0800) },
34739 {},
34740 diff -urNp linux-3.0.8/drivers/spi/spi.c linux-3.0.8/drivers/spi/spi.c
34741 --- linux-3.0.8/drivers/spi/spi.c 2011-07-21 22:17:23.000000000 -0400
34742 +++ linux-3.0.8/drivers/spi/spi.c 2011-08-23 21:47:56.000000000 -0400
34743 @@ -1023,7 +1023,7 @@ int spi_bus_unlock(struct spi_master *ma
34744 EXPORT_SYMBOL_GPL(spi_bus_unlock);
34745
34746 /* portable code must never pass more than 32 bytes */
34747 -#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
34748 +#define SPI_BUFSIZ max(32UL,SMP_CACHE_BYTES)
34749
34750 static u8 *buf;
34751
34752 diff -urNp linux-3.0.8/drivers/staging/ath6kl/os/linux/ar6000_drv.c linux-3.0.8/drivers/staging/ath6kl/os/linux/ar6000_drv.c
34753 --- linux-3.0.8/drivers/staging/ath6kl/os/linux/ar6000_drv.c 2011-10-24 08:05:21.000000000 -0400
34754 +++ linux-3.0.8/drivers/staging/ath6kl/os/linux/ar6000_drv.c 2011-08-23 21:48:14.000000000 -0400
34755 @@ -362,7 +362,7 @@ static struct ar_cookie s_ar_cookie_mem[
34756 (((ar)->arTargetType == TARGET_TYPE_AR6003) ? AR6003_HOST_INTEREST_ITEM_ADDRESS(item) : 0))
34757
34758
34759 -static struct net_device_ops ar6000_netdev_ops = {
34760 +static net_device_ops_no_const ar6000_netdev_ops = {
34761 .ndo_init = NULL,
34762 .ndo_open = ar6000_open,
34763 .ndo_stop = ar6000_close,
34764 diff -urNp linux-3.0.8/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h linux-3.0.8/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h
34765 --- linux-3.0.8/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h 2011-07-21 22:17:23.000000000 -0400
34766 +++ linux-3.0.8/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h 2011-08-23 21:47:56.000000000 -0400
34767 @@ -30,7 +30,7 @@ typedef bool (*ar6k_pal_recv_pkt_t)(void
34768 typedef struct ar6k_pal_config_s
34769 {
34770 ar6k_pal_recv_pkt_t fpar6k_pal_recv_pkt;
34771 -}ar6k_pal_config_t;
34772 +} __no_const ar6k_pal_config_t;
34773
34774 void register_pal_cb(ar6k_pal_config_t *palConfig_p);
34775 #endif /* _AR6K_PAL_H_ */
34776 diff -urNp linux-3.0.8/drivers/staging/brcm80211/brcmfmac/dhd_linux.c linux-3.0.8/drivers/staging/brcm80211/brcmfmac/dhd_linux.c
34777 --- linux-3.0.8/drivers/staging/brcm80211/brcmfmac/dhd_linux.c 2011-07-21 22:17:23.000000000 -0400
34778 +++ linux-3.0.8/drivers/staging/brcm80211/brcmfmac/dhd_linux.c 2011-08-23 21:47:56.000000000 -0400
34779 @@ -853,14 +853,14 @@ static void dhd_op_if(dhd_if_t *ifp)
34780 free_netdev(ifp->net);
34781 }
34782 /* Allocate etherdev, including space for private structure */
34783 - ifp->net = alloc_etherdev(sizeof(dhd));
34784 + ifp->net = alloc_etherdev(sizeof(*dhd));
34785 if (!ifp->net) {
34786 DHD_ERROR(("%s: OOM - alloc_etherdev\n", __func__));
34787 ret = -ENOMEM;
34788 }
34789 if (ret == 0) {
34790 strcpy(ifp->net->name, ifp->name);
34791 - memcpy(netdev_priv(ifp->net), &dhd, sizeof(dhd));
34792 + memcpy(netdev_priv(ifp->net), dhd, sizeof(*dhd));
34793 err = dhd_net_attach(&dhd->pub, ifp->idx);
34794 if (err != 0) {
34795 DHD_ERROR(("%s: dhd_net_attach failed, "
34796 @@ -1872,7 +1872,7 @@ dhd_pub_t *dhd_attach(struct dhd_bus *bu
34797 strcpy(nv_path, nvram_path);
34798
34799 /* Allocate etherdev, including space for private structure */
34800 - net = alloc_etherdev(sizeof(dhd));
34801 + net = alloc_etherdev(sizeof(*dhd));
34802 if (!net) {
34803 DHD_ERROR(("%s: OOM - alloc_etherdev\n", __func__));
34804 goto fail;
34805 @@ -1888,7 +1888,7 @@ dhd_pub_t *dhd_attach(struct dhd_bus *bu
34806 /*
34807 * Save the dhd_info into the priv
34808 */
34809 - memcpy(netdev_priv(net), &dhd, sizeof(dhd));
34810 + memcpy(netdev_priv(net), dhd, sizeof(*dhd));
34811
34812 /* Set network interface name if it was provided as module parameter */
34813 if (iface_name[0]) {
34814 @@ -2004,7 +2004,7 @@ dhd_pub_t *dhd_attach(struct dhd_bus *bu
34815 /*
34816 * Save the dhd_info into the priv
34817 */
34818 - memcpy(netdev_priv(net), &dhd, sizeof(dhd));
34819 + memcpy(netdev_priv(net), dhd, sizeof(*dhd));
34820
34821 #if defined(CUSTOMER_HW2) && defined(CONFIG_WIFI_CONTROL_FUNC)
34822 g_bus = bus;
34823 diff -urNp linux-3.0.8/drivers/staging/brcm80211/brcmsmac/phy/wlc_phy_int.h linux-3.0.8/drivers/staging/brcm80211/brcmsmac/phy/wlc_phy_int.h
34824 --- linux-3.0.8/drivers/staging/brcm80211/brcmsmac/phy/wlc_phy_int.h 2011-07-21 22:17:23.000000000 -0400
34825 +++ linux-3.0.8/drivers/staging/brcm80211/brcmsmac/phy/wlc_phy_int.h 2011-08-23 21:47:56.000000000 -0400
34826 @@ -593,7 +593,7 @@ struct phy_func_ptr {
34827 initfn_t carrsuppr;
34828 rxsigpwrfn_t rxsigpwr;
34829 detachfn_t detach;
34830 -};
34831 +} __no_const;
34832 typedef struct phy_func_ptr phy_func_ptr_t;
34833
34834 struct phy_info {
34835 diff -urNp linux-3.0.8/drivers/staging/brcm80211/include/bcmsdh.h linux-3.0.8/drivers/staging/brcm80211/include/bcmsdh.h
34836 --- linux-3.0.8/drivers/staging/brcm80211/include/bcmsdh.h 2011-07-21 22:17:23.000000000 -0400
34837 +++ linux-3.0.8/drivers/staging/brcm80211/include/bcmsdh.h 2011-08-23 21:47:56.000000000 -0400
34838 @@ -185,7 +185,7 @@ typedef struct {
34839 u16 func, uint bustype, void *regsva, void *param);
34840 /* detach from device */
34841 void (*detach) (void *ch);
34842 -} bcmsdh_driver_t;
34843 +} __no_const bcmsdh_driver_t;
34844
34845 /* platform specific/high level functions */
34846 extern int bcmsdh_register(bcmsdh_driver_t *driver);
34847 diff -urNp linux-3.0.8/drivers/staging/et131x/et1310_tx.c linux-3.0.8/drivers/staging/et131x/et1310_tx.c
34848 --- linux-3.0.8/drivers/staging/et131x/et1310_tx.c 2011-07-21 22:17:23.000000000 -0400
34849 +++ linux-3.0.8/drivers/staging/et131x/et1310_tx.c 2011-08-23 21:47:56.000000000 -0400
34850 @@ -635,11 +635,11 @@ inline void et131x_free_send_packet(stru
34851 struct net_device_stats *stats = &etdev->net_stats;
34852
34853 if (tcb->flags & fMP_DEST_BROAD)
34854 - atomic_inc(&etdev->Stats.brdcstxmt);
34855 + atomic_inc_unchecked(&etdev->Stats.brdcstxmt);
34856 else if (tcb->flags & fMP_DEST_MULTI)
34857 - atomic_inc(&etdev->Stats.multixmt);
34858 + atomic_inc_unchecked(&etdev->Stats.multixmt);
34859 else
34860 - atomic_inc(&etdev->Stats.unixmt);
34861 + atomic_inc_unchecked(&etdev->Stats.unixmt);
34862
34863 if (tcb->skb) {
34864 stats->tx_bytes += tcb->skb->len;
34865 diff -urNp linux-3.0.8/drivers/staging/et131x/et131x_adapter.h linux-3.0.8/drivers/staging/et131x/et131x_adapter.h
34866 --- linux-3.0.8/drivers/staging/et131x/et131x_adapter.h 2011-07-21 22:17:23.000000000 -0400
34867 +++ linux-3.0.8/drivers/staging/et131x/et131x_adapter.h 2011-08-23 21:47:56.000000000 -0400
34868 @@ -110,11 +110,11 @@ typedef struct _ce_stats_t {
34869 * operations
34870 */
34871 u32 unircv; /* # multicast packets received */
34872 - atomic_t unixmt; /* # multicast packets for Tx */
34873 + atomic_unchecked_t unixmt; /* # multicast packets for Tx */
34874 u32 multircv; /* # multicast packets received */
34875 - atomic_t multixmt; /* # multicast packets for Tx */
34876 + atomic_unchecked_t multixmt; /* # multicast packets for Tx */
34877 u32 brdcstrcv; /* # broadcast packets received */
34878 - atomic_t brdcstxmt; /* # broadcast packets for Tx */
34879 + atomic_unchecked_t brdcstxmt; /* # broadcast packets for Tx */
34880 u32 norcvbuf; /* # Rx packets discarded */
34881 u32 noxmtbuf; /* # Tx packets discarded */
34882
34883 diff -urNp linux-3.0.8/drivers/staging/hv/channel.c linux-3.0.8/drivers/staging/hv/channel.c
34884 --- linux-3.0.8/drivers/staging/hv/channel.c 2011-10-24 08:05:21.000000000 -0400
34885 +++ linux-3.0.8/drivers/staging/hv/channel.c 2011-08-23 21:47:56.000000000 -0400
34886 @@ -433,8 +433,8 @@ int vmbus_establish_gpadl(struct vmbus_c
34887 int ret = 0;
34888 int t;
34889
34890 - next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
34891 - atomic_inc(&vmbus_connection.next_gpadl_handle);
34892 + next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
34893 + atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
34894
34895 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
34896 if (ret)
34897 diff -urNp linux-3.0.8/drivers/staging/hv/hv.c linux-3.0.8/drivers/staging/hv/hv.c
34898 --- linux-3.0.8/drivers/staging/hv/hv.c 2011-07-21 22:17:23.000000000 -0400
34899 +++ linux-3.0.8/drivers/staging/hv/hv.c 2011-08-23 21:47:56.000000000 -0400
34900 @@ -132,7 +132,7 @@ static u64 do_hypercall(u64 control, voi
34901 u64 output_address = (output) ? virt_to_phys(output) : 0;
34902 u32 output_address_hi = output_address >> 32;
34903 u32 output_address_lo = output_address & 0xFFFFFFFF;
34904 - volatile void *hypercall_page = hv_context.hypercall_page;
34905 + volatile void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
34906
34907 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
34908 "=a"(hv_status_lo) : "d" (control_hi),
34909 diff -urNp linux-3.0.8/drivers/staging/hv/hv_mouse.c linux-3.0.8/drivers/staging/hv/hv_mouse.c
34910 --- linux-3.0.8/drivers/staging/hv/hv_mouse.c 2011-07-21 22:17:23.000000000 -0400
34911 +++ linux-3.0.8/drivers/staging/hv/hv_mouse.c 2011-08-23 21:47:56.000000000 -0400
34912 @@ -879,8 +879,10 @@ static void reportdesc_callback(struct h
34913 if (hid_dev) {
34914 DPRINT_INFO(INPUTVSC_DRV, "hid_device created");
34915
34916 - hid_dev->ll_driver->open = mousevsc_hid_open;
34917 - hid_dev->ll_driver->close = mousevsc_hid_close;
34918 + pax_open_kernel();
34919 + *(void **)&hid_dev->ll_driver->open = mousevsc_hid_open;
34920 + *(void **)&hid_dev->ll_driver->close = mousevsc_hid_close;
34921 + pax_close_kernel();
34922
34923 hid_dev->bus = BUS_VIRTUAL;
34924 hid_dev->vendor = input_device_ctx->device_info.vendor;
34925 diff -urNp linux-3.0.8/drivers/staging/hv/hyperv_vmbus.h linux-3.0.8/drivers/staging/hv/hyperv_vmbus.h
34926 --- linux-3.0.8/drivers/staging/hv/hyperv_vmbus.h 2011-07-21 22:17:23.000000000 -0400
34927 +++ linux-3.0.8/drivers/staging/hv/hyperv_vmbus.h 2011-08-23 21:47:56.000000000 -0400
34928 @@ -559,7 +559,7 @@ enum vmbus_connect_state {
34929 struct vmbus_connection {
34930 enum vmbus_connect_state conn_state;
34931
34932 - atomic_t next_gpadl_handle;
34933 + atomic_unchecked_t next_gpadl_handle;
34934
34935 /*
34936 * Represents channel interrupts. Each bit position represents a
34937 diff -urNp linux-3.0.8/drivers/staging/hv/rndis_filter.c linux-3.0.8/drivers/staging/hv/rndis_filter.c
34938 --- linux-3.0.8/drivers/staging/hv/rndis_filter.c 2011-10-24 08:05:21.000000000 -0400
34939 +++ linux-3.0.8/drivers/staging/hv/rndis_filter.c 2011-08-23 21:47:56.000000000 -0400
34940 @@ -43,7 +43,7 @@ struct rndis_device {
34941
34942 enum rndis_device_state state;
34943 u32 link_stat;
34944 - atomic_t new_req_id;
34945 + atomic_unchecked_t new_req_id;
34946
34947 spinlock_t request_lock;
34948 struct list_head req_list;
34949 @@ -117,7 +117,7 @@ static struct rndis_request *get_rndis_r
34950 * template
34951 */
34952 set = &rndis_msg->msg.set_req;
34953 - set->req_id = atomic_inc_return(&dev->new_req_id);
34954 + set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
34955
34956 /* Add to the request list */
34957 spin_lock_irqsave(&dev->request_lock, flags);
34958 @@ -637,7 +637,7 @@ static void rndis_filter_halt_device(str
34959
34960 /* Setup the rndis set */
34961 halt = &request->request_msg.msg.halt_req;
34962 - halt->req_id = atomic_inc_return(&dev->new_req_id);
34963 + halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
34964
34965 /* Ignore return since this msg is optional. */
34966 rndis_filter_send_request(dev, request);
34967 diff -urNp linux-3.0.8/drivers/staging/hv/vmbus_drv.c linux-3.0.8/drivers/staging/hv/vmbus_drv.c
34968 --- linux-3.0.8/drivers/staging/hv/vmbus_drv.c 2011-07-21 22:17:23.000000000 -0400
34969 +++ linux-3.0.8/drivers/staging/hv/vmbus_drv.c 2011-08-23 21:47:56.000000000 -0400
34970 @@ -668,11 +668,11 @@ int vmbus_child_device_register(struct h
34971 {
34972 int ret = 0;
34973
34974 - static atomic_t device_num = ATOMIC_INIT(0);
34975 + static atomic_unchecked_t device_num = ATOMIC_INIT(0);
34976
34977 /* Set the device name. Otherwise, device_register() will fail. */
34978 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
34979 - atomic_inc_return(&device_num));
34980 + atomic_inc_return_unchecked(&device_num));
34981
34982 /* The new device belongs to this bus */
34983 child_device_obj->device.bus = &hv_bus; /* device->dev.bus; */
34984 diff -urNp linux-3.0.8/drivers/staging/iio/ring_generic.h linux-3.0.8/drivers/staging/iio/ring_generic.h
34985 --- linux-3.0.8/drivers/staging/iio/ring_generic.h 2011-07-21 22:17:23.000000000 -0400
34986 +++ linux-3.0.8/drivers/staging/iio/ring_generic.h 2011-08-23 21:47:56.000000000 -0400
34987 @@ -62,7 +62,7 @@ struct iio_ring_access_funcs {
34988
34989 int (*is_enabled)(struct iio_ring_buffer *ring);
34990 int (*enable)(struct iio_ring_buffer *ring);
34991 -};
34992 +} __no_const;
34993
34994 struct iio_ring_setup_ops {
34995 int (*preenable)(struct iio_dev *);
34996 diff -urNp linux-3.0.8/drivers/staging/mei/interface.c linux-3.0.8/drivers/staging/mei/interface.c
34997 --- linux-3.0.8/drivers/staging/mei/interface.c 2011-07-21 22:17:23.000000000 -0400
34998 +++ linux-3.0.8/drivers/staging/mei/interface.c 2011-11-01 05:23:50.000000000 -0400
34999 @@ -333,7 +333,7 @@ int mei_send_flow_control(struct mei_dev
35000 mei_hdr->reserved = 0;
35001
35002 mei_flow_control = (struct hbm_flow_control *) &dev->wr_msg_buf[1];
35003 - memset(mei_flow_control, 0, sizeof(mei_flow_control));
35004 + memset(mei_flow_control, 0, sizeof(*mei_flow_control));
35005 mei_flow_control->host_addr = cl->host_client_id;
35006 mei_flow_control->me_addr = cl->me_client_id;
35007 mei_flow_control->cmd.cmd = MEI_FLOW_CONTROL_CMD;
35008 @@ -397,7 +397,7 @@ int mei_disconnect(struct mei_device *de
35009
35010 mei_cli_disconnect =
35011 (struct hbm_client_disconnect_request *) &dev->wr_msg_buf[1];
35012 - memset(mei_cli_disconnect, 0, sizeof(mei_cli_disconnect));
35013 + memset(mei_cli_disconnect, 0, sizeof(*mei_cli_disconnect));
35014 mei_cli_disconnect->host_addr = cl->host_client_id;
35015 mei_cli_disconnect->me_addr = cl->me_client_id;
35016 mei_cli_disconnect->cmd.cmd = CLIENT_DISCONNECT_REQ_CMD;
35017 diff -urNp linux-3.0.8/drivers/staging/octeon/ethernet.c linux-3.0.8/drivers/staging/octeon/ethernet.c
35018 --- linux-3.0.8/drivers/staging/octeon/ethernet.c 2011-07-21 22:17:23.000000000 -0400
35019 +++ linux-3.0.8/drivers/staging/octeon/ethernet.c 2011-08-23 21:47:56.000000000 -0400
35020 @@ -258,11 +258,11 @@ static struct net_device_stats *cvm_oct_
35021 * since the RX tasklet also increments it.
35022 */
35023 #ifdef CONFIG_64BIT
35024 - atomic64_add(rx_status.dropped_packets,
35025 - (atomic64_t *)&priv->stats.rx_dropped);
35026 + atomic64_add_unchecked(rx_status.dropped_packets,
35027 + (atomic64_unchecked_t *)&priv->stats.rx_dropped);
35028 #else
35029 - atomic_add(rx_status.dropped_packets,
35030 - (atomic_t *)&priv->stats.rx_dropped);
35031 + atomic_add_unchecked(rx_status.dropped_packets,
35032 + (atomic_unchecked_t *)&priv->stats.rx_dropped);
35033 #endif
35034 }
35035
35036 diff -urNp linux-3.0.8/drivers/staging/octeon/ethernet-rx.c linux-3.0.8/drivers/staging/octeon/ethernet-rx.c
35037 --- linux-3.0.8/drivers/staging/octeon/ethernet-rx.c 2011-07-21 22:17:23.000000000 -0400
35038 +++ linux-3.0.8/drivers/staging/octeon/ethernet-rx.c 2011-08-23 21:47:56.000000000 -0400
35039 @@ -417,11 +417,11 @@ static int cvm_oct_napi_poll(struct napi
35040 /* Increment RX stats for virtual ports */
35041 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
35042 #ifdef CONFIG_64BIT
35043 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
35044 - atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
35045 + atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
35046 + atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
35047 #else
35048 - atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
35049 - atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
35050 + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
35051 + atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
35052 #endif
35053 }
35054 netif_receive_skb(skb);
35055 @@ -433,9 +433,9 @@ static int cvm_oct_napi_poll(struct napi
35056 dev->name);
35057 */
35058 #ifdef CONFIG_64BIT
35059 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
35060 + atomic64_unchecked_add(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
35061 #else
35062 - atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
35063 + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
35064 #endif
35065 dev_kfree_skb_irq(skb);
35066 }
35067 diff -urNp linux-3.0.8/drivers/staging/pohmelfs/inode.c linux-3.0.8/drivers/staging/pohmelfs/inode.c
35068 --- linux-3.0.8/drivers/staging/pohmelfs/inode.c 2011-07-21 22:17:23.000000000 -0400
35069 +++ linux-3.0.8/drivers/staging/pohmelfs/inode.c 2011-08-23 21:47:56.000000000 -0400
35070 @@ -1856,7 +1856,7 @@ static int pohmelfs_fill_super(struct su
35071 mutex_init(&psb->mcache_lock);
35072 psb->mcache_root = RB_ROOT;
35073 psb->mcache_timeout = msecs_to_jiffies(5000);
35074 - atomic_long_set(&psb->mcache_gen, 0);
35075 + atomic_long_set_unchecked(&psb->mcache_gen, 0);
35076
35077 psb->trans_max_pages = 100;
35078
35079 @@ -1871,7 +1871,7 @@ static int pohmelfs_fill_super(struct su
35080 INIT_LIST_HEAD(&psb->crypto_ready_list);
35081 INIT_LIST_HEAD(&psb->crypto_active_list);
35082
35083 - atomic_set(&psb->trans_gen, 1);
35084 + atomic_set_unchecked(&psb->trans_gen, 1);
35085 atomic_long_set(&psb->total_inodes, 0);
35086
35087 mutex_init(&psb->state_lock);
35088 diff -urNp linux-3.0.8/drivers/staging/pohmelfs/mcache.c linux-3.0.8/drivers/staging/pohmelfs/mcache.c
35089 --- linux-3.0.8/drivers/staging/pohmelfs/mcache.c 2011-07-21 22:17:23.000000000 -0400
35090 +++ linux-3.0.8/drivers/staging/pohmelfs/mcache.c 2011-08-23 21:47:56.000000000 -0400
35091 @@ -121,7 +121,7 @@ struct pohmelfs_mcache *pohmelfs_mcache_
35092 m->data = data;
35093 m->start = start;
35094 m->size = size;
35095 - m->gen = atomic_long_inc_return(&psb->mcache_gen);
35096 + m->gen = atomic_long_inc_return_unchecked(&psb->mcache_gen);
35097
35098 mutex_lock(&psb->mcache_lock);
35099 err = pohmelfs_mcache_insert(psb, m);
35100 diff -urNp linux-3.0.8/drivers/staging/pohmelfs/netfs.h linux-3.0.8/drivers/staging/pohmelfs/netfs.h
35101 --- linux-3.0.8/drivers/staging/pohmelfs/netfs.h 2011-07-21 22:17:23.000000000 -0400
35102 +++ linux-3.0.8/drivers/staging/pohmelfs/netfs.h 2011-08-23 21:47:56.000000000 -0400
35103 @@ -571,14 +571,14 @@ struct pohmelfs_config;
35104 struct pohmelfs_sb {
35105 struct rb_root mcache_root;
35106 struct mutex mcache_lock;
35107 - atomic_long_t mcache_gen;
35108 + atomic_long_unchecked_t mcache_gen;
35109 unsigned long mcache_timeout;
35110
35111 unsigned int idx;
35112
35113 unsigned int trans_retries;
35114
35115 - atomic_t trans_gen;
35116 + atomic_unchecked_t trans_gen;
35117
35118 unsigned int crypto_attached_size;
35119 unsigned int crypto_align_size;
35120 diff -urNp linux-3.0.8/drivers/staging/pohmelfs/trans.c linux-3.0.8/drivers/staging/pohmelfs/trans.c
35121 --- linux-3.0.8/drivers/staging/pohmelfs/trans.c 2011-07-21 22:17:23.000000000 -0400
35122 +++ linux-3.0.8/drivers/staging/pohmelfs/trans.c 2011-08-23 21:47:56.000000000 -0400
35123 @@ -492,7 +492,7 @@ int netfs_trans_finish(struct netfs_tran
35124 int err;
35125 struct netfs_cmd *cmd = t->iovec.iov_base;
35126
35127 - t->gen = atomic_inc_return(&psb->trans_gen);
35128 + t->gen = atomic_inc_return_unchecked(&psb->trans_gen);
35129
35130 cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) +
35131 t->attached_size + t->attached_pages * sizeof(struct netfs_cmd);
35132 diff -urNp linux-3.0.8/drivers/staging/rtl8712/rtl871x_io.h linux-3.0.8/drivers/staging/rtl8712/rtl871x_io.h
35133 --- linux-3.0.8/drivers/staging/rtl8712/rtl871x_io.h 2011-07-21 22:17:23.000000000 -0400
35134 +++ linux-3.0.8/drivers/staging/rtl8712/rtl871x_io.h 2011-08-23 21:47:56.000000000 -0400
35135 @@ -83,7 +83,7 @@ struct _io_ops {
35136 u8 *pmem);
35137 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
35138 u8 *pmem);
35139 -};
35140 +} __no_const;
35141
35142 struct io_req {
35143 struct list_head list;
35144 diff -urNp linux-3.0.8/drivers/staging/sbe-2t3e3/netdev.c linux-3.0.8/drivers/staging/sbe-2t3e3/netdev.c
35145 --- linux-3.0.8/drivers/staging/sbe-2t3e3/netdev.c 2011-07-21 22:17:23.000000000 -0400
35146 +++ linux-3.0.8/drivers/staging/sbe-2t3e3/netdev.c 2011-08-24 18:21:41.000000000 -0400
35147 @@ -51,7 +51,7 @@ int t3e3_ioctl(struct net_device *dev, s
35148 t3e3_if_config(sc, cmd_2t3e3, (char *)&param, &resp, &rlen);
35149
35150 if (rlen)
35151 - if (copy_to_user(data, &resp, rlen))
35152 + if (rlen > sizeof resp || copy_to_user(data, &resp, rlen))
35153 return -EFAULT;
35154
35155 return 0;
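The added "rlen > sizeof resp" test in the t3e3 ioctl refuses to copy more bytes to user space than the on-stack response actually holds, so an oversized length reported by the hardware can no longer disclose adjacent stack memory. A userspace sketch of the same guard (names and sizes are illustrative):

    #include <stdio.h>
    #include <string.h>

    struct resp { unsigned char data[128]; };

    /* Reject a reported length that exceeds the buffer that was filled in. */
    static int copy_response(void *dst, const struct resp *resp, size_t rlen)
    {
        if (rlen > sizeof(*resp))
            return -1;              /* would leak memory past resp; -EFAULT analogue */
        memcpy(dst, resp, rlen);
        return 0;
    }

    int main(void)
    {
        struct resp r = { .data = "ok" };
        unsigned char out[256];

        printf("valid copy: %d\n", copy_response(out, &r, 64));    /* 0  */
        printf("oversized:  %d\n", copy_response(out, &r, 200));   /* -1 */
        return 0;
    }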
35156 diff -urNp linux-3.0.8/drivers/staging/tty/stallion.c linux-3.0.8/drivers/staging/tty/stallion.c
35157 --- linux-3.0.8/drivers/staging/tty/stallion.c 2011-07-21 22:17:23.000000000 -0400
35158 +++ linux-3.0.8/drivers/staging/tty/stallion.c 2011-08-23 21:48:14.000000000 -0400
35159 @@ -2406,6 +2406,8 @@ static int stl_getportstruct(struct stlp
35160 struct stlport stl_dummyport;
35161 struct stlport *portp;
35162
35163 + pax_track_stack();
35164 +
35165 if (copy_from_user(&stl_dummyport, arg, sizeof(struct stlport)))
35166 return -EFAULT;
35167 portp = stl_getport(stl_dummyport.brdnr, stl_dummyport.panelnr,
35168 diff -urNp linux-3.0.8/drivers/staging/usbip/usbip_common.h linux-3.0.8/drivers/staging/usbip/usbip_common.h
35169 --- linux-3.0.8/drivers/staging/usbip/usbip_common.h 2011-07-21 22:17:23.000000000 -0400
35170 +++ linux-3.0.8/drivers/staging/usbip/usbip_common.h 2011-08-23 21:47:56.000000000 -0400
35171 @@ -315,7 +315,7 @@ struct usbip_device {
35172 void (*shutdown)(struct usbip_device *);
35173 void (*reset)(struct usbip_device *);
35174 void (*unusable)(struct usbip_device *);
35175 - } eh_ops;
35176 + } __no_const eh_ops;
35177 };
35178
35179 void usbip_pack_pdu(struct usbip_header *pdu, struct urb *urb, int cmd,
35180 diff -urNp linux-3.0.8/drivers/staging/usbip/vhci.h linux-3.0.8/drivers/staging/usbip/vhci.h
35181 --- linux-3.0.8/drivers/staging/usbip/vhci.h 2011-07-21 22:17:23.000000000 -0400
35182 +++ linux-3.0.8/drivers/staging/usbip/vhci.h 2011-08-23 21:47:56.000000000 -0400
35183 @@ -94,7 +94,7 @@ struct vhci_hcd {
35184 unsigned resuming:1;
35185 unsigned long re_timeout;
35186
35187 - atomic_t seqnum;
35188 + atomic_unchecked_t seqnum;
35189
35190 /*
35191 * NOTE:
35192 diff -urNp linux-3.0.8/drivers/staging/usbip/vhci_hcd.c linux-3.0.8/drivers/staging/usbip/vhci_hcd.c
35193 --- linux-3.0.8/drivers/staging/usbip/vhci_hcd.c 2011-10-24 08:05:21.000000000 -0400
35194 +++ linux-3.0.8/drivers/staging/usbip/vhci_hcd.c 2011-08-23 21:47:56.000000000 -0400
35195 @@ -511,7 +511,7 @@ static void vhci_tx_urb(struct urb *urb)
35196 return;
35197 }
35198
35199 - priv->seqnum = atomic_inc_return(&the_controller->seqnum);
35200 + priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
35201 if (priv->seqnum == 0xffff)
35202 dev_info(&urb->dev->dev, "seqnum max\n");
35203
35204 @@ -765,7 +765,7 @@ static int vhci_urb_dequeue(struct usb_h
35205 return -ENOMEM;
35206 }
35207
35208 - unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
35209 + unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
35210 if (unlink->seqnum == 0xffff)
35211 pr_info("seqnum max\n");
35212
35213 @@ -955,7 +955,7 @@ static int vhci_start(struct usb_hcd *hc
35214 vdev->rhport = rhport;
35215 }
35216
35217 - atomic_set(&vhci->seqnum, 0);
35218 + atomic_set_unchecked(&vhci->seqnum, 0);
35219 spin_lock_init(&vhci->lock);
35220
35221 hcd->power_budget = 0; /* no limit */
35222 diff -urNp linux-3.0.8/drivers/staging/usbip/vhci_rx.c linux-3.0.8/drivers/staging/usbip/vhci_rx.c
35223 --- linux-3.0.8/drivers/staging/usbip/vhci_rx.c 2011-07-21 22:17:23.000000000 -0400
35224 +++ linux-3.0.8/drivers/staging/usbip/vhci_rx.c 2011-08-23 21:47:56.000000000 -0400
35225 @@ -76,7 +76,7 @@ static void vhci_recv_ret_submit(struct
35226 if (!urb) {
35227 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
35228 pr_info("max seqnum %d\n",
35229 - atomic_read(&the_controller->seqnum));
35230 + atomic_read_unchecked(&the_controller->seqnum));
35231 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
35232 return;
35233 }
35234 diff -urNp linux-3.0.8/drivers/staging/vt6655/hostap.c linux-3.0.8/drivers/staging/vt6655/hostap.c
35235 --- linux-3.0.8/drivers/staging/vt6655/hostap.c 2011-07-21 22:17:23.000000000 -0400
35236 +++ linux-3.0.8/drivers/staging/vt6655/hostap.c 2011-08-23 21:47:56.000000000 -0400
35237 @@ -79,14 +79,13 @@ static int msglevel
35238 *
35239 */
35240
35241 +static net_device_ops_no_const apdev_netdev_ops;
35242 +
35243 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
35244 {
35245 PSDevice apdev_priv;
35246 struct net_device *dev = pDevice->dev;
35247 int ret;
35248 - const struct net_device_ops apdev_netdev_ops = {
35249 - .ndo_start_xmit = pDevice->tx_80211,
35250 - };
35251
35252 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
35253
35254 @@ -98,6 +97,8 @@ static int hostap_enable_hostapd(PSDevic
35255 *apdev_priv = *pDevice;
35256 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
35257
35258 + /* only half broken now */
35259 + apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
35260 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
35261
35262 pDevice->apdev->type = ARPHRD_IEEE80211;
35263 diff -urNp linux-3.0.8/drivers/staging/vt6656/hostap.c linux-3.0.8/drivers/staging/vt6656/hostap.c
35264 --- linux-3.0.8/drivers/staging/vt6656/hostap.c 2011-07-21 22:17:23.000000000 -0400
35265 +++ linux-3.0.8/drivers/staging/vt6656/hostap.c 2011-08-23 21:47:56.000000000 -0400
35266 @@ -80,14 +80,13 @@ static int msglevel
35267 *
35268 */
35269
35270 +static net_device_ops_no_const apdev_netdev_ops;
35271 +
35272 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
35273 {
35274 PSDevice apdev_priv;
35275 struct net_device *dev = pDevice->dev;
35276 int ret;
35277 - const struct net_device_ops apdev_netdev_ops = {
35278 - .ndo_start_xmit = pDevice->tx_80211,
35279 - };
35280
35281 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
35282
35283 @@ -99,6 +98,8 @@ static int hostap_enable_hostapd(PSDevic
35284 *apdev_priv = *pDevice;
35285 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
35286
35287 + /* only half broken now */
35288 + apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
35289 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
35290
35291 pDevice->apdev->type = ARPHRD_IEEE80211;
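Both the vt6655 and vt6656 hunks replace a function-local const net_device_ops, whose ndo_start_xmit depends on the specific device, with one file-scope writable table (the net_device_ops_no_const typedef) that is filled in just before use, presumably to fit the patch set's constified ops handling; the retained "only half broken" comment concedes that a single shared table still cannot express per-device transmit handlers. A userspace sketch of the pattern (struct and names are illustrative):

    #include <stdio.h>

    struct netdev_ops_sketch {
        int (*start_xmit)(const char *pkt);
    };

    static int vendor_tx(const char *pkt)
    {
        printf("tx: %s\n", pkt);
        return 0;
    }

    static struct netdev_ops_sketch apdev_ops;   /* writable, file scope */

    int main(void)
    {
        apdev_ops.start_xmit = vendor_tx;        /* assigned at runtime */
        return apdev_ops.start_xmit("hello");
    }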
35292 diff -urNp linux-3.0.8/drivers/staging/wlan-ng/hfa384x_usb.c linux-3.0.8/drivers/staging/wlan-ng/hfa384x_usb.c
35293 --- linux-3.0.8/drivers/staging/wlan-ng/hfa384x_usb.c 2011-07-21 22:17:23.000000000 -0400
35294 +++ linux-3.0.8/drivers/staging/wlan-ng/hfa384x_usb.c 2011-08-23 21:47:56.000000000 -0400
35295 @@ -204,7 +204,7 @@ static void unlocked_usbctlx_complete(hf
35296
35297 struct usbctlx_completor {
35298 int (*complete) (struct usbctlx_completor *);
35299 -};
35300 +} __no_const;
35301
35302 static int
35303 hfa384x_usbctlx_complete_sync(hfa384x_t *hw,
35304 diff -urNp linux-3.0.8/drivers/staging/zcache/tmem.c linux-3.0.8/drivers/staging/zcache/tmem.c
35305 --- linux-3.0.8/drivers/staging/zcache/tmem.c 2011-07-21 22:17:23.000000000 -0400
35306 +++ linux-3.0.8/drivers/staging/zcache/tmem.c 2011-08-23 21:47:56.000000000 -0400
35307 @@ -39,7 +39,7 @@
35308 * A tmem host implementation must use this function to register callbacks
35309 * for memory allocation.
35310 */
35311 -static struct tmem_hostops tmem_hostops;
35312 +static tmem_hostops_no_const tmem_hostops;
35313
35314 static void tmem_objnode_tree_init(void);
35315
35316 @@ -53,7 +53,7 @@ void tmem_register_hostops(struct tmem_h
35317 * A tmem host implementation must use this function to register
35318 * callbacks for a page-accessible memory (PAM) implementation
35319 */
35320 -static struct tmem_pamops tmem_pamops;
35321 +static tmem_pamops_no_const tmem_pamops;
35322
35323 void tmem_register_pamops(struct tmem_pamops *m)
35324 {
35325 diff -urNp linux-3.0.8/drivers/staging/zcache/tmem.h linux-3.0.8/drivers/staging/zcache/tmem.h
35326 --- linux-3.0.8/drivers/staging/zcache/tmem.h 2011-07-21 22:17:23.000000000 -0400
35327 +++ linux-3.0.8/drivers/staging/zcache/tmem.h 2011-08-23 21:47:56.000000000 -0400
35328 @@ -171,6 +171,7 @@ struct tmem_pamops {
35329 int (*get_data)(struct page *, void *, struct tmem_pool *);
35330 void (*free)(void *, struct tmem_pool *);
35331 };
35332 +typedef struct tmem_pamops __no_const tmem_pamops_no_const;
35333 extern void tmem_register_pamops(struct tmem_pamops *m);
35334
35335 /* memory allocation methods provided by the host implementation */
35336 @@ -180,6 +181,7 @@ struct tmem_hostops {
35337 struct tmem_objnode *(*objnode_alloc)(struct tmem_pool *);
35338 void (*objnode_free)(struct tmem_objnode *, struct tmem_pool *);
35339 };
35340 +typedef struct tmem_hostops __no_const tmem_hostops_no_const;
35341 extern void tmem_register_hostops(struct tmem_hostops *m);
35342
35343 /* core tmem accessor functions */
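tmem registers its host and PAM callbacks at runtime, so the two new typedefs opt those particular instances out of the constification applied to ops structures elsewhere in the patch. __no_const is a PaX/constify-plugin attribute; the sketch below assumes the usual fallback where it expands to nothing on a plain build, leaving the *_no_const typedef as a writable alias of the struct type:

    #define __no_const            /* plugin attribute; assumed empty in a plain build */

    struct tmem_hostops_sketch {  /* illustrative stand-in for struct tmem_hostops */
        void *(*obj_alloc)(void);
        void  (*obj_free)(void *);
    };
    typedef struct tmem_hostops_sketch __no_const tmem_hostops_no_const_sketch;

    static tmem_hostops_no_const_sketch hostops; /* stays writable for registration */

    int main(void)
    {
        hostops.obj_alloc = 0;    /* legal: the instance is not const-qualified */
        return 0;
    }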
35344 diff -urNp linux-3.0.8/drivers/target/target_core_alua.c linux-3.0.8/drivers/target/target_core_alua.c
35345 --- linux-3.0.8/drivers/target/target_core_alua.c 2011-07-21 22:17:23.000000000 -0400
35346 +++ linux-3.0.8/drivers/target/target_core_alua.c 2011-08-23 21:48:14.000000000 -0400
35347 @@ -675,6 +675,8 @@ static int core_alua_update_tpg_primary_
35348 char path[ALUA_METADATA_PATH_LEN];
35349 int len;
35350
35351 + pax_track_stack();
35352 +
35353 memset(path, 0, ALUA_METADATA_PATH_LEN);
35354
35355 len = snprintf(md_buf, tg_pt_gp->tg_pt_gp_md_buf_len,
35356 @@ -938,6 +940,8 @@ static int core_alua_update_tpg_secondar
35357 char path[ALUA_METADATA_PATH_LEN], wwn[ALUA_SECONDARY_METADATA_WWN_LEN];
35358 int len;
35359
35360 + pax_track_stack();
35361 +
35362 memset(path, 0, ALUA_METADATA_PATH_LEN);
35363 memset(wwn, 0, ALUA_SECONDARY_METADATA_WWN_LEN);
35364
35365 diff -urNp linux-3.0.8/drivers/target/target_core_cdb.c linux-3.0.8/drivers/target/target_core_cdb.c
35366 --- linux-3.0.8/drivers/target/target_core_cdb.c 2011-07-21 22:17:23.000000000 -0400
35367 +++ linux-3.0.8/drivers/target/target_core_cdb.c 2011-08-23 21:48:14.000000000 -0400
35368 @@ -838,6 +838,8 @@ target_emulate_modesense(struct se_cmd *
35369 int length = 0;
35370 unsigned char buf[SE_MODE_PAGE_BUF];
35371
35372 + pax_track_stack();
35373 +
35374 memset(buf, 0, SE_MODE_PAGE_BUF);
35375
35376 switch (cdb[2] & 0x3f) {
35377 diff -urNp linux-3.0.8/drivers/target/target_core_configfs.c linux-3.0.8/drivers/target/target_core_configfs.c
35378 --- linux-3.0.8/drivers/target/target_core_configfs.c 2011-07-21 22:17:23.000000000 -0400
35379 +++ linux-3.0.8/drivers/target/target_core_configfs.c 2011-08-23 21:48:14.000000000 -0400
35380 @@ -1276,6 +1276,8 @@ static ssize_t target_core_dev_pr_show_a
35381 ssize_t len = 0;
35382 int reg_count = 0, prf_isid;
35383
35384 + pax_track_stack();
35385 +
35386 if (!(su_dev->se_dev_ptr))
35387 return -ENODEV;
35388
35389 diff -urNp linux-3.0.8/drivers/target/target_core_pr.c linux-3.0.8/drivers/target/target_core_pr.c
35390 --- linux-3.0.8/drivers/target/target_core_pr.c 2011-07-21 22:17:23.000000000 -0400
35391 +++ linux-3.0.8/drivers/target/target_core_pr.c 2011-08-23 21:48:14.000000000 -0400
35392 @@ -918,6 +918,8 @@ static int __core_scsi3_check_aptpl_regi
35393 unsigned char t_port[PR_APTPL_MAX_TPORT_LEN];
35394 u16 tpgt;
35395
35396 + pax_track_stack();
35397 +
35398 memset(i_port, 0, PR_APTPL_MAX_IPORT_LEN);
35399 memset(t_port, 0, PR_APTPL_MAX_TPORT_LEN);
35400 /*
35401 @@ -1861,6 +1863,8 @@ static int __core_scsi3_update_aptpl_buf
35402 ssize_t len = 0;
35403 int reg_count = 0;
35404
35405 + pax_track_stack();
35406 +
35407 memset(buf, 0, pr_aptpl_buf_len);
35408 /*
35409 * Called to clear metadata once APTPL has been deactivated.
35410 @@ -1983,6 +1987,8 @@ static int __core_scsi3_write_aptpl_to_f
35411 char path[512];
35412 int ret;
35413
35414 + pax_track_stack();
35415 +
35416 memset(iov, 0, sizeof(struct iovec));
35417 memset(path, 0, 512);
35418
35419 diff -urNp linux-3.0.8/drivers/target/target_core_tmr.c linux-3.0.8/drivers/target/target_core_tmr.c
35420 --- linux-3.0.8/drivers/target/target_core_tmr.c 2011-07-21 22:17:23.000000000 -0400
35421 +++ linux-3.0.8/drivers/target/target_core_tmr.c 2011-08-23 21:47:56.000000000 -0400
35422 @@ -269,7 +269,7 @@ int core_tmr_lun_reset(
35423 CMD_TFO(cmd)->get_task_tag(cmd), cmd->pr_res_key,
35424 T_TASK(cmd)->t_task_cdbs,
35425 atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
35426 - atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
35427 + atomic_read_unchecked(&T_TASK(cmd)->t_task_cdbs_sent),
35428 atomic_read(&T_TASK(cmd)->t_transport_active),
35429 atomic_read(&T_TASK(cmd)->t_transport_stop),
35430 atomic_read(&T_TASK(cmd)->t_transport_sent));
35431 @@ -311,7 +311,7 @@ int core_tmr_lun_reset(
35432 DEBUG_LR("LUN_RESET: got t_transport_active = 1 for"
35433 " task: %p, t_fe_count: %d dev: %p\n", task,
35434 fe_count, dev);
35435 - atomic_set(&T_TASK(cmd)->t_transport_aborted, 1);
35436 + atomic_set_unchecked(&T_TASK(cmd)->t_transport_aborted, 1);
35437 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
35438 flags);
35439 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
35440 @@ -321,7 +321,7 @@ int core_tmr_lun_reset(
35441 }
35442 DEBUG_LR("LUN_RESET: Got t_transport_active = 0 for task: %p,"
35443 " t_fe_count: %d dev: %p\n", task, fe_count, dev);
35444 - atomic_set(&T_TASK(cmd)->t_transport_aborted, 1);
35445 + atomic_set_unchecked(&T_TASK(cmd)->t_transport_aborted, 1);
35446 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
35447 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
35448
35449 diff -urNp linux-3.0.8/drivers/target/target_core_transport.c linux-3.0.8/drivers/target/target_core_transport.c
35450 --- linux-3.0.8/drivers/target/target_core_transport.c 2011-07-21 22:17:23.000000000 -0400
35451 +++ linux-3.0.8/drivers/target/target_core_transport.c 2011-08-23 21:47:56.000000000 -0400
35452 @@ -1681,7 +1681,7 @@ struct se_device *transport_add_device_t
35453
35454 dev->queue_depth = dev_limits->queue_depth;
35455 atomic_set(&dev->depth_left, dev->queue_depth);
35456 - atomic_set(&dev->dev_ordered_id, 0);
35457 + atomic_set_unchecked(&dev->dev_ordered_id, 0);
35458
35459 se_dev_set_default_attribs(dev, dev_limits);
35460
35461 @@ -1882,7 +1882,7 @@ static int transport_check_alloc_task_at
35462 * Used to determine when ORDERED commands should go from
35463 * Dormant to Active status.
35464 */
35465 - cmd->se_ordered_id = atomic_inc_return(&SE_DEV(cmd)->dev_ordered_id);
35466 + cmd->se_ordered_id = atomic_inc_return_unchecked(&SE_DEV(cmd)->dev_ordered_id);
35467 smp_mb__after_atomic_inc();
35468 DEBUG_STA("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
35469 cmd->se_ordered_id, cmd->sam_task_attr,
35470 @@ -2169,7 +2169,7 @@ static void transport_generic_request_fa
35471 " t_transport_active: %d t_transport_stop: %d"
35472 " t_transport_sent: %d\n", T_TASK(cmd)->t_task_cdbs,
35473 atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
35474 - atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
35475 + atomic_read_unchecked(&T_TASK(cmd)->t_task_cdbs_sent),
35476 atomic_read(&T_TASK(cmd)->t_task_cdbs_ex_left),
35477 atomic_read(&T_TASK(cmd)->t_transport_active),
35478 atomic_read(&T_TASK(cmd)->t_transport_stop),
35479 @@ -2673,9 +2673,9 @@ check_depth:
35480 spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
35481 atomic_set(&task->task_active, 1);
35482 atomic_set(&task->task_sent, 1);
35483 - atomic_inc(&T_TASK(cmd)->t_task_cdbs_sent);
35484 + atomic_inc_unchecked(&T_TASK(cmd)->t_task_cdbs_sent);
35485
35486 - if (atomic_read(&T_TASK(cmd)->t_task_cdbs_sent) ==
35487 + if (atomic_read_unchecked(&T_TASK(cmd)->t_task_cdbs_sent) ==
35488 T_TASK(cmd)->t_task_cdbs)
35489 atomic_set(&cmd->transport_sent, 1);
35490
35491 @@ -5568,7 +5568,7 @@ static void transport_generic_wait_for_t
35492 atomic_set(&T_TASK(cmd)->transport_lun_stop, 0);
35493 }
35494 if (!atomic_read(&T_TASK(cmd)->t_transport_active) ||
35495 - atomic_read(&T_TASK(cmd)->t_transport_aborted))
35496 + atomic_read_unchecked(&T_TASK(cmd)->t_transport_aborted))
35497 goto remove;
35498
35499 atomic_set(&T_TASK(cmd)->t_transport_stop, 1);
35500 @@ -5797,7 +5797,7 @@ int transport_check_aborted_status(struc
35501 {
35502 int ret = 0;
35503
35504 - if (atomic_read(&T_TASK(cmd)->t_transport_aborted) != 0) {
35505 + if (atomic_read_unchecked(&T_TASK(cmd)->t_transport_aborted) != 0) {
35506 if (!(send_status) ||
35507 (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
35508 return 1;
35509 @@ -5825,7 +5825,7 @@ void transport_send_task_abort(struct se
35510 */
35511 if (cmd->data_direction == DMA_TO_DEVICE) {
35512 if (CMD_TFO(cmd)->write_pending_status(cmd) != 0) {
35513 - atomic_inc(&T_TASK(cmd)->t_transport_aborted);
35514 + atomic_inc_unchecked(&T_TASK(cmd)->t_transport_aborted);
35515 smp_mb__after_atomic_inc();
35516 cmd->scsi_status = SAM_STAT_TASK_ABORTED;
35517 transport_new_cmd_failure(cmd);
35518 @@ -5949,7 +5949,7 @@ static void transport_processing_shutdow
35519 CMD_TFO(cmd)->get_task_tag(cmd),
35520 T_TASK(cmd)->t_task_cdbs,
35521 atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
35522 - atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
35523 + atomic_read_unchecked(&T_TASK(cmd)->t_task_cdbs_sent),
35524 atomic_read(&T_TASK(cmd)->t_transport_active),
35525 atomic_read(&T_TASK(cmd)->t_transport_stop),
35526 atomic_read(&T_TASK(cmd)->t_transport_sent));
35527 diff -urNp linux-3.0.8/drivers/telephony/ixj.c linux-3.0.8/drivers/telephony/ixj.c
35528 --- linux-3.0.8/drivers/telephony/ixj.c 2011-07-21 22:17:23.000000000 -0400
35529 +++ linux-3.0.8/drivers/telephony/ixj.c 2011-08-23 21:48:14.000000000 -0400
35530 @@ -4976,6 +4976,8 @@ static int ixj_daa_cid_read(IXJ *j)
35531 bool mContinue;
35532 char *pIn, *pOut;
35533
35534 + pax_track_stack();
35535 +
35536 if (!SCI_Prepare(j))
35537 return 0;
35538
35539 diff -urNp linux-3.0.8/drivers/tty/hvc/hvcs.c linux-3.0.8/drivers/tty/hvc/hvcs.c
35540 --- linux-3.0.8/drivers/tty/hvc/hvcs.c 2011-07-21 22:17:23.000000000 -0400
35541 +++ linux-3.0.8/drivers/tty/hvc/hvcs.c 2011-08-23 21:47:56.000000000 -0400
35542 @@ -83,6 +83,7 @@
35543 #include <asm/hvcserver.h>
35544 #include <asm/uaccess.h>
35545 #include <asm/vio.h>
35546 +#include <asm/local.h>
35547
35548 /*
35549 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
35550 @@ -270,7 +271,7 @@ struct hvcs_struct {
35551 unsigned int index;
35552
35553 struct tty_struct *tty;
35554 - int open_count;
35555 + local_t open_count;
35556
35557 /*
35558 * Used to tell the driver kernel_thread what operations need to take
35559 @@ -422,7 +423,7 @@ static ssize_t hvcs_vterm_state_store(st
35560
35561 spin_lock_irqsave(&hvcsd->lock, flags);
35562
35563 - if (hvcsd->open_count > 0) {
35564 + if (local_read(&hvcsd->open_count) > 0) {
35565 spin_unlock_irqrestore(&hvcsd->lock, flags);
35566 printk(KERN_INFO "HVCS: vterm state unchanged. "
35567 "The hvcs device node is still in use.\n");
35568 @@ -1145,7 +1146,7 @@ static int hvcs_open(struct tty_struct *
35569 if ((retval = hvcs_partner_connect(hvcsd)))
35570 goto error_release;
35571
35572 - hvcsd->open_count = 1;
35573 + local_set(&hvcsd->open_count, 1);
35574 hvcsd->tty = tty;
35575 tty->driver_data = hvcsd;
35576
35577 @@ -1179,7 +1180,7 @@ fast_open:
35578
35579 spin_lock_irqsave(&hvcsd->lock, flags);
35580 kref_get(&hvcsd->kref);
35581 - hvcsd->open_count++;
35582 + local_inc(&hvcsd->open_count);
35583 hvcsd->todo_mask |= HVCS_SCHED_READ;
35584 spin_unlock_irqrestore(&hvcsd->lock, flags);
35585
35586 @@ -1223,7 +1224,7 @@ static void hvcs_close(struct tty_struct
35587 hvcsd = tty->driver_data;
35588
35589 spin_lock_irqsave(&hvcsd->lock, flags);
35590 - if (--hvcsd->open_count == 0) {
35591 + if (local_dec_and_test(&hvcsd->open_count)) {
35592
35593 vio_disable_interrupts(hvcsd->vdev);
35594
35595 @@ -1249,10 +1250,10 @@ static void hvcs_close(struct tty_struct
35596 free_irq(irq, hvcsd);
35597 kref_put(&hvcsd->kref, destroy_hvcs_struct);
35598 return;
35599 - } else if (hvcsd->open_count < 0) {
35600 + } else if (local_read(&hvcsd->open_count) < 0) {
35601 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
35602 " is missmanaged.\n",
35603 - hvcsd->vdev->unit_address, hvcsd->open_count);
35604 + hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
35605 }
35606
35607 spin_unlock_irqrestore(&hvcsd->lock, flags);
35608 @@ -1268,7 +1269,7 @@ static void hvcs_hangup(struct tty_struc
35609
35610 spin_lock_irqsave(&hvcsd->lock, flags);
35611 /* Preserve this so that we know how many kref refs to put */
35612 - temp_open_count = hvcsd->open_count;
35613 + temp_open_count = local_read(&hvcsd->open_count);
35614
35615 /*
35616 * Don't kref put inside the spinlock because the destruction
35617 @@ -1283,7 +1284,7 @@ static void hvcs_hangup(struct tty_struc
35618 hvcsd->tty->driver_data = NULL;
35619 hvcsd->tty = NULL;
35620
35621 - hvcsd->open_count = 0;
35622 + local_set(&hvcsd->open_count, 0);
35623
35624 /* This will drop any buffered data on the floor which is OK in a hangup
35625 * scenario. */
35626 @@ -1354,7 +1355,7 @@ static int hvcs_write(struct tty_struct
35627 * the middle of a write operation? This is a crummy place to do this
35628 * but we want to keep it all in the spinlock.
35629 */
35630 - if (hvcsd->open_count <= 0) {
35631 + if (local_read(&hvcsd->open_count) <= 0) {
35632 spin_unlock_irqrestore(&hvcsd->lock, flags);
35633 return -ENODEV;
35634 }
35635 @@ -1428,7 +1429,7 @@ static int hvcs_write_room(struct tty_st
35636 {
35637 struct hvcs_struct *hvcsd = tty->driver_data;
35638
35639 - if (!hvcsd || hvcsd->open_count <= 0)
35640 + if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
35641 return 0;
35642
35643 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
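The hvcs open_count moves from a plain int to local_t, so every open/close site now goes through explicit atomic accessors (local_inc, local_dec_and_test, local_read) while the accounting itself is unchanged. A userspace analogue of the inc-on-open / dec-and-test-on-close flow, using C11 atomics rather than the kernel API:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int open_count;   /* analogue of hvcsd->open_count */

    static void dev_open(void)
    {
        atomic_fetch_add(&open_count, 1);
    }

    static int dev_close(void)
    {
        /* mirrors local_dec_and_test(): true when the count reaches zero */
        return atomic_fetch_sub(&open_count, 1) == 1;
    }

    int main(void)
    {
        dev_open();
        dev_open();
        printf("last close? %d\n", dev_close());   /* 0 */
        printf("last close? %d\n", dev_close());   /* 1: tear down here */
        return 0;
    }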
35644 diff -urNp linux-3.0.8/drivers/tty/ipwireless/tty.c linux-3.0.8/drivers/tty/ipwireless/tty.c
35645 --- linux-3.0.8/drivers/tty/ipwireless/tty.c 2011-07-21 22:17:23.000000000 -0400
35646 +++ linux-3.0.8/drivers/tty/ipwireless/tty.c 2011-08-23 21:47:56.000000000 -0400
35647 @@ -29,6 +29,7 @@
35648 #include <linux/tty_driver.h>
35649 #include <linux/tty_flip.h>
35650 #include <linux/uaccess.h>
35651 +#include <asm/local.h>
35652
35653 #include "tty.h"
35654 #include "network.h"
35655 @@ -51,7 +52,7 @@ struct ipw_tty {
35656 int tty_type;
35657 struct ipw_network *network;
35658 struct tty_struct *linux_tty;
35659 - int open_count;
35660 + local_t open_count;
35661 unsigned int control_lines;
35662 struct mutex ipw_tty_mutex;
35663 int tx_bytes_queued;
35664 @@ -127,10 +128,10 @@ static int ipw_open(struct tty_struct *l
35665 mutex_unlock(&tty->ipw_tty_mutex);
35666 return -ENODEV;
35667 }
35668 - if (tty->open_count == 0)
35669 + if (local_read(&tty->open_count) == 0)
35670 tty->tx_bytes_queued = 0;
35671
35672 - tty->open_count++;
35673 + local_inc(&tty->open_count);
35674
35675 tty->linux_tty = linux_tty;
35676 linux_tty->driver_data = tty;
35677 @@ -146,9 +147,7 @@ static int ipw_open(struct tty_struct *l
35678
35679 static void do_ipw_close(struct ipw_tty *tty)
35680 {
35681 - tty->open_count--;
35682 -
35683 - if (tty->open_count == 0) {
35684 + if (local_dec_return(&tty->open_count) == 0) {
35685 struct tty_struct *linux_tty = tty->linux_tty;
35686
35687 if (linux_tty != NULL) {
35688 @@ -169,7 +168,7 @@ static void ipw_hangup(struct tty_struct
35689 return;
35690
35691 mutex_lock(&tty->ipw_tty_mutex);
35692 - if (tty->open_count == 0) {
35693 + if (local_read(&tty->open_count) == 0) {
35694 mutex_unlock(&tty->ipw_tty_mutex);
35695 return;
35696 }
35697 @@ -198,7 +197,7 @@ void ipwireless_tty_received(struct ipw_
35698 return;
35699 }
35700
35701 - if (!tty->open_count) {
35702 + if (!local_read(&tty->open_count)) {
35703 mutex_unlock(&tty->ipw_tty_mutex);
35704 return;
35705 }
35706 @@ -240,7 +239,7 @@ static int ipw_write(struct tty_struct *
35707 return -ENODEV;
35708
35709 mutex_lock(&tty->ipw_tty_mutex);
35710 - if (!tty->open_count) {
35711 + if (!local_read(&tty->open_count)) {
35712 mutex_unlock(&tty->ipw_tty_mutex);
35713 return -EINVAL;
35714 }
35715 @@ -280,7 +279,7 @@ static int ipw_write_room(struct tty_str
35716 if (!tty)
35717 return -ENODEV;
35718
35719 - if (!tty->open_count)
35720 + if (!local_read(&tty->open_count))
35721 return -EINVAL;
35722
35723 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
35724 @@ -322,7 +321,7 @@ static int ipw_chars_in_buffer(struct tt
35725 if (!tty)
35726 return 0;
35727
35728 - if (!tty->open_count)
35729 + if (!local_read(&tty->open_count))
35730 return 0;
35731
35732 return tty->tx_bytes_queued;
35733 @@ -403,7 +402,7 @@ static int ipw_tiocmget(struct tty_struc
35734 if (!tty)
35735 return -ENODEV;
35736
35737 - if (!tty->open_count)
35738 + if (!local_read(&tty->open_count))
35739 return -EINVAL;
35740
35741 return get_control_lines(tty);
35742 @@ -419,7 +418,7 @@ ipw_tiocmset(struct tty_struct *linux_tt
35743 if (!tty)
35744 return -ENODEV;
35745
35746 - if (!tty->open_count)
35747 + if (!local_read(&tty->open_count))
35748 return -EINVAL;
35749
35750 return set_control_lines(tty, set, clear);
35751 @@ -433,7 +432,7 @@ static int ipw_ioctl(struct tty_struct *
35752 if (!tty)
35753 return -ENODEV;
35754
35755 - if (!tty->open_count)
35756 + if (!local_read(&tty->open_count))
35757 return -EINVAL;
35758
35759 /* FIXME: Exactly how is the tty object locked here .. */
35760 @@ -582,7 +581,7 @@ void ipwireless_tty_free(struct ipw_tty
35761 against a parallel ioctl etc */
35762 mutex_lock(&ttyj->ipw_tty_mutex);
35763 }
35764 - while (ttyj->open_count)
35765 + while (local_read(&ttyj->open_count))
35766 do_ipw_close(ttyj);
35767 ipwireless_disassociate_network_ttys(network,
35768 ttyj->channel_idx);
35769 diff -urNp linux-3.0.8/drivers/tty/n_gsm.c linux-3.0.8/drivers/tty/n_gsm.c
35770 --- linux-3.0.8/drivers/tty/n_gsm.c 2011-10-24 08:05:21.000000000 -0400
35771 +++ linux-3.0.8/drivers/tty/n_gsm.c 2011-08-23 21:47:56.000000000 -0400
35772 @@ -1589,7 +1589,7 @@ static struct gsm_dlci *gsm_dlci_alloc(s
35773 return NULL;
35774 spin_lock_init(&dlci->lock);
35775 dlci->fifo = &dlci->_fifo;
35776 - if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
35777 + if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
35778 kfree(dlci);
35779 return NULL;
35780 }
35781 diff -urNp linux-3.0.8/drivers/tty/n_tty.c linux-3.0.8/drivers/tty/n_tty.c
35782 --- linux-3.0.8/drivers/tty/n_tty.c 2011-07-21 22:17:23.000000000 -0400
35783 +++ linux-3.0.8/drivers/tty/n_tty.c 2011-08-23 21:47:56.000000000 -0400
35784 @@ -2123,6 +2123,7 @@ void n_tty_inherit_ops(struct tty_ldisc_
35785 {
35786 *ops = tty_ldisc_N_TTY;
35787 ops->owner = NULL;
35788 - ops->refcount = ops->flags = 0;
35789 + atomic_set(&ops->refcount, 0);
35790 + ops->flags = 0;
35791 }
35792 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
35793 diff -urNp linux-3.0.8/drivers/tty/pty.c linux-3.0.8/drivers/tty/pty.c
35794 --- linux-3.0.8/drivers/tty/pty.c 2011-10-24 08:05:30.000000000 -0400
35795 +++ linux-3.0.8/drivers/tty/pty.c 2011-10-16 21:55:28.000000000 -0400
35796 @@ -767,8 +767,10 @@ static void __init unix98_pty_init(void)
35797 register_sysctl_table(pty_root_table);
35798
35799 /* Now create the /dev/ptmx special device */
35800 + pax_open_kernel();
35801 tty_default_fops(&ptmx_fops);
35802 - ptmx_fops.open = ptmx_open;
35803 + *(void **)&ptmx_fops.open = ptmx_open;
35804 + pax_close_kernel();
35805
35806 cdev_init(&ptmx_cdev, &ptmx_fops);
35807 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
35808 diff -urNp linux-3.0.8/drivers/tty/rocket.c linux-3.0.8/drivers/tty/rocket.c
35809 --- linux-3.0.8/drivers/tty/rocket.c 2011-07-21 22:17:23.000000000 -0400
35810 +++ linux-3.0.8/drivers/tty/rocket.c 2011-08-23 21:48:14.000000000 -0400
35811 @@ -1277,6 +1277,8 @@ static int get_ports(struct r_port *info
35812 struct rocket_ports tmp;
35813 int board;
35814
35815 + pax_track_stack();
35816 +
35817 if (!retports)
35818 return -EFAULT;
35819 memset(&tmp, 0, sizeof (tmp));
35820 diff -urNp linux-3.0.8/drivers/tty/serial/kgdboc.c linux-3.0.8/drivers/tty/serial/kgdboc.c
35821 --- linux-3.0.8/drivers/tty/serial/kgdboc.c 2011-07-21 22:17:23.000000000 -0400
35822 +++ linux-3.0.8/drivers/tty/serial/kgdboc.c 2011-08-23 21:47:56.000000000 -0400
35823 @@ -23,8 +23,9 @@
35824 #define MAX_CONFIG_LEN 40
35825
35826 static struct kgdb_io kgdboc_io_ops;
35827 +static struct kgdb_io kgdboc_io_ops_console;
35828
35829 -/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
35830 +/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
35831 static int configured = -1;
35832
35833 static char config[MAX_CONFIG_LEN];
35834 @@ -147,6 +148,8 @@ static void cleanup_kgdboc(void)
35835 kgdboc_unregister_kbd();
35836 if (configured == 1)
35837 kgdb_unregister_io_module(&kgdboc_io_ops);
35838 + else if (configured == 2)
35839 + kgdb_unregister_io_module(&kgdboc_io_ops_console);
35840 }
35841
35842 static int configure_kgdboc(void)
35843 @@ -156,13 +159,13 @@ static int configure_kgdboc(void)
35844 int err;
35845 char *cptr = config;
35846 struct console *cons;
35847 + int is_console = 0;
35848
35849 err = kgdboc_option_setup(config);
35850 if (err || !strlen(config) || isspace(config[0]))
35851 goto noconfig;
35852
35853 err = -ENODEV;
35854 - kgdboc_io_ops.is_console = 0;
35855 kgdb_tty_driver = NULL;
35856
35857 kgdboc_use_kms = 0;
35858 @@ -183,7 +186,7 @@ static int configure_kgdboc(void)
35859 int idx;
35860 if (cons->device && cons->device(cons, &idx) == p &&
35861 idx == tty_line) {
35862 - kgdboc_io_ops.is_console = 1;
35863 + is_console = 1;
35864 break;
35865 }
35866 cons = cons->next;
35867 @@ -193,12 +196,16 @@ static int configure_kgdboc(void)
35868 kgdb_tty_line = tty_line;
35869
35870 do_register:
35871 - err = kgdb_register_io_module(&kgdboc_io_ops);
35872 + if (is_console) {
35873 + err = kgdb_register_io_module(&kgdboc_io_ops_console);
35874 + configured = 2;
35875 + } else {
35876 + err = kgdb_register_io_module(&kgdboc_io_ops);
35877 + configured = 1;
35878 + }
35879 if (err)
35880 goto noconfig;
35881
35882 - configured = 1;
35883 -
35884 return 0;
35885
35886 noconfig:
35887 @@ -212,7 +219,7 @@ noconfig:
35888 static int __init init_kgdboc(void)
35889 {
35890 /* Already configured? */
35891 - if (configured == 1)
35892 + if (configured >= 1)
35893 return 0;
35894
35895 return configure_kgdboc();
35896 @@ -261,7 +268,7 @@ static int param_set_kgdboc_var(const ch
35897 if (config[len - 1] == '\n')
35898 config[len - 1] = '\0';
35899
35900 - if (configured == 1)
35901 + if (configured >= 1)
35902 cleanup_kgdboc();
35903
35904 /* Go and configure with the new params. */
35905 @@ -301,6 +308,15 @@ static struct kgdb_io kgdboc_io_ops = {
35906 .post_exception = kgdboc_post_exp_handler,
35907 };
35908
35909 +static struct kgdb_io kgdboc_io_ops_console = {
35910 + .name = "kgdboc",
35911 + .read_char = kgdboc_get_char,
35912 + .write_char = kgdboc_put_char,
35913 + .pre_exception = kgdboc_pre_exp_handler,
35914 + .post_exception = kgdboc_post_exp_handler,
35915 + .is_console = 1
35916 +};
35917 +
35918 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
35919 /* This is only available if kgdboc is a built in for early debugging */
35920 static int __init kgdboc_early_init(char *opt)
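Rather than toggling is_console inside one shared kgdb_io structure, presumably because such ops tables are treated as write-protected after init elsewhere in the patch set, kgdboc declares a second console-flavoured table and registers whichever one matches at configure time; "configured" is widened to 1/2 so cleanup_kgdboc() knows which table to unregister. A userspace sketch of selecting between two constant ops tables (names illustrative):

    #include <stdio.h>

    struct io_ops { const char *name; int is_console; };

    static const struct io_ops ops_plain   = { "kgdboc", 0 };
    static const struct io_ops ops_console = { "kgdboc", 1 };

    static const struct io_ops *registered;
    static int configured;                 /* 1 = plain, 2 = console */

    static void do_register(int is_console)
    {
        registered = is_console ? &ops_console : &ops_plain;
        configured = is_console ? 2 : 1;
    }

    int main(void)
    {
        do_register(1);
        printf("%s is_console=%d configured=%d\n",
               registered->name, registered->is_console, configured);
        return 0;
    }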
35921 diff -urNp linux-3.0.8/drivers/tty/serial/mfd.c linux-3.0.8/drivers/tty/serial/mfd.c
35922 --- linux-3.0.8/drivers/tty/serial/mfd.c 2011-07-21 22:17:23.000000000 -0400
35923 +++ linux-3.0.8/drivers/tty/serial/mfd.c 2011-10-11 10:44:33.000000000 -0400
35924 @@ -1423,7 +1423,7 @@ static void serial_hsu_remove(struct pci
35925 }
35926
35927 /* First 3 are UART ports, and the 4th is the DMA */
35928 -static const struct pci_device_id pci_ids[] __devinitdata = {
35929 +static const struct pci_device_id pci_ids[] __devinitconst = {
35930 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081B) },
35931 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081C) },
35932 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081D) },
35933 diff -urNp linux-3.0.8/drivers/tty/serial/mrst_max3110.c linux-3.0.8/drivers/tty/serial/mrst_max3110.c
35934 --- linux-3.0.8/drivers/tty/serial/mrst_max3110.c 2011-10-24 08:05:30.000000000 -0400
35935 +++ linux-3.0.8/drivers/tty/serial/mrst_max3110.c 2011-10-16 21:55:28.000000000 -0400
35936 @@ -393,6 +393,8 @@ static void max3110_con_receive(struct u
35937 int loop = 1, num, total = 0;
35938 u8 recv_buf[512], *pbuf;
35939
35940 + pax_track_stack();
35941 +
35942 pbuf = recv_buf;
35943 do {
35944 num = max3110_read_multi(max, pbuf);
35945 diff -urNp linux-3.0.8/drivers/tty/tty_io.c linux-3.0.8/drivers/tty/tty_io.c
35946 --- linux-3.0.8/drivers/tty/tty_io.c 2011-10-24 08:05:30.000000000 -0400
35947 +++ linux-3.0.8/drivers/tty/tty_io.c 2011-10-16 21:55:28.000000000 -0400
35948 @@ -3214,7 +3214,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
35949
35950 void tty_default_fops(struct file_operations *fops)
35951 {
35952 - *fops = tty_fops;
35953 + memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
35954 }
35955
35956 /*
35957 diff -urNp linux-3.0.8/drivers/tty/tty_ldisc.c linux-3.0.8/drivers/tty/tty_ldisc.c
35958 --- linux-3.0.8/drivers/tty/tty_ldisc.c 2011-07-21 22:17:23.000000000 -0400
35959 +++ linux-3.0.8/drivers/tty/tty_ldisc.c 2011-08-23 21:47:56.000000000 -0400
35960 @@ -74,7 +74,7 @@ static void put_ldisc(struct tty_ldisc *
35961 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
35962 struct tty_ldisc_ops *ldo = ld->ops;
35963
35964 - ldo->refcount--;
35965 + atomic_dec(&ldo->refcount);
35966 module_put(ldo->owner);
35967 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
35968
35969 @@ -109,7 +109,7 @@ int tty_register_ldisc(int disc, struct
35970 spin_lock_irqsave(&tty_ldisc_lock, flags);
35971 tty_ldiscs[disc] = new_ldisc;
35972 new_ldisc->num = disc;
35973 - new_ldisc->refcount = 0;
35974 + atomic_set(&new_ldisc->refcount, 0);
35975 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
35976
35977 return ret;
35978 @@ -137,7 +137,7 @@ int tty_unregister_ldisc(int disc)
35979 return -EINVAL;
35980
35981 spin_lock_irqsave(&tty_ldisc_lock, flags);
35982 - if (tty_ldiscs[disc]->refcount)
35983 + if (atomic_read(&tty_ldiscs[disc]->refcount))
35984 ret = -EBUSY;
35985 else
35986 tty_ldiscs[disc] = NULL;
35987 @@ -158,7 +158,7 @@ static struct tty_ldisc_ops *get_ldops(i
35988 if (ldops) {
35989 ret = ERR_PTR(-EAGAIN);
35990 if (try_module_get(ldops->owner)) {
35991 - ldops->refcount++;
35992 + atomic_inc(&ldops->refcount);
35993 ret = ldops;
35994 }
35995 }
35996 @@ -171,7 +171,7 @@ static void put_ldops(struct tty_ldisc_o
35997 unsigned long flags;
35998
35999 spin_lock_irqsave(&tty_ldisc_lock, flags);
36000 - ldops->refcount--;
36001 + atomic_dec(&ldops->refcount);
36002 module_put(ldops->owner);
36003 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
36004 }
36005 diff -urNp linux-3.0.8/drivers/tty/vt/keyboard.c linux-3.0.8/drivers/tty/vt/keyboard.c
36006 --- linux-3.0.8/drivers/tty/vt/keyboard.c 2011-07-21 22:17:23.000000000 -0400
36007 +++ linux-3.0.8/drivers/tty/vt/keyboard.c 2011-08-23 21:48:14.000000000 -0400
36008 @@ -656,6 +656,16 @@ static void k_spec(struct vc_data *vc, u
36009 kbd->kbdmode == VC_OFF) &&
36010 value != KVAL(K_SAK))
36011 return; /* SAK is allowed even in raw mode */
36012 +
36013 +#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
36014 + {
36015 + void *func = fn_handler[value];
36016 + if (func == fn_show_state || func == fn_show_ptregs ||
36017 + func == fn_show_mem)
36018 + return;
36019 + }
36020 +#endif
36021 +
36022 fn_handler[value](vc);
36023 }
36024
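Under the GRKERNSEC_PROC options, the keyboard hunk above keeps the special-key path (k_spec) from invoking the handlers that dump task, register, or memory state to the console: the handler is looked up first and the call is skipped when it is one of the suppressed functions. A userspace sketch of filtering a call through a function-pointer table (handlers are illustrative):

    #include <stdio.h>

    static void show_state(void) { puts("state"); }
    static void show_mem(void)   { puts("mem"); }
    static void do_beep(void)    { puts("beep"); }

    static void (*fn_handler_sketch[])(void) = { do_beep, show_state, show_mem };

    static void k_spec_sketch(unsigned int value)
    {
        void (*func)(void) = fn_handler_sketch[value];

        if (func == show_state || func == show_mem)
            return;                       /* suppressed, as in the hunk above */
        func();
    }

    int main(void)
    {
        k_spec_sketch(0);   /* beep */
        k_spec_sketch(1);   /* silently dropped */
        return 0;
    }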
36025 diff -urNp linux-3.0.8/drivers/tty/vt/vt.c linux-3.0.8/drivers/tty/vt/vt.c
36026 --- linux-3.0.8/drivers/tty/vt/vt.c 2011-07-21 22:17:23.000000000 -0400
36027 +++ linux-3.0.8/drivers/tty/vt/vt.c 2011-08-23 21:47:56.000000000 -0400
36028 @@ -259,7 +259,7 @@ EXPORT_SYMBOL_GPL(unregister_vt_notifier
36029
36030 static void notify_write(struct vc_data *vc, unsigned int unicode)
36031 {
36032 - struct vt_notifier_param param = { .vc = vc, unicode = unicode };
36033 + struct vt_notifier_param param = { .vc = vc, .c = unicode };
36034 atomic_notifier_call_chain(&vt_notifier_list, VT_WRITE, &param);
36035 }
36036
36037 diff -urNp linux-3.0.8/drivers/tty/vt/vt_ioctl.c linux-3.0.8/drivers/tty/vt/vt_ioctl.c
36038 --- linux-3.0.8/drivers/tty/vt/vt_ioctl.c 2011-07-21 22:17:23.000000000 -0400
36039 +++ linux-3.0.8/drivers/tty/vt/vt_ioctl.c 2011-08-23 21:48:14.000000000 -0400
36040 @@ -207,9 +207,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __
36041 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
36042 return -EFAULT;
36043
36044 - if (!capable(CAP_SYS_TTY_CONFIG))
36045 - perm = 0;
36046 -
36047 switch (cmd) {
36048 case KDGKBENT:
36049 key_map = key_maps[s];
36050 @@ -221,6 +218,9 @@ do_kdsk_ioctl(int cmd, struct kbentry __
36051 val = (i ? K_HOLE : K_NOSUCHMAP);
36052 return put_user(val, &user_kbe->kb_value);
36053 case KDSKBENT:
36054 + if (!capable(CAP_SYS_TTY_CONFIG))
36055 + perm = 0;
36056 +
36057 if (!perm)
36058 return -EPERM;
36059 if (!i && v == K_NOSUCHMAP) {
36060 @@ -322,9 +322,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
36061 int i, j, k;
36062 int ret;
36063
36064 - if (!capable(CAP_SYS_TTY_CONFIG))
36065 - perm = 0;
36066 -
36067 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
36068 if (!kbs) {
36069 ret = -ENOMEM;
36070 @@ -358,6 +355,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
36071 kfree(kbs);
36072 return ((p && *p) ? -EOVERFLOW : 0);
36073 case KDSKBSENT:
36074 + if (!capable(CAP_SYS_TTY_CONFIG))
36075 + perm = 0;
36076 +
36077 if (!perm) {
36078 ret = -EPERM;
36079 goto reterr;
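The vt_ioctl hunks relocate the CAP_SYS_TTY_CONFIG check: originally perm was cleared for any non-capable caller, gating reads and writes alike; moving the test under the KDSKBENT/KDSKBSENT cases leaves keymap reads unprivileged while still requiring the capability to change entries. A userspace sketch of applying a privilege check only to the mutating command (names illustrative):

    #include <stdio.h>

    enum cmd { GET_ENTRY, SET_ENTRY };

    static int handle(enum cmd cmd, int has_cap, int *store, int value)
    {
        switch (cmd) {
        case GET_ENTRY:
            return *store;             /* reading needs no extra privilege */
        case SET_ENTRY:
            if (!has_cap)
                return -1;             /* -EPERM analogue */
            *store = value;
            return 0;
        }
        return -1;
    }

    int main(void)
    {
        int entry = 42;
        printf("get: %d\n", handle(GET_ENTRY, 0, &entry, 0));
        printf("set w/o cap: %d\n", handle(SET_ENTRY, 0, &entry, 7));
        printf("set w/  cap: %d\n", handle(SET_ENTRY, 1, &entry, 7));
        return 0;
    }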
36080 diff -urNp linux-3.0.8/drivers/uio/uio.c linux-3.0.8/drivers/uio/uio.c
36081 --- linux-3.0.8/drivers/uio/uio.c 2011-07-21 22:17:23.000000000 -0400
36082 +++ linux-3.0.8/drivers/uio/uio.c 2011-08-23 21:47:56.000000000 -0400
36083 @@ -25,6 +25,7 @@
36084 #include <linux/kobject.h>
36085 #include <linux/cdev.h>
36086 #include <linux/uio_driver.h>
36087 +#include <asm/local.h>
36088
36089 #define UIO_MAX_DEVICES (1U << MINORBITS)
36090
36091 @@ -32,10 +33,10 @@ struct uio_device {
36092 struct module *owner;
36093 struct device *dev;
36094 int minor;
36095 - atomic_t event;
36096 + atomic_unchecked_t event;
36097 struct fasync_struct *async_queue;
36098 wait_queue_head_t wait;
36099 - int vma_count;
36100 + local_t vma_count;
36101 struct uio_info *info;
36102 struct kobject *map_dir;
36103 struct kobject *portio_dir;
36104 @@ -242,7 +243,7 @@ static ssize_t show_event(struct device
36105 struct device_attribute *attr, char *buf)
36106 {
36107 struct uio_device *idev = dev_get_drvdata(dev);
36108 - return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
36109 + return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
36110 }
36111
36112 static struct device_attribute uio_class_attributes[] = {
36113 @@ -408,7 +409,7 @@ void uio_event_notify(struct uio_info *i
36114 {
36115 struct uio_device *idev = info->uio_dev;
36116
36117 - atomic_inc(&idev->event);
36118 + atomic_inc_unchecked(&idev->event);
36119 wake_up_interruptible(&idev->wait);
36120 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
36121 }
36122 @@ -461,7 +462,7 @@ static int uio_open(struct inode *inode,
36123 }
36124
36125 listener->dev = idev;
36126 - listener->event_count = atomic_read(&idev->event);
36127 + listener->event_count = atomic_read_unchecked(&idev->event);
36128 filep->private_data = listener;
36129
36130 if (idev->info->open) {
36131 @@ -512,7 +513,7 @@ static unsigned int uio_poll(struct file
36132 return -EIO;
36133
36134 poll_wait(filep, &idev->wait, wait);
36135 - if (listener->event_count != atomic_read(&idev->event))
36136 + if (listener->event_count != atomic_read_unchecked(&idev->event))
36137 return POLLIN | POLLRDNORM;
36138 return 0;
36139 }
36140 @@ -537,7 +538,7 @@ static ssize_t uio_read(struct file *fil
36141 do {
36142 set_current_state(TASK_INTERRUPTIBLE);
36143
36144 - event_count = atomic_read(&idev->event);
36145 + event_count = atomic_read_unchecked(&idev->event);
36146 if (event_count != listener->event_count) {
36147 if (copy_to_user(buf, &event_count, count))
36148 retval = -EFAULT;
36149 @@ -606,13 +607,13 @@ static int uio_find_mem_index(struct vm_
36150 static void uio_vma_open(struct vm_area_struct *vma)
36151 {
36152 struct uio_device *idev = vma->vm_private_data;
36153 - idev->vma_count++;
36154 + local_inc(&idev->vma_count);
36155 }
36156
36157 static void uio_vma_close(struct vm_area_struct *vma)
36158 {
36159 struct uio_device *idev = vma->vm_private_data;
36160 - idev->vma_count--;
36161 + local_dec(&idev->vma_count);
36162 }
36163
36164 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
36165 @@ -823,7 +824,7 @@ int __uio_register_device(struct module
36166 idev->owner = owner;
36167 idev->info = info;
36168 init_waitqueue_head(&idev->wait);
36169 - atomic_set(&idev->event, 0);
36170 + atomic_set_unchecked(&idev->event, 0);
36171
36172 ret = uio_get_minor(idev);
36173 if (ret)
36174 diff -urNp linux-3.0.8/drivers/usb/atm/cxacru.c linux-3.0.8/drivers/usb/atm/cxacru.c
36175 --- linux-3.0.8/drivers/usb/atm/cxacru.c 2011-07-21 22:17:23.000000000 -0400
36176 +++ linux-3.0.8/drivers/usb/atm/cxacru.c 2011-08-23 21:47:56.000000000 -0400
36177 @@ -473,7 +473,7 @@ static ssize_t cxacru_sysfs_store_adsl_c
36178 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
36179 if (ret < 2)
36180 return -EINVAL;
36181 - if (index < 0 || index > 0x7f)
36182 + if (index > 0x7f)
36183 return -EINVAL;
36184 pos += tmp;
36185
36186 diff -urNp linux-3.0.8/drivers/usb/atm/usbatm.c linux-3.0.8/drivers/usb/atm/usbatm.c
36187 --- linux-3.0.8/drivers/usb/atm/usbatm.c 2011-07-21 22:17:23.000000000 -0400
36188 +++ linux-3.0.8/drivers/usb/atm/usbatm.c 2011-08-23 21:47:56.000000000 -0400
36189 @@ -332,7 +332,7 @@ static void usbatm_extract_one_cell(stru
36190 if (printk_ratelimit())
36191 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
36192 __func__, vpi, vci);
36193 - atomic_inc(&vcc->stats->rx_err);
36194 + atomic_inc_unchecked(&vcc->stats->rx_err);
36195 return;
36196 }
36197
36198 @@ -360,7 +360,7 @@ static void usbatm_extract_one_cell(stru
36199 if (length > ATM_MAX_AAL5_PDU) {
36200 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
36201 __func__, length, vcc);
36202 - atomic_inc(&vcc->stats->rx_err);
36203 + atomic_inc_unchecked(&vcc->stats->rx_err);
36204 goto out;
36205 }
36206
36207 @@ -369,14 +369,14 @@ static void usbatm_extract_one_cell(stru
36208 if (sarb->len < pdu_length) {
36209 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
36210 __func__, pdu_length, sarb->len, vcc);
36211 - atomic_inc(&vcc->stats->rx_err);
36212 + atomic_inc_unchecked(&vcc->stats->rx_err);
36213 goto out;
36214 }
36215
36216 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
36217 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
36218 __func__, vcc);
36219 - atomic_inc(&vcc->stats->rx_err);
36220 + atomic_inc_unchecked(&vcc->stats->rx_err);
36221 goto out;
36222 }
36223
36224 @@ -386,7 +386,7 @@ static void usbatm_extract_one_cell(stru
36225 if (printk_ratelimit())
36226 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
36227 __func__, length);
36228 - atomic_inc(&vcc->stats->rx_drop);
36229 + atomic_inc_unchecked(&vcc->stats->rx_drop);
36230 goto out;
36231 }
36232
36233 @@ -411,7 +411,7 @@ static void usbatm_extract_one_cell(stru
36234
36235 vcc->push(vcc, skb);
36236
36237 - atomic_inc(&vcc->stats->rx);
36238 + atomic_inc_unchecked(&vcc->stats->rx);
36239 out:
36240 skb_trim(sarb, 0);
36241 }
36242 @@ -614,7 +614,7 @@ static void usbatm_tx_process(unsigned l
36243 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
36244
36245 usbatm_pop(vcc, skb);
36246 - atomic_inc(&vcc->stats->tx);
36247 + atomic_inc_unchecked(&vcc->stats->tx);
36248
36249 skb = skb_dequeue(&instance->sndqueue);
36250 }
36251 @@ -773,11 +773,11 @@ static int usbatm_atm_proc_read(struct a
36252 if (!left--)
36253 return sprintf(page,
36254 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
36255 - atomic_read(&atm_dev->stats.aal5.tx),
36256 - atomic_read(&atm_dev->stats.aal5.tx_err),
36257 - atomic_read(&atm_dev->stats.aal5.rx),
36258 - atomic_read(&atm_dev->stats.aal5.rx_err),
36259 - atomic_read(&atm_dev->stats.aal5.rx_drop));
36260 + atomic_read_unchecked(&atm_dev->stats.aal5.tx),
36261 + atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
36262 + atomic_read_unchecked(&atm_dev->stats.aal5.rx),
36263 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
36264 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
36265
36266 if (!left--) {
36267 if (instance->disconnected)
36268 diff -urNp linux-3.0.8/drivers/usb/core/devices.c linux-3.0.8/drivers/usb/core/devices.c
36269 --- linux-3.0.8/drivers/usb/core/devices.c 2011-07-21 22:17:23.000000000 -0400
36270 +++ linux-3.0.8/drivers/usb/core/devices.c 2011-08-23 21:47:56.000000000 -0400
36271 @@ -126,7 +126,7 @@ static const char format_endpt[] =
36272 * time it gets called.
36273 */
36274 static struct device_connect_event {
36275 - atomic_t count;
36276 + atomic_unchecked_t count;
36277 wait_queue_head_t wait;
36278 } device_event = {
36279 .count = ATOMIC_INIT(1),
36280 @@ -164,7 +164,7 @@ static const struct class_info clas_info
36281
36282 void usbfs_conn_disc_event(void)
36283 {
36284 - atomic_add(2, &device_event.count);
36285 + atomic_add_unchecked(2, &device_event.count);
36286 wake_up(&device_event.wait);
36287 }
36288
36289 @@ -648,7 +648,7 @@ static unsigned int usb_device_poll(stru
36290
36291 poll_wait(file, &device_event.wait, wait);
36292
36293 - event_count = atomic_read(&device_event.count);
36294 + event_count = atomic_read_unchecked(&device_event.count);
36295 if (file->f_version != event_count) {
36296 file->f_version = event_count;
36297 return POLLIN | POLLRDNORM;
36298 diff -urNp linux-3.0.8/drivers/usb/core/message.c linux-3.0.8/drivers/usb/core/message.c
36299 --- linux-3.0.8/drivers/usb/core/message.c 2011-07-21 22:17:23.000000000 -0400
36300 +++ linux-3.0.8/drivers/usb/core/message.c 2011-08-23 21:47:56.000000000 -0400
36301 @@ -869,8 +869,8 @@ char *usb_cache_string(struct usb_device
36302 buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO);
36303 if (buf) {
36304 len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE);
36305 - if (len > 0) {
36306 - smallbuf = kmalloc(++len, GFP_NOIO);
36307 + if (len++ > 0) {
36308 + smallbuf = kmalloc(len, GFP_NOIO);
36309 if (!smallbuf)
36310 return buf;
36311 memcpy(smallbuf, buf, len);
36312 diff -urNp linux-3.0.8/drivers/usb/early/ehci-dbgp.c linux-3.0.8/drivers/usb/early/ehci-dbgp.c
36313 --- linux-3.0.8/drivers/usb/early/ehci-dbgp.c 2011-07-21 22:17:23.000000000 -0400
36314 +++ linux-3.0.8/drivers/usb/early/ehci-dbgp.c 2011-08-23 21:47:56.000000000 -0400
36315 @@ -97,7 +97,8 @@ static inline u32 dbgp_len_update(u32 x,
36316
36317 #ifdef CONFIG_KGDB
36318 static struct kgdb_io kgdbdbgp_io_ops;
36319 -#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
36320 +static struct kgdb_io kgdbdbgp_io_ops_console;
36321 +#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
36322 #else
36323 #define dbgp_kgdb_mode (0)
36324 #endif
36325 @@ -1035,6 +1036,13 @@ static struct kgdb_io kgdbdbgp_io_ops =
36326 .write_char = kgdbdbgp_write_char,
36327 };
36328
36329 +static struct kgdb_io kgdbdbgp_io_ops_console = {
36330 + .name = "kgdbdbgp",
36331 + .read_char = kgdbdbgp_read_char,
36332 + .write_char = kgdbdbgp_write_char,
36333 + .is_console = 1
36334 +};
36335 +
36336 static int kgdbdbgp_wait_time;
36337
36338 static int __init kgdbdbgp_parse_config(char *str)
36339 @@ -1050,8 +1058,10 @@ static int __init kgdbdbgp_parse_config(
36340 ptr++;
36341 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
36342 }
36343 - kgdb_register_io_module(&kgdbdbgp_io_ops);
36344 - kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
36345 + if (early_dbgp_console.index != -1)
36346 + kgdb_register_io_module(&kgdbdbgp_io_ops_console);
36347 + else
36348 + kgdb_register_io_module(&kgdbdbgp_io_ops);
36349
36350 return 0;
36351 }
36352 diff -urNp linux-3.0.8/drivers/usb/host/xhci-mem.c linux-3.0.8/drivers/usb/host/xhci-mem.c
36353 --- linux-3.0.8/drivers/usb/host/xhci-mem.c 2011-07-21 22:17:23.000000000 -0400
36354 +++ linux-3.0.8/drivers/usb/host/xhci-mem.c 2011-08-23 21:48:14.000000000 -0400
36355 @@ -1685,6 +1685,8 @@ static int xhci_check_trb_in_td_math(str
36356 unsigned int num_tests;
36357 int i, ret;
36358
36359 + pax_track_stack();
36360 +
36361 num_tests = ARRAY_SIZE(simple_test_vector);
36362 for (i = 0; i < num_tests; i++) {
36363 ret = xhci_test_trb_in_td(xhci,
36364 diff -urNp linux-3.0.8/drivers/usb/wusbcore/wa-hc.h linux-3.0.8/drivers/usb/wusbcore/wa-hc.h
36365 --- linux-3.0.8/drivers/usb/wusbcore/wa-hc.h 2011-07-21 22:17:23.000000000 -0400
36366 +++ linux-3.0.8/drivers/usb/wusbcore/wa-hc.h 2011-08-23 21:47:56.000000000 -0400
36367 @@ -192,7 +192,7 @@ struct wahc {
36368 struct list_head xfer_delayed_list;
36369 spinlock_t xfer_list_lock;
36370 struct work_struct xfer_work;
36371 - atomic_t xfer_id_count;
36372 + atomic_unchecked_t xfer_id_count;
36373 };
36374
36375
36376 @@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *
36377 INIT_LIST_HEAD(&wa->xfer_delayed_list);
36378 spin_lock_init(&wa->xfer_list_lock);
36379 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
36380 - atomic_set(&wa->xfer_id_count, 1);
36381 + atomic_set_unchecked(&wa->xfer_id_count, 1);
36382 }
36383
36384 /**
36385 diff -urNp linux-3.0.8/drivers/usb/wusbcore/wa-xfer.c linux-3.0.8/drivers/usb/wusbcore/wa-xfer.c
36386 --- linux-3.0.8/drivers/usb/wusbcore/wa-xfer.c 2011-07-21 22:17:23.000000000 -0400
36387 +++ linux-3.0.8/drivers/usb/wusbcore/wa-xfer.c 2011-08-23 21:47:56.000000000 -0400
36388 @@ -294,7 +294,7 @@ out:
36389 */
36390 static void wa_xfer_id_init(struct wa_xfer *xfer)
36391 {
36392 - xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
36393 + xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
36394 }
36395
36396 /*
36397 diff -urNp linux-3.0.8/drivers/vhost/vhost.c linux-3.0.8/drivers/vhost/vhost.c
36398 --- linux-3.0.8/drivers/vhost/vhost.c 2011-07-21 22:17:23.000000000 -0400
36399 +++ linux-3.0.8/drivers/vhost/vhost.c 2011-08-23 21:47:56.000000000 -0400
36400 @@ -589,7 +589,7 @@ static int init_used(struct vhost_virtqu
36401 return get_user(vq->last_used_idx, &used->idx);
36402 }
36403
36404 -static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
36405 +static long vhost_set_vring(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
36406 {
36407 struct file *eventfp, *filep = NULL,
36408 *pollstart = NULL, *pollstop = NULL;
36409 diff -urNp linux-3.0.8/drivers/video/aty/aty128fb.c linux-3.0.8/drivers/video/aty/aty128fb.c
36410 --- linux-3.0.8/drivers/video/aty/aty128fb.c 2011-07-21 22:17:23.000000000 -0400
36411 +++ linux-3.0.8/drivers/video/aty/aty128fb.c 2011-10-11 10:44:33.000000000 -0400
36412 @@ -148,7 +148,7 @@ enum {
36413 };
36414
36415 /* Must match above enum */
36416 -static const char *r128_family[] __devinitdata = {
36417 +static const char *r128_family[] __devinitconst = {
36418 "AGP",
36419 "PCI",
36420 "PRO AGP",
36421 diff -urNp linux-3.0.8/drivers/video/fbcmap.c linux-3.0.8/drivers/video/fbcmap.c
36422 --- linux-3.0.8/drivers/video/fbcmap.c 2011-07-21 22:17:23.000000000 -0400
36423 +++ linux-3.0.8/drivers/video/fbcmap.c 2011-08-23 21:47:56.000000000 -0400
36424 @@ -285,8 +285,7 @@ int fb_set_user_cmap(struct fb_cmap_user
36425 rc = -ENODEV;
36426 goto out;
36427 }
36428 - if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
36429 - !info->fbops->fb_setcmap)) {
36430 + if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
36431 rc = -EINVAL;
36432 goto out1;
36433 }
36434 diff -urNp linux-3.0.8/drivers/video/fbmem.c linux-3.0.8/drivers/video/fbmem.c
36435 --- linux-3.0.8/drivers/video/fbmem.c 2011-07-21 22:17:23.000000000 -0400
36436 +++ linux-3.0.8/drivers/video/fbmem.c 2011-08-23 21:48:14.000000000 -0400
36437 @@ -428,7 +428,7 @@ static void fb_do_show_logo(struct fb_in
36438 image->dx += image->width + 8;
36439 }
36440 } else if (rotate == FB_ROTATE_UD) {
36441 - for (x = 0; x < num && image->dx >= 0; x++) {
36442 + for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
36443 info->fbops->fb_imageblit(info, image);
36444 image->dx -= image->width + 8;
36445 }
36446 @@ -440,7 +440,7 @@ static void fb_do_show_logo(struct fb_in
36447 image->dy += image->height + 8;
36448 }
36449 } else if (rotate == FB_ROTATE_CCW) {
36450 - for (x = 0; x < num && image->dy >= 0; x++) {
36451 + for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
36452 info->fbops->fb_imageblit(info, image);
36453 image->dy -= image->height + 8;
36454 }
36455 @@ -939,6 +939,8 @@ fb_set_var(struct fb_info *info, struct
36456 int flags = info->flags;
36457 int ret = 0;
36458
36459 + pax_track_stack();
36460 +
36461 if (var->activate & FB_ACTIVATE_INV_MODE) {
36462 struct fb_videomode mode1, mode2;
36463
36464 @@ -1064,6 +1066,8 @@ static long do_fb_ioctl(struct fb_info *
36465 void __user *argp = (void __user *)arg;
36466 long ret = 0;
36467
36468 + pax_track_stack();
36469 +
36470 switch (cmd) {
36471 case FBIOGET_VSCREENINFO:
36472 if (!lock_fb_info(info))
36473 @@ -1143,7 +1147,7 @@ static long do_fb_ioctl(struct fb_info *
36474 return -EFAULT;
36475 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
36476 return -EINVAL;
36477 - if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
36478 + if (con2fb.framebuffer >= FB_MAX)
36479 return -EINVAL;
36480 if (!registered_fb[con2fb.framebuffer])
36481 request_module("fb%d", con2fb.framebuffer);
36482 diff -urNp linux-3.0.8/drivers/video/geode/gx1fb_core.c linux-3.0.8/drivers/video/geode/gx1fb_core.c
36483 --- linux-3.0.8/drivers/video/geode/gx1fb_core.c 2011-07-21 22:17:23.000000000 -0400
36484 +++ linux-3.0.8/drivers/video/geode/gx1fb_core.c 2011-10-11 10:44:33.000000000 -0400
36485 @@ -29,7 +29,7 @@ static int crt_option = 1;
36486 static char panel_option[32] = "";
36487
36488 /* Modes relevant to the GX1 (taken from modedb.c) */
36489 -static const struct fb_videomode __devinitdata gx1_modedb[] = {
36490 +static const struct fb_videomode __devinitconst gx1_modedb[] = {
36491 /* 640x480-60 VESA */
36492 { NULL, 60, 640, 480, 39682, 48, 16, 33, 10, 96, 2,
36493 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
36494 diff -urNp linux-3.0.8/drivers/video/gxt4500.c linux-3.0.8/drivers/video/gxt4500.c
36495 --- linux-3.0.8/drivers/video/gxt4500.c 2011-07-21 22:17:23.000000000 -0400
36496 +++ linux-3.0.8/drivers/video/gxt4500.c 2011-10-11 10:44:33.000000000 -0400
36497 @@ -156,7 +156,7 @@ struct gxt4500_par {
36498 static char *mode_option;
36499
36500 /* default mode: 1280x1024 @ 60 Hz, 8 bpp */
36501 -static const struct fb_videomode defaultmode __devinitdata = {
36502 +static const struct fb_videomode defaultmode __devinitconst = {
36503 .refresh = 60,
36504 .xres = 1280,
36505 .yres = 1024,
36506 @@ -581,7 +581,7 @@ static int gxt4500_blank(int blank, stru
36507 return 0;
36508 }
36509
36510 -static const struct fb_fix_screeninfo gxt4500_fix __devinitdata = {
36511 +static const struct fb_fix_screeninfo gxt4500_fix __devinitconst = {
36512 .id = "IBM GXT4500P",
36513 .type = FB_TYPE_PACKED_PIXELS,
36514 .visual = FB_VISUAL_PSEUDOCOLOR,
36515 diff -urNp linux-3.0.8/drivers/video/i810/i810_accel.c linux-3.0.8/drivers/video/i810/i810_accel.c
36516 --- linux-3.0.8/drivers/video/i810/i810_accel.c 2011-07-21 22:17:23.000000000 -0400
36517 +++ linux-3.0.8/drivers/video/i810/i810_accel.c 2011-08-23 21:47:56.000000000 -0400
36518 @@ -73,6 +73,7 @@ static inline int wait_for_space(struct
36519 }
36520 }
36521 printk("ringbuffer lockup!!!\n");
36522 + printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
36523 i810_report_error(mmio);
36524 par->dev_flags |= LOCKUP;
36525 info->pixmap.scan_align = 1;
36526 diff -urNp linux-3.0.8/drivers/video/i810/i810_main.c linux-3.0.8/drivers/video/i810/i810_main.c
36527 --- linux-3.0.8/drivers/video/i810/i810_main.c 2011-07-21 22:17:23.000000000 -0400
36528 +++ linux-3.0.8/drivers/video/i810/i810_main.c 2011-10-11 10:44:33.000000000 -0400
36529 @@ -97,7 +97,7 @@ static int i810fb_blank (int blank_
36530 static void i810fb_release_resource (struct fb_info *info, struct i810fb_par *par);
36531
36532 /* PCI */
36533 -static const char *i810_pci_list[] __devinitdata = {
36534 +static const char *i810_pci_list[] __devinitconst = {
36535 "Intel(R) 810 Framebuffer Device" ,
36536 "Intel(R) 810-DC100 Framebuffer Device" ,
36537 "Intel(R) 810E Framebuffer Device" ,
36538 diff -urNp linux-3.0.8/drivers/video/jz4740_fb.c linux-3.0.8/drivers/video/jz4740_fb.c
36539 --- linux-3.0.8/drivers/video/jz4740_fb.c 2011-07-21 22:17:23.000000000 -0400
36540 +++ linux-3.0.8/drivers/video/jz4740_fb.c 2011-10-11 10:44:33.000000000 -0400
36541 @@ -136,7 +136,7 @@ struct jzfb {
36542 uint32_t pseudo_palette[16];
36543 };
36544
36545 -static const struct fb_fix_screeninfo jzfb_fix __devinitdata = {
36546 +static const struct fb_fix_screeninfo jzfb_fix __devinitconst = {
36547 .id = "JZ4740 FB",
36548 .type = FB_TYPE_PACKED_PIXELS,
36549 .visual = FB_VISUAL_TRUECOLOR,
36550 diff -urNp linux-3.0.8/drivers/video/logo/logo_linux_clut224.ppm linux-3.0.8/drivers/video/logo/logo_linux_clut224.ppm
36551 --- linux-3.0.8/drivers/video/logo/logo_linux_clut224.ppm 2011-07-21 22:17:23.000000000 -0400
36552 +++ linux-3.0.8/drivers/video/logo/logo_linux_clut224.ppm 2011-08-29 23:49:40.000000000 -0400
36553 @@ -1,1604 +1,1123 @@
36554 P3
36555 -# Standard 224-color Linux logo
36556 80 80
36557 255
[Raw PPM pixel rows omitted for readability: the remainder of this hunk deletes the pixel data of the old standard 80x80, 224-color Linux boot logo and substitutes the new logo image data (the hunk spans 1,604 old lines and 1,123 new lines per its header).]
37948 -242 242 242 210 210 210 144 144 144 66 66 66
37949 - 6 6 6 2 2 6 2 2 6 2 2 6
37950 - 2 2 6 2 2 6 61 42 6 163 110 8
37951 -216 158 10 236 178 12 246 190 14 246 190 14
37952 -246 190 14 246 190 14 246 190 14 246 190 14
37953 -246 190 14 246 190 14 246 190 14 246 190 14
37954 -246 190 14 239 182 13 230 174 11 216 158 10
37955 -190 142 34 124 112 88 70 70 70 38 38 38
37956 - 18 18 18 6 6 6 0 0 0 0 0 0
37957 - 0 0 0 0 0 0 0 0 0 0 0 0
37958 - 0 0 0 0 0 0 0 0 0 0 0 0
37959 - 0 0 0 0 0 0 6 6 6 22 22 22
37960 - 62 62 62 168 124 44 206 145 10 224 166 10
37961 -236 178 12 239 182 13 242 186 14 242 186 14
37962 -246 186 14 246 190 14 246 190 14 246 190 14
37963 -246 190 14 246 190 14 246 190 14 246 190 14
37964 -246 190 14 246 190 14 246 190 14 246 190 14
37965 -246 190 14 236 178 12 216 158 10 175 118 6
37966 - 80 54 7 2 2 6 6 6 6 30 30 30
37967 - 54 54 54 62 62 62 50 50 50 38 38 38
37968 - 14 14 14 2 2 6 2 2 6 2 2 6
37969 - 2 2 6 2 2 6 2 2 6 2 2 6
37970 - 2 2 6 6 6 6 80 54 7 167 114 7
37971 -213 154 11 236 178 12 246 190 14 246 190 14
37972 -246 190 14 246 190 14 246 190 14 246 190 14
37973 -246 190 14 242 186 14 239 182 13 239 182 13
37974 -230 174 11 210 150 10 174 135 50 124 112 88
37975 - 82 82 82 54 54 54 34 34 34 18 18 18
37976 - 6 6 6 0 0 0 0 0 0 0 0 0
37977 - 0 0 0 0 0 0 0 0 0 0 0 0
37978 - 0 0 0 0 0 0 0 0 0 0 0 0
37979 - 0 0 0 0 0 0 6 6 6 18 18 18
37980 - 50 50 50 158 118 36 192 133 9 200 144 11
37981 -216 158 10 219 162 10 224 166 10 226 170 11
37982 -230 174 11 236 178 12 239 182 13 239 182 13
37983 -242 186 14 246 186 14 246 190 14 246 190 14
37984 -246 190 14 246 190 14 246 190 14 246 190 14
37985 -246 186 14 230 174 11 210 150 10 163 110 8
37986 -104 69 6 10 10 10 2 2 6 2 2 6
37987 - 2 2 6 2 2 6 2 2 6 2 2 6
37988 - 2 2 6 2 2 6 2 2 6 2 2 6
37989 - 2 2 6 2 2 6 2 2 6 2 2 6
37990 - 2 2 6 6 6 6 91 60 6 167 114 7
37991 -206 145 10 230 174 11 242 186 14 246 190 14
37992 -246 190 14 246 190 14 246 186 14 242 186 14
37993 -239 182 13 230 174 11 224 166 10 213 154 11
37994 -180 133 36 124 112 88 86 86 86 58 58 58
37995 - 38 38 38 22 22 22 10 10 10 6 6 6
37996 - 0 0 0 0 0 0 0 0 0 0 0 0
37997 - 0 0 0 0 0 0 0 0 0 0 0 0
37998 - 0 0 0 0 0 0 0 0 0 0 0 0
37999 - 0 0 0 0 0 0 0 0 0 14 14 14
38000 - 34 34 34 70 70 70 138 110 50 158 118 36
38001 -167 114 7 180 123 7 192 133 9 197 138 11
38002 -200 144 11 206 145 10 213 154 11 219 162 10
38003 -224 166 10 230 174 11 239 182 13 242 186 14
38004 -246 186 14 246 186 14 246 186 14 246 186 14
38005 -239 182 13 216 158 10 185 133 11 152 99 6
38006 -104 69 6 18 14 6 2 2 6 2 2 6
38007 - 2 2 6 2 2 6 2 2 6 2 2 6
38008 - 2 2 6 2 2 6 2 2 6 2 2 6
38009 - 2 2 6 2 2 6 2 2 6 2 2 6
38010 - 2 2 6 6 6 6 80 54 7 152 99 6
38011 -192 133 9 219 162 10 236 178 12 239 182 13
38012 -246 186 14 242 186 14 239 182 13 236 178 12
38013 -224 166 10 206 145 10 192 133 9 154 121 60
38014 - 94 94 94 62 62 62 42 42 42 22 22 22
38015 - 14 14 14 6 6 6 0 0 0 0 0 0
38016 - 0 0 0 0 0 0 0 0 0 0 0 0
38017 - 0 0 0 0 0 0 0 0 0 0 0 0
38018 - 0 0 0 0 0 0 0 0 0 0 0 0
38019 - 0 0 0 0 0 0 0 0 0 6 6 6
38020 - 18 18 18 34 34 34 58 58 58 78 78 78
38021 -101 98 89 124 112 88 142 110 46 156 107 11
38022 -163 110 8 167 114 7 175 118 6 180 123 7
38023 -185 133 11 197 138 11 210 150 10 219 162 10
38024 -226 170 11 236 178 12 236 178 12 234 174 13
38025 -219 162 10 197 138 11 163 110 8 130 83 6
38026 - 91 60 6 10 10 10 2 2 6 2 2 6
38027 - 18 18 18 38 38 38 38 38 38 38 38 38
38028 - 38 38 38 38 38 38 38 38 38 38 38 38
38029 - 38 38 38 38 38 38 26 26 26 2 2 6
38030 - 2 2 6 6 6 6 70 47 6 137 92 6
38031 -175 118 6 200 144 11 219 162 10 230 174 11
38032 -234 174 13 230 174 11 219 162 10 210 150 10
38033 -192 133 9 163 110 8 124 112 88 82 82 82
38034 - 50 50 50 30 30 30 14 14 14 6 6 6
38035 - 0 0 0 0 0 0 0 0 0 0 0 0
38036 - 0 0 0 0 0 0 0 0 0 0 0 0
38037 - 0 0 0 0 0 0 0 0 0 0 0 0
38038 - 0 0 0 0 0 0 0 0 0 0 0 0
38039 - 0 0 0 0 0 0 0 0 0 0 0 0
38040 - 6 6 6 14 14 14 22 22 22 34 34 34
38041 - 42 42 42 58 58 58 74 74 74 86 86 86
38042 -101 98 89 122 102 70 130 98 46 121 87 25
38043 -137 92 6 152 99 6 163 110 8 180 123 7
38044 -185 133 11 197 138 11 206 145 10 200 144 11
38045 -180 123 7 156 107 11 130 83 6 104 69 6
38046 - 50 34 6 54 54 54 110 110 110 101 98 89
38047 - 86 86 86 82 82 82 78 78 78 78 78 78
38048 - 78 78 78 78 78 78 78 78 78 78 78 78
38049 - 78 78 78 82 82 82 86 86 86 94 94 94
38050 -106 106 106 101 101 101 86 66 34 124 80 6
38051 -156 107 11 180 123 7 192 133 9 200 144 11
38052 -206 145 10 200 144 11 192 133 9 175 118 6
38053 -139 102 15 109 106 95 70 70 70 42 42 42
38054 - 22 22 22 10 10 10 0 0 0 0 0 0
38055 - 0 0 0 0 0 0 0 0 0 0 0 0
38056 - 0 0 0 0 0 0 0 0 0 0 0 0
38057 - 0 0 0 0 0 0 0 0 0 0 0 0
38058 - 0 0 0 0 0 0 0 0 0 0 0 0
38059 - 0 0 0 0 0 0 0 0 0 0 0 0
38060 - 0 0 0 0 0 0 6 6 6 10 10 10
38061 - 14 14 14 22 22 22 30 30 30 38 38 38
38062 - 50 50 50 62 62 62 74 74 74 90 90 90
38063 -101 98 89 112 100 78 121 87 25 124 80 6
38064 -137 92 6 152 99 6 152 99 6 152 99 6
38065 -138 86 6 124 80 6 98 70 6 86 66 30
38066 -101 98 89 82 82 82 58 58 58 46 46 46
38067 - 38 38 38 34 34 34 34 34 34 34 34 34
38068 - 34 34 34 34 34 34 34 34 34 34 34 34
38069 - 34 34 34 34 34 34 38 38 38 42 42 42
38070 - 54 54 54 82 82 82 94 86 76 91 60 6
38071 -134 86 6 156 107 11 167 114 7 175 118 6
38072 -175 118 6 167 114 7 152 99 6 121 87 25
38073 -101 98 89 62 62 62 34 34 34 18 18 18
38074 - 6 6 6 0 0 0 0 0 0 0 0 0
38075 - 0 0 0 0 0 0 0 0 0 0 0 0
38076 - 0 0 0 0 0 0 0 0 0 0 0 0
38077 - 0 0 0 0 0 0 0 0 0 0 0 0
38078 - 0 0 0 0 0 0 0 0 0 0 0 0
38079 - 0 0 0 0 0 0 0 0 0 0 0 0
38080 - 0 0 0 0 0 0 0 0 0 0 0 0
38081 - 0 0 0 6 6 6 6 6 6 10 10 10
38082 - 18 18 18 22 22 22 30 30 30 42 42 42
38083 - 50 50 50 66 66 66 86 86 86 101 98 89
38084 -106 86 58 98 70 6 104 69 6 104 69 6
38085 -104 69 6 91 60 6 82 62 34 90 90 90
38086 - 62 62 62 38 38 38 22 22 22 14 14 14
38087 - 10 10 10 10 10 10 10 10 10 10 10 10
38088 - 10 10 10 10 10 10 6 6 6 10 10 10
38089 - 10 10 10 10 10 10 10 10 10 14 14 14
38090 - 22 22 22 42 42 42 70 70 70 89 81 66
38091 - 80 54 7 104 69 6 124 80 6 137 92 6
38092 -134 86 6 116 81 8 100 82 52 86 86 86
38093 - 58 58 58 30 30 30 14 14 14 6 6 6
38094 - 0 0 0 0 0 0 0 0 0 0 0 0
38095 - 0 0 0 0 0 0 0 0 0 0 0 0
38096 - 0 0 0 0 0 0 0 0 0 0 0 0
38097 - 0 0 0 0 0 0 0 0 0 0 0 0
38098 - 0 0 0 0 0 0 0 0 0 0 0 0
38099 - 0 0 0 0 0 0 0 0 0 0 0 0
38100 - 0 0 0 0 0 0 0 0 0 0 0 0
38101 - 0 0 0 0 0 0 0 0 0 0 0 0
38102 - 0 0 0 6 6 6 10 10 10 14 14 14
38103 - 18 18 18 26 26 26 38 38 38 54 54 54
38104 - 70 70 70 86 86 86 94 86 76 89 81 66
38105 - 89 81 66 86 86 86 74 74 74 50 50 50
38106 - 30 30 30 14 14 14 6 6 6 0 0 0
38107 - 0 0 0 0 0 0 0 0 0 0 0 0
38108 - 0 0 0 0 0 0 0 0 0 0 0 0
38109 - 0 0 0 0 0 0 0 0 0 0 0 0
38110 - 6 6 6 18 18 18 34 34 34 58 58 58
38111 - 82 82 82 89 81 66 89 81 66 89 81 66
38112 - 94 86 66 94 86 76 74 74 74 50 50 50
38113 - 26 26 26 14 14 14 6 6 6 0 0 0
38114 - 0 0 0 0 0 0 0 0 0 0 0 0
38115 - 0 0 0 0 0 0 0 0 0 0 0 0
38116 - 0 0 0 0 0 0 0 0 0 0 0 0
38117 - 0 0 0 0 0 0 0 0 0 0 0 0
38118 - 0 0 0 0 0 0 0 0 0 0 0 0
38119 - 0 0 0 0 0 0 0 0 0 0 0 0
38120 - 0 0 0 0 0 0 0 0 0 0 0 0
38121 - 0 0 0 0 0 0 0 0 0 0 0 0
38122 - 0 0 0 0 0 0 0 0 0 0 0 0
38123 - 6 6 6 6 6 6 14 14 14 18 18 18
38124 - 30 30 30 38 38 38 46 46 46 54 54 54
38125 - 50 50 50 42 42 42 30 30 30 18 18 18
38126 - 10 10 10 0 0 0 0 0 0 0 0 0
38127 - 0 0 0 0 0 0 0 0 0 0 0 0
38128 - 0 0 0 0 0 0 0 0 0 0 0 0
38129 - 0 0 0 0 0 0 0 0 0 0 0 0
38130 - 0 0 0 6 6 6 14 14 14 26 26 26
38131 - 38 38 38 50 50 50 58 58 58 58 58 58
38132 - 54 54 54 42 42 42 30 30 30 18 18 18
38133 - 10 10 10 0 0 0 0 0 0 0 0 0
38134 - 0 0 0 0 0 0 0 0 0 0 0 0
38135 - 0 0 0 0 0 0 0 0 0 0 0 0
38136 - 0 0 0 0 0 0 0 0 0 0 0 0
38137 - 0 0 0 0 0 0 0 0 0 0 0 0
38138 - 0 0 0 0 0 0 0 0 0 0 0 0
38139 - 0 0 0 0 0 0 0 0 0 0 0 0
38140 - 0 0 0 0 0 0 0 0 0 0 0 0
38141 - 0 0 0 0 0 0 0 0 0 0 0 0
38142 - 0 0 0 0 0 0 0 0 0 0 0 0
38143 - 0 0 0 0 0 0 0 0 0 6 6 6
38144 - 6 6 6 10 10 10 14 14 14 18 18 18
38145 - 18 18 18 14 14 14 10 10 10 6 6 6
38146 - 0 0 0 0 0 0 0 0 0 0 0 0
38147 - 0 0 0 0 0 0 0 0 0 0 0 0
38148 - 0 0 0 0 0 0 0 0 0 0 0 0
38149 - 0 0 0 0 0 0 0 0 0 0 0 0
38150 - 0 0 0 0 0 0 0 0 0 6 6 6
38151 - 14 14 14 18 18 18 22 22 22 22 22 22
38152 - 18 18 18 14 14 14 10 10 10 6 6 6
38153 - 0 0 0 0 0 0 0 0 0 0 0 0
38154 - 0 0 0 0 0 0 0 0 0 0 0 0
38155 - 0 0 0 0 0 0 0 0 0 0 0 0
38156 - 0 0 0 0 0 0 0 0 0 0 0 0
38157 - 0 0 0 0 0 0 0 0 0 0 0 0
38158 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38159 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38160 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38161 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38162 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38163 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38164 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38165 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38166 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38167 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38168 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38169 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38170 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38171 +4 4 4 4 4 4
38172 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38173 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38174 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38175 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38176 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38177 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38178 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38179 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38180 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38181 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38182 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38183 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38184 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38185 +4 4 4 4 4 4
38186 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38187 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38188 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38189 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38190 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38191 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38192 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38193 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38194 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38195 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38196 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38197 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38198 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38199 +4 4 4 4 4 4
38200 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38201 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38202 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38203 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38204 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38205 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38206 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38207 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38208 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38209 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38210 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38211 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38212 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38213 +4 4 4 4 4 4
38214 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38215 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38216 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38217 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38218 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38219 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38220 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38221 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38222 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38223 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38224 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38225 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38226 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38227 +4 4 4 4 4 4
38228 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38229 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38230 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38231 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38232 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38233 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38234 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38235 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38236 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38237 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38238 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38239 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38240 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38241 +4 4 4 4 4 4
38242 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38243 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38244 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38245 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38246 +4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
38247 +0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
38248 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38249 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38250 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38251 +4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
38252 +0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
38253 +4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
38254 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38255 +4 4 4 4 4 4
38256 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38257 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38258 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38259 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38260 +4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
38261 +37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
38262 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38263 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38264 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38265 +4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
38266 +2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
38267 +4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
38268 +1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38269 +4 4 4 4 4 4
38270 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38271 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38272 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38273 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38274 +2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
38275 +153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
38276 +0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
38277 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38278 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38279 +4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
38280 +60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
38281 +4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
38282 +2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
38283 +4 4 4 4 4 4
38284 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38285 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38286 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38287 +4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
38288 +4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
38289 +165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
38290 +1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
38291 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38292 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
38293 +3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
38294 +163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
38295 +0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
38296 +37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
38297 +4 4 4 4 4 4
38298 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38299 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38300 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38301 +4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
38302 +37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
38303 +156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
38304 +125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
38305 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
38306 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
38307 +0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
38308 +174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
38309 +0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
38310 +64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
38311 +4 4 4 4 4 4
38312 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38313 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38314 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
38315 +5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
38316 +156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
38317 +156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
38318 +174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
38319 +1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
38320 +4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
38321 +13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
38322 +174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
38323 +22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
38324 +90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
38325 +4 4 4 4 4 4
38326 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38327 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38328 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
38329 +0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
38330 +174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
38331 +156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
38332 +163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
38333 +4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
38334 +5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
38335 +131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
38336 +190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
38337 +90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
38338 +31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
38339 +4 4 4 4 4 4
38340 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38341 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38342 +4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
38343 +4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
38344 +155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
38345 +167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
38346 +153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
38347 +41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
38348 +1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
38349 +177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
38350 +125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
38351 +136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
38352 +7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
38353 +4 4 4 4 4 4
38354 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38355 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38356 +4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
38357 +125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
38358 +156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
38359 +137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
38360 +156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
38361 +167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
38362 +0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
38363 +166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
38364 +6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
38365 +90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
38366 +1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
38367 +4 4 4 4 4 4
38368 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38369 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38370 +1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
38371 +167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
38372 +157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
38373 +26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
38374 +158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
38375 +165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
38376 +60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
38377 +137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
38378 +52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
38379 +13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
38380 +4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
38381 +4 4 4 4 4 4
38382 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38383 +4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
38384 +0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
38385 +158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
38386 +167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
38387 +4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
38388 +174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
38389 +155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
38390 +137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
38391 +16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
38392 +136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
38393 +2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
38394 +4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
38395 +4 4 4 4 4 4
38396 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38397 +4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
38398 +37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
38399 +157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
38400 +153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
38401 +4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
38402 +125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
38403 +156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
38404 +174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
38405 +4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
38406 +136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
38407 +1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
38408 +2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
38409 +0 0 0 4 4 4
38410 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
38411 +4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
38412 +158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
38413 +153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
38414 +37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
38415 +4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
38416 +4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
38417 +154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
38418 +174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
38419 +32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
38420 +28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
38421 +50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
38422 +0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
38423 +2 0 0 0 0 0
38424 +4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
38425 +0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
38426 +174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
38427 +165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
38428 +4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
38429 +4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
38430 +4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
38431 +174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
38432 +60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
38433 +136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
38434 +22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
38435 +136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
38436 +26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
38437 +37 38 37 0 0 0
38438 +4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
38439 +13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
38440 +153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
38441 +177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
38442 +4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
38443 +5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
38444 +6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
38445 +166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
38446 +4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
38447 +146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
38448 +71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
38449 +90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
38450 +125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
38451 +85 115 134 4 0 0
38452 +4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
38453 +125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
38454 +155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
38455 +125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
38456 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
38457 +0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
38458 +5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
38459 +37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
38460 +4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
38461 +90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
38462 +2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
38463 +13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
38464 +166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
38465 +60 73 81 4 0 0
38466 +4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
38467 +174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
38468 +156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
38469 +4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
38470 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
38471 +10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
38472 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
38473 +4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
38474 +80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
38475 +28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
38476 +50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
38477 +1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
38478 +167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
38479 +16 19 21 4 0 0
38480 +4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
38481 +158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
38482 +167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
38483 +4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
38484 +4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
38485 +80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
38486 +4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
38487 +3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
38488 +146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
38489 +68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
38490 +136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
38491 +24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
38492 +163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
38493 +4 0 0 4 3 3
38494 +3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
38495 +156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
38496 +155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
38497 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
38498 +2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
38499 +136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
38500 +0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
38501 +0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
38502 +136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
38503 +28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
38504 +22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
38505 +137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
38506 +60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
38507 +3 2 2 4 4 4
38508 +3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
38509 +157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
38510 +37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
38511 +4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
38512 +0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
38513 +101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
38514 +14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
38515 +22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
38516 +136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
38517 +17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
38518 +2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
38519 +166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
38520 +13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
38521 +4 4 4 4 4 4
38522 +1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
38523 +163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
38524 +4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
38525 +4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
38526 +40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
38527 +101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
38528 +101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
38529 +136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
38530 +136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
38531 +136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
38532 +3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
38533 +174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
38534 +4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
38535 +4 4 4 4 4 4
38536 +4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
38537 +155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
38538 +4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
38539 +4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
38540 +101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
38541 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
38542 +136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
38543 +136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
38544 +136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
38545 +90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
38546 +85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
38547 +167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
38548 +6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
38549 +5 5 5 5 5 5
38550 +1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
38551 +131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
38552 +6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
38553 +0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
38554 +101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
38555 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
38556 +101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
38557 +136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
38558 +101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
38559 +7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
38560 +174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
38561 +24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
38562 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
38563 +5 5 5 4 4 4
38564 +4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
38565 +131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
38566 +6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
38567 +13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
38568 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
38569 +101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
38570 +101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
38571 +136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
38572 +136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
38573 +2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
38574 +174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
38575 +4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
38576 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38577 +4 4 4 4 4 4
38578 +1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
38579 +137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
38580 +4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
38581 +64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
38582 +90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
38583 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
38584 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
38585 +136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
38586 +101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
38587 +37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
38588 +167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
38589 +3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
38590 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38591 +4 4 4 4 4 4
38592 +4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
38593 +153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
38594 +4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
38595 +90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
38596 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
38597 +90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
38598 +101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
38599 +101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
38600 +35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
38601 +154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
38602 +60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
38603 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38604 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38605 +4 4 4 4 4 4
38606 +1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
38607 +153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
38608 +4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193
38609 +64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193
38610 +64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
38611 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
38612 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
38613 +136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52
38614 +13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165
38615 +174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81
38616 +6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4
38617 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38618 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38619 +4 4 4 4 4 4
38620 +4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153
38621 +156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2
38622 +4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161
38623 +90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193
38624 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
38625 +90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196
38626 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
38627 +101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8
38628 +2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158
38629 +174 174 174 154 154 154 156 155 156 167 166 167 165 164 165 37 38 37
38630 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38631 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38632 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38633 +4 4 4 4 4 4
38634 +3 1 0 4 0 0 60 73 81 157 156 157 163 162 163 153 152 153
38635 +158 157 158 167 166 167 137 136 137 26 28 28 2 0 0 2 2 2
38636 +4 5 5 4 4 4 4 0 0 7 12 15 24 86 132 26 108 161
38637 +37 112 160 64 123 161 90 154 193 64 123 161 90 154 193 90 154 193
38638 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
38639 +90 154 193 101 161 196 90 154 193 101 161 196 101 161 196 101 161 196
38640 +101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 136 185 209
38641 +90 154 193 35 83 115 13 16 17 13 16 17 7 11 13 3 6 7
38642 +5 7 8 6 6 6 3 4 3 2 2 1 30 32 34 154 154 154
38643 +167 166 167 154 154 154 154 154 154 174 174 174 165 164 165 37 38 37
38644 +6 6 6 4 0 0 6 6 6 4 4 4 4 4 4 4 4 4
38645 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38646 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38647 +4 4 4 4 4 4
38648 +4 0 0 4 0 0 41 54 63 163 162 163 166 165 166 154 154 154
38649 +163 162 163 174 174 174 137 136 137 26 28 28 0 0 0 2 2 2
38650 +4 5 5 4 4 5 1 1 2 6 10 14 28 67 93 18 97 151
38651 +18 97 151 18 97 151 26 108 161 37 112 160 37 112 160 90 154 193
38652 +64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
38653 +90 154 193 101 161 196 101 161 196 90 154 193 101 161 196 101 161 196
38654 +101 161 196 101 161 196 101 161 196 136 185 209 90 154 193 16 89 141
38655 +13 20 25 7 11 13 5 7 8 5 7 8 2 5 5 4 5 5
38656 +3 4 3 4 5 5 3 4 3 0 0 0 37 38 37 158 157 158
38657 +174 174 174 158 157 158 158 157 158 167 166 167 174 174 174 41 54 63
38658 +4 0 0 3 2 2 5 5 5 4 4 4 4 4 4 4 4 4
38659 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38660 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38661 +4 4 4 4 4 4
38662 +1 1 1 4 0 0 60 73 81 165 164 165 174 174 174 158 157 158
38663 +167 166 167 174 174 174 153 152 153 26 28 28 2 0 0 2 2 2
38664 +4 5 5 4 4 4 4 0 0 7 12 15 10 87 144 10 87 144
38665 +18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
38666 +26 108 161 37 112 160 53 118 160 90 154 193 90 154 193 90 154 193
38667 +90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 101 161 196
38668 +101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 13 16 17
38669 +7 11 13 3 6 7 5 7 8 5 7 8 2 5 5 4 5 5
38670 +4 5 5 6 6 6 3 4 3 0 0 0 30 32 34 158 157 158
38671 +174 174 174 156 155 156 155 154 155 165 164 165 154 153 154 37 38 37
38672 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38673 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38674 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38675 +4 4 4 4 4 4
38676 +4 0 0 4 0 0 60 73 81 167 166 167 174 174 174 163 162 163
38677 +174 174 174 174 174 174 153 152 153 26 28 28 0 0 0 3 3 3
38678 +5 5 5 4 4 4 1 1 2 7 12 15 28 67 93 18 97 151
38679 +18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
38680 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38681 +90 154 193 26 108 161 90 154 193 90 154 193 90 154 193 101 161 196
38682 +101 161 196 26 108 161 22 40 52 13 16 17 7 11 13 2 5 5
38683 +2 5 5 6 6 6 2 5 5 4 5 5 4 5 5 4 5 5
38684 +3 4 3 5 5 5 3 4 3 2 0 0 30 32 34 137 136 137
38685 +153 152 153 137 136 137 131 129 131 137 136 137 131 129 131 37 38 37
38686 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38687 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38688 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38689 +4 4 4 4 4 4
38690 +1 1 1 4 0 0 60 73 81 167 166 167 174 174 174 166 165 166
38691 +174 174 174 177 184 187 153 152 153 30 32 34 1 0 0 3 3 3
38692 +5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
38693 +18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
38694 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38695 +26 108 161 26 108 161 26 108 161 90 154 193 90 154 193 26 108 161
38696 +35 83 115 13 16 17 7 11 13 5 7 8 3 6 7 5 7 8
38697 +2 5 5 6 6 6 4 5 5 4 5 5 3 4 3 4 5 5
38698 +3 4 3 6 6 6 3 4 3 0 0 0 26 28 28 125 124 125
38699 +131 129 131 125 124 125 125 124 125 131 129 131 131 129 131 37 38 37
38700 +4 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38701 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38702 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38703 +4 4 4 4 4 4
38704 +3 1 0 4 0 0 60 73 81 174 174 174 177 184 187 167 166 167
38705 +174 174 174 177 184 187 153 152 153 30 32 34 0 0 0 3 3 3
38706 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
38707 +18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
38708 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38709 +26 108 161 90 154 193 26 108 161 26 108 161 24 86 132 13 20 25
38710 +7 11 13 13 20 25 22 40 52 5 7 8 3 4 3 3 4 3
38711 +4 5 5 3 4 3 4 5 5 3 4 3 4 5 5 3 4 3
38712 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
38713 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38714 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38715 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38716 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38717 +4 4 4 4 4 4
38718 +1 1 1 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
38719 +174 174 174 190 197 201 157 156 157 30 32 34 1 0 0 3 3 3
38720 +5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
38721 +18 97 151 19 95 150 19 95 150 18 97 151 18 97 151 26 108 161
38722 +18 97 151 26 108 161 26 108 161 26 108 161 26 108 161 90 154 193
38723 +26 108 161 26 108 161 26 108 161 22 40 52 2 5 5 3 4 3
38724 +28 67 93 37 112 160 34 86 122 2 5 5 3 4 3 3 4 3
38725 +3 4 3 3 4 3 3 4 3 2 2 1 3 4 3 4 4 4
38726 +4 5 5 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
38727 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38728 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38729 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38730 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38731 +4 4 4 4 4 4
38732 +4 0 0 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
38733 +174 174 174 190 197 201 158 157 158 30 32 34 0 0 0 2 2 2
38734 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
38735 +10 87 144 19 95 150 19 95 150 18 97 151 18 97 151 18 97 151
38736 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38737 +18 97 151 22 40 52 2 5 5 2 2 1 22 40 52 26 108 161
38738 +90 154 193 37 112 160 22 40 52 3 4 3 13 20 25 22 30 35
38739 +3 6 7 1 1 1 2 2 2 6 9 11 5 5 5 4 3 3
38740 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
38741 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38742 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38743 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38744 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38745 +4 4 4 4 4 4
38746 +1 1 1 4 0 0 60 73 81 177 184 187 193 200 203 174 174 174
38747 +177 184 187 193 200 203 163 162 163 30 32 34 4 0 0 2 2 2
38748 +5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
38749 +10 87 144 10 87 144 19 95 150 19 95 150 19 95 150 18 97 151
38750 +26 108 161 26 108 161 26 108 161 90 154 193 26 108 161 28 67 93
38751 +6 10 14 2 5 5 13 20 25 24 86 132 37 112 160 90 154 193
38752 +10 87 144 7 12 15 2 5 5 28 67 93 37 112 160 28 67 93
38753 +2 2 1 7 12 15 35 83 115 28 67 93 3 6 7 1 0 0
38754 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
38755 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38756 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38757 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38758 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38759 +4 4 4 4 4 4
38760 +4 0 0 4 0 0 60 73 81 174 174 174 190 197 201 174 174 174
38761 +177 184 187 193 200 203 163 162 163 30 32 34 0 0 0 2 2 2
38762 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
38763 +10 87 144 16 89 141 19 95 150 10 87 144 26 108 161 26 108 161
38764 +26 108 161 26 108 161 26 108 161 28 67 93 6 10 14 1 1 2
38765 +7 12 15 28 67 93 26 108 161 16 89 141 24 86 132 21 29 34
38766 +3 4 3 21 29 34 37 112 160 37 112 160 27 99 146 21 29 34
38767 +21 29 34 26 108 161 90 154 193 35 83 115 1 1 2 2 0 0
38768 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
38769 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38770 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38771 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38772 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38773 +4 4 4 4 4 4
38774 +3 1 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
38775 +190 197 201 193 200 203 165 164 165 37 38 37 4 0 0 2 2 2
38776 +5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
38777 +10 87 144 10 87 144 16 89 141 18 97 151 18 97 151 10 87 144
38778 +24 86 132 24 86 132 13 20 25 4 5 7 4 5 7 22 40 52
38779 +18 97 151 37 112 160 26 108 161 7 12 15 1 1 1 0 0 0
38780 +28 67 93 37 112 160 26 108 161 28 67 93 22 40 52 28 67 93
38781 +26 108 161 90 154 193 26 108 161 10 87 144 0 0 0 2 0 0
38782 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
38783 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38784 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38785 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38786 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38787 +4 4 4 4 4 4
38788 +4 0 0 6 6 6 60 73 81 174 174 174 193 200 203 174 174 174
38789 +190 197 201 193 200 203 165 164 165 30 32 34 0 0 0 2 2 2
38790 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
38791 +10 87 144 10 87 144 10 87 144 18 97 151 28 67 93 6 10 14
38792 +0 0 0 1 1 2 4 5 7 13 20 25 16 89 141 26 108 161
38793 +26 108 161 26 108 161 24 86 132 6 9 11 2 3 3 22 40 52
38794 +37 112 160 16 89 141 22 40 52 28 67 93 26 108 161 26 108 161
38795 +90 154 193 26 108 161 26 108 161 28 67 93 1 1 1 4 0 0
38796 +4 4 4 5 5 5 3 3 3 4 0 0 26 28 28 124 126 130
38797 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38798 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38799 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38800 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38801 +4 4 4 4 4 4
38802 +4 0 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
38803 +193 200 203 193 200 203 167 166 167 37 38 37 4 0 0 2 2 2
38804 +5 5 5 4 4 4 4 0 0 6 10 14 28 67 93 10 87 144
38805 +10 87 144 10 87 144 18 97 151 10 87 144 13 20 25 4 5 7
38806 +1 1 2 1 1 1 22 40 52 26 108 161 26 108 161 26 108 161
38807 +26 108 161 26 108 161 26 108 161 24 86 132 22 40 52 22 40 52
38808 +22 40 52 22 40 52 10 87 144 26 108 161 26 108 161 26 108 161
38809 +26 108 161 26 108 161 90 154 193 10 87 144 0 0 0 4 0 0
38810 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
38811 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38812 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38813 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38814 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38815 +4 4 4 4 4 4
38816 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
38817 +190 197 201 205 212 215 167 166 167 30 32 34 0 0 0 2 2 2
38818 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
38819 +10 87 144 10 87 144 10 87 144 10 87 144 22 40 52 1 1 2
38820 +2 0 0 1 1 2 24 86 132 26 108 161 26 108 161 26 108 161
38821 +26 108 161 19 95 150 16 89 141 10 87 144 22 40 52 22 40 52
38822 +10 87 144 26 108 161 37 112 160 26 108 161 26 108 161 26 108 161
38823 +26 108 161 26 108 161 26 108 161 28 67 93 2 0 0 3 1 0
38824 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
38825 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38826 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38827 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38828 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38829 +4 4 4 4 4 4
38830 +4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
38831 +193 200 203 193 200 203 174 174 174 37 38 37 4 0 0 2 2 2
38832 +5 5 5 4 4 4 3 2 2 1 1 2 13 20 25 10 87 144
38833 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 13 20 25
38834 +13 20 25 22 40 52 10 87 144 18 97 151 18 97 151 26 108 161
38835 +10 87 144 13 20 25 6 10 14 21 29 34 24 86 132 18 97 151
38836 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38837 +26 108 161 90 154 193 18 97 151 13 20 25 0 0 0 4 3 3
38838 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
38839 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38840 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38841 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38842 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38843 +4 4 4 4 4 4
38844 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
38845 +190 197 201 220 221 221 167 166 167 30 32 34 1 0 0 2 2 2
38846 +5 5 5 4 4 4 4 4 5 2 5 5 4 5 7 13 20 25
38847 +28 67 93 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
38848 +10 87 144 10 87 144 18 97 151 10 87 144 18 97 151 18 97 151
38849 +28 67 93 2 3 3 0 0 0 28 67 93 26 108 161 26 108 161
38850 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38851 +26 108 161 10 87 144 13 20 25 1 1 2 3 2 2 4 4 4
38852 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
38853 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38854 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38855 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38856 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38857 +4 4 4 4 4 4
38858 +4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
38859 +193 200 203 193 200 203 174 174 174 26 28 28 4 0 0 4 3 3
38860 +5 5 5 4 4 4 4 4 4 4 4 5 1 1 2 2 5 5
38861 +4 5 7 22 40 52 10 87 144 10 87 144 18 97 151 10 87 144
38862 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 18 97 151
38863 +10 87 144 28 67 93 22 40 52 10 87 144 26 108 161 18 97 151
38864 +18 97 151 18 97 151 26 108 161 26 108 161 26 108 161 26 108 161
38865 +22 40 52 1 1 2 0 0 0 2 3 3 4 4 4 4 4 4
38866 +4 4 4 5 5 5 4 4 4 0 0 0 26 28 28 131 129 131
38867 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38868 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38869 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38870 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38871 +4 4 4 4 4 4
38872 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
38873 +190 197 201 220 221 221 190 197 201 41 54 63 4 0 0 2 2 2
38874 +6 6 6 4 4 4 4 4 4 4 4 5 4 4 5 3 3 3
38875 +1 1 2 1 1 2 6 10 14 22 40 52 10 87 144 18 97 151
38876 +18 97 151 10 87 144 10 87 144 10 87 144 18 97 151 10 87 144
38877 +10 87 144 18 97 151 26 108 161 18 97 151 18 97 151 10 87 144
38878 +26 108 161 26 108 161 26 108 161 10 87 144 28 67 93 6 10 14
38879 +1 1 2 1 1 2 4 3 3 4 4 5 4 4 4 4 4 4
38880 +5 5 5 5 5 5 1 1 1 4 0 0 37 51 59 137 136 137
38881 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38882 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38883 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38884 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38885 +4 4 4 4 4 4
38886 +4 0 0 4 0 0 60 73 81 220 221 221 193 200 203 174 174 174
38887 +193 200 203 193 200 203 220 221 221 137 136 137 13 16 17 4 0 0
38888 +2 2 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5
38889 +4 4 5 4 3 3 1 1 2 4 5 7 13 20 25 28 67 93
38890 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
38891 +10 87 144 18 97 151 18 97 151 10 87 144 18 97 151 26 108 161
38892 +26 108 161 18 97 151 28 67 93 6 10 14 0 0 0 0 0 0
38893 +2 3 3 4 5 5 4 4 5 4 4 4 4 4 4 5 5 5
38894 +3 3 3 1 1 1 0 0 0 16 19 21 125 124 125 137 136 137
38895 +131 129 131 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38896 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38897 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38898 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38899 +4 4 4 4 4 4
38900 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
38901 +193 200 203 190 197 201 220 221 221 220 221 221 153 152 153 30 32 34
38902 +0 0 0 0 0 0 2 2 2 4 4 4 4 4 4 4 4 4
38903 +4 4 4 4 5 5 4 5 7 1 1 2 1 1 2 4 5 7
38904 +13 20 25 28 67 93 10 87 144 18 97 151 10 87 144 10 87 144
38905 +10 87 144 10 87 144 10 87 144 18 97 151 26 108 161 18 97 151
38906 +28 67 93 7 12 15 0 0 0 0 0 0 2 2 1 4 4 4
38907 +4 5 5 4 5 5 4 4 4 4 4 4 3 3 3 0 0 0
38908 +0 0 0 0 0 0 37 38 37 125 124 125 158 157 158 131 129 131
38909 +125 124 125 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38910 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38911 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38912 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38913 +4 4 4 4 4 4
38914 +4 3 3 4 0 0 41 54 63 193 200 203 220 221 221 174 174 174
38915 +193 200 203 193 200 203 193 200 203 220 221 221 244 246 246 193 200 203
38916 +120 125 127 5 5 5 1 0 0 0 0 0 1 1 1 4 4 4
38917 +4 4 4 4 4 4 4 5 5 4 5 5 4 4 5 1 1 2
[ remaining ~360 added lines of ASCII RGB pixel-triplet data from the preceding image-file hunk omitted ]
39278 diff -urNp linux-3.0.8/drivers/video/udlfb.c linux-3.0.8/drivers/video/udlfb.c
39279 --- linux-3.0.8/drivers/video/udlfb.c 2011-07-21 22:17:23.000000000 -0400
39280 +++ linux-3.0.8/drivers/video/udlfb.c 2011-08-23 21:47:56.000000000 -0400
39281 @@ -586,11 +586,11 @@ int dlfb_handle_damage(struct dlfb_data
39282 dlfb_urb_completion(urb);
39283
39284 error:
39285 - atomic_add(bytes_sent, &dev->bytes_sent);
39286 - atomic_add(bytes_identical, &dev->bytes_identical);
39287 - atomic_add(width*height*2, &dev->bytes_rendered);
39288 + atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
39289 + atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
39290 + atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
39291 end_cycles = get_cycles();
39292 - atomic_add(((unsigned int) ((end_cycles - start_cycles)
39293 + atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
39294 >> 10)), /* Kcycles */
39295 &dev->cpu_kcycles_used);
39296
39297 @@ -711,11 +711,11 @@ static void dlfb_dpy_deferred_io(struct
39298 dlfb_urb_completion(urb);
39299
39300 error:
39301 - atomic_add(bytes_sent, &dev->bytes_sent);
39302 - atomic_add(bytes_identical, &dev->bytes_identical);
39303 - atomic_add(bytes_rendered, &dev->bytes_rendered);
39304 + atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
39305 + atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
39306 + atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
39307 end_cycles = get_cycles();
39308 - atomic_add(((unsigned int) ((end_cycles - start_cycles)
39309 + atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
39310 >> 10)), /* Kcycles */
39311 &dev->cpu_kcycles_used);
39312 }
39313 @@ -1307,7 +1307,7 @@ static ssize_t metrics_bytes_rendered_sh
39314 struct fb_info *fb_info = dev_get_drvdata(fbdev);
39315 struct dlfb_data *dev = fb_info->par;
39316 return snprintf(buf, PAGE_SIZE, "%u\n",
39317 - atomic_read(&dev->bytes_rendered));
39318 + atomic_read_unchecked(&dev->bytes_rendered));
39319 }
39320
39321 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
39322 @@ -1315,7 +1315,7 @@ static ssize_t metrics_bytes_identical_s
39323 struct fb_info *fb_info = dev_get_drvdata(fbdev);
39324 struct dlfb_data *dev = fb_info->par;
39325 return snprintf(buf, PAGE_SIZE, "%u\n",
39326 - atomic_read(&dev->bytes_identical));
39327 + atomic_read_unchecked(&dev->bytes_identical));
39328 }
39329
39330 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
39331 @@ -1323,7 +1323,7 @@ static ssize_t metrics_bytes_sent_show(s
39332 struct fb_info *fb_info = dev_get_drvdata(fbdev);
39333 struct dlfb_data *dev = fb_info->par;
39334 return snprintf(buf, PAGE_SIZE, "%u\n",
39335 - atomic_read(&dev->bytes_sent));
39336 + atomic_read_unchecked(&dev->bytes_sent));
39337 }
39338
39339 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
39340 @@ -1331,7 +1331,7 @@ static ssize_t metrics_cpu_kcycles_used_
39341 struct fb_info *fb_info = dev_get_drvdata(fbdev);
39342 struct dlfb_data *dev = fb_info->par;
39343 return snprintf(buf, PAGE_SIZE, "%u\n",
39344 - atomic_read(&dev->cpu_kcycles_used));
39345 + atomic_read_unchecked(&dev->cpu_kcycles_used));
39346 }
39347
39348 static ssize_t edid_show(
39349 @@ -1388,10 +1388,10 @@ static ssize_t metrics_reset_store(struc
39350 struct fb_info *fb_info = dev_get_drvdata(fbdev);
39351 struct dlfb_data *dev = fb_info->par;
39352
39353 - atomic_set(&dev->bytes_rendered, 0);
39354 - atomic_set(&dev->bytes_identical, 0);
39355 - atomic_set(&dev->bytes_sent, 0);
39356 - atomic_set(&dev->cpu_kcycles_used, 0);
39357 + atomic_set_unchecked(&dev->bytes_rendered, 0);
39358 + atomic_set_unchecked(&dev->bytes_identical, 0);
39359 + atomic_set_unchecked(&dev->bytes_sent, 0);
39360 + atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
39361
39362 return count;
39363 }
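The udlfb hunks above move the driver's throughput statistics (bytes_sent, bytes_identical, bytes_rendered, cpu_kcycles_used) from atomic_t to the PaX *_unchecked helpers. Under the REFCOUNT hardening, plain atomic_t arithmetic is overflow-checked, so counters that are only statistics and may legitimately wrap are switched to the unchecked variants. A small userspace sketch of the distinction follows; the function names are illustrative, not the kernel API.

/* Illustrative userspace sketch: a "checked" counter refuses to wrap on
 * overflow (as a REFCOUNT-protected atomic_t would), while an "unchecked"
 * counter used purely for statistics is allowed to wrap. Hypothetical names. */
#include <limits.h>
#include <stdio.h>

static int checked_add(int v, int inc)
{
	if (inc > 0 && v > INT_MAX - inc)
		return INT_MAX;		/* saturate instead of wrapping */
	return v + inc;
}

static unsigned int unchecked_add(unsigned int v, unsigned int inc)
{
	return v + inc;			/* wrapping is acceptable for pure statistics */
}

int main(void)
{
	printf("checked near overflow:   %d\n", checked_add(INT_MAX - 1, 8));
	printf("unchecked near overflow: %u\n", unchecked_add(0xFFFFFFF0u, 0x20u));
	return 0;
}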
39364 diff -urNp linux-3.0.8/drivers/video/uvesafb.c linux-3.0.8/drivers/video/uvesafb.c
39365 --- linux-3.0.8/drivers/video/uvesafb.c 2011-07-21 22:17:23.000000000 -0400
39366 +++ linux-3.0.8/drivers/video/uvesafb.c 2011-08-23 21:47:56.000000000 -0400
39367 @@ -19,6 +19,7 @@
39368 #include <linux/io.h>
39369 #include <linux/mutex.h>
39370 #include <linux/slab.h>
39371 +#include <linux/moduleloader.h>
39372 #include <video/edid.h>
39373 #include <video/uvesafb.h>
39374 #ifdef CONFIG_X86
39375 @@ -121,7 +122,7 @@ static int uvesafb_helper_start(void)
39376 NULL,
39377 };
39378
39379 - return call_usermodehelper(v86d_path, argv, envp, 1);
39380 + return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC);
39381 }
39382
39383 /*
39384 @@ -569,10 +570,32 @@ static int __devinit uvesafb_vbe_getpmi(
39385 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
39386 par->pmi_setpal = par->ypan = 0;
39387 } else {
39388 +
39389 +#ifdef CONFIG_PAX_KERNEXEC
39390 +#ifdef CONFIG_MODULES
39391 + par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
39392 +#endif
39393 + if (!par->pmi_code) {
39394 + par->pmi_setpal = par->ypan = 0;
39395 + return 0;
39396 + }
39397 +#endif
39398 +
39399 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
39400 + task->t.regs.edi);
39401 +
39402 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39403 + pax_open_kernel();
39404 + memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
39405 + pax_close_kernel();
39406 +
39407 + par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
39408 + par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
39409 +#else
39410 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
39411 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
39412 +#endif
39413 +
39414 printk(KERN_INFO "uvesafb: protected mode interface info at "
39415 "%04x:%04x\n",
39416 (u16)task->t.regs.es, (u16)task->t.regs.edi);
39417 @@ -1821,6 +1844,11 @@ out:
39418 if (par->vbe_modes)
39419 kfree(par->vbe_modes);
39420
39421 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39422 + if (par->pmi_code)
39423 + module_free_exec(NULL, par->pmi_code);
39424 +#endif
39425 +
39426 framebuffer_release(info);
39427 return err;
39428 }
39429 @@ -1847,6 +1875,12 @@ static int uvesafb_remove(struct platfor
39430 kfree(par->vbe_state_orig);
39431 if (par->vbe_state_saved)
39432 kfree(par->vbe_state_saved);
39433 +
39434 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39435 + if (par->pmi_code)
39436 + module_free_exec(NULL, par->pmi_code);
39437 +#endif
39438 +
39439 }
39440
39441 framebuffer_release(info);
39442 diff -urNp linux-3.0.8/drivers/video/vesafb.c linux-3.0.8/drivers/video/vesafb.c
39443 --- linux-3.0.8/drivers/video/vesafb.c 2011-07-21 22:17:23.000000000 -0400
39444 +++ linux-3.0.8/drivers/video/vesafb.c 2011-08-23 21:47:56.000000000 -0400
39445 @@ -9,6 +9,7 @@
39446 */
39447
39448 #include <linux/module.h>
39449 +#include <linux/moduleloader.h>
39450 #include <linux/kernel.h>
39451 #include <linux/errno.h>
39452 #include <linux/string.h>
39453 @@ -52,8 +53,8 @@ static int vram_remap __initdata; /*
39454 static int vram_total __initdata; /* Set total amount of memory */
39455 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
39456 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
39457 -static void (*pmi_start)(void) __read_mostly;
39458 -static void (*pmi_pal) (void) __read_mostly;
39459 +static void (*pmi_start)(void) __read_only;
39460 +static void (*pmi_pal) (void) __read_only;
39461 static int depth __read_mostly;
39462 static int vga_compat __read_mostly;
39463 /* --------------------------------------------------------------------- */
39464 @@ -233,6 +234,7 @@ static int __init vesafb_probe(struct pl
39465 unsigned int size_vmode;
39466 unsigned int size_remap;
39467 unsigned int size_total;
39468 + void *pmi_code = NULL;
39469
39470 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
39471 return -ENODEV;
39472 @@ -275,10 +277,6 @@ static int __init vesafb_probe(struct pl
39473 size_remap = size_total;
39474 vesafb_fix.smem_len = size_remap;
39475
39476 -#ifndef __i386__
39477 - screen_info.vesapm_seg = 0;
39478 -#endif
39479 -
39480 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
39481 printk(KERN_WARNING
39482 "vesafb: cannot reserve video memory at 0x%lx\n",
39483 @@ -307,9 +305,21 @@ static int __init vesafb_probe(struct pl
39484 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
39485 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
39486
39487 +#ifdef __i386__
39488 +
39489 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39490 + pmi_code = module_alloc_exec(screen_info.vesapm_size);
39491 + if (!pmi_code)
39492 +#elif !defined(CONFIG_PAX_KERNEXEC)
39493 + if (0)
39494 +#endif
39495 +
39496 +#endif
39497 + screen_info.vesapm_seg = 0;
39498 +
39499 if (screen_info.vesapm_seg) {
39500 - printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
39501 - screen_info.vesapm_seg,screen_info.vesapm_off);
39502 + printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
39503 + screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
39504 }
39505
39506 if (screen_info.vesapm_seg < 0xc000)
39507 @@ -317,9 +327,25 @@ static int __init vesafb_probe(struct pl
39508
39509 if (ypan || pmi_setpal) {
39510 unsigned short *pmi_base;
39511 +
39512 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
39513 - pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
39514 - pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
39515 +
39516 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39517 + pax_open_kernel();
39518 + memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
39519 +#else
39520 + pmi_code = pmi_base;
39521 +#endif
39522 +
39523 + pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
39524 + pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
39525 +
39526 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39527 + pmi_start = ktva_ktla(pmi_start);
39528 + pmi_pal = ktva_ktla(pmi_pal);
39529 + pax_close_kernel();
39530 +#endif
39531 +
39532 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
39533 if (pmi_base[3]) {
39534 printk(KERN_INFO "vesafb: pmi: ports = ");
39535 @@ -488,6 +514,11 @@ static int __init vesafb_probe(struct pl
39536 info->node, info->fix.id);
39537 return 0;
39538 err:
39539 +
39540 +#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39541 + module_free_exec(NULL, pmi_code);
39542 +#endif
39543 +
39544 if (info->screen_base)
39545 iounmap(info->screen_base);
39546 framebuffer_release(info);
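The uvesafb and vesafb hunks above stop using the VESA protected-mode interface code in place inside writable memory: under KERNEXEC the blob is copied into an executable allocation (module_alloc_exec) while writing is briefly enabled with pax_open_kernel()/pax_close_kernel(), and the entry pointers are translated with ktva_ktla(). The same write-xor-execute idea can be sketched in userspace (x86-64 only, purely illustrative): copy the code into its own mapping, then drop write permission before calling it.

/* Userspace sketch of the W^X pattern behind the uvesafb/vesafb changes:
 * never execute code out of a writable buffer; copy it into a dedicated
 * mapping and make that mapping read+execute only before use. */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	/* x86-64 machine code for: mov eax, 42; ret (illustrative blob only) */
	static const unsigned char blob[] = { 0xb8, 0x2a, 0x00, 0x00, 0x00, 0xc3 };

	void *code = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (code == MAP_FAILED)
		return 1;

	memcpy(code, blob, sizeof(blob));			/* "open" phase: write  */
	if (mprotect(code, 4096, PROT_READ | PROT_EXEC))	/* "close": drop write  */
		return 1;

	int (*fn)(void) = (int (*)(void))code;	/* cast is fine for this sketch */
	printf("%d\n", fn());			/* prints 42 */

	munmap(code, 4096);
	return 0;
}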
39547 diff -urNp linux-3.0.8/drivers/video/via/via_clock.h linux-3.0.8/drivers/video/via/via_clock.h
39548 --- linux-3.0.8/drivers/video/via/via_clock.h 2011-07-21 22:17:23.000000000 -0400
39549 +++ linux-3.0.8/drivers/video/via/via_clock.h 2011-08-23 21:47:56.000000000 -0400
39550 @@ -56,7 +56,7 @@ struct via_clock {
39551
39552 void (*set_engine_pll_state)(u8 state);
39553 void (*set_engine_pll)(struct via_pll_config config);
39554 -};
39555 +} __no_const;
39556
39557
39558 static inline u32 get_pll_internal_frequency(u32 ref_freq,
39559 diff -urNp linux-3.0.8/drivers/virtio/virtio_balloon.c linux-3.0.8/drivers/virtio/virtio_balloon.c
39560 --- linux-3.0.8/drivers/virtio/virtio_balloon.c 2011-07-21 22:17:23.000000000 -0400
39561 +++ linux-3.0.8/drivers/virtio/virtio_balloon.c 2011-08-23 21:48:14.000000000 -0400
39562 @@ -174,6 +174,8 @@ static void update_balloon_stats(struct
39563 struct sysinfo i;
39564 int idx = 0;
39565
39566 + pax_track_stack();
39567 +
39568 all_vm_events(events);
39569 si_meminfo(&i);
39570
39571 diff -urNp linux-3.0.8/fs/9p/vfs_inode.c linux-3.0.8/fs/9p/vfs_inode.c
39572 --- linux-3.0.8/fs/9p/vfs_inode.c 2011-10-24 08:05:30.000000000 -0400
39573 +++ linux-3.0.8/fs/9p/vfs_inode.c 2011-10-16 21:55:28.000000000 -0400
39574 @@ -1264,7 +1264,7 @@ static void *v9fs_vfs_follow_link(struct
39575 void
39576 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
39577 {
39578 - char *s = nd_get_link(nd);
39579 + const char *s = nd_get_link(nd);
39580
39581 P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name,
39582 IS_ERR(s) ? "<error>" : s);
39583 diff -urNp linux-3.0.8/fs/aio.c linux-3.0.8/fs/aio.c
39584 --- linux-3.0.8/fs/aio.c 2011-07-21 22:17:23.000000000 -0400
39585 +++ linux-3.0.8/fs/aio.c 2011-08-23 21:48:14.000000000 -0400
39586 @@ -119,7 +119,7 @@ static int aio_setup_ring(struct kioctx
39587 size += sizeof(struct io_event) * nr_events;
39588 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
39589
39590 - if (nr_pages < 0)
39591 + if (nr_pages <= 0)
39592 return -EINVAL;
39593
39594 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
39595 @@ -1088,6 +1088,8 @@ static int read_events(struct kioctx *ct
39596 struct aio_timeout to;
39597 int retry = 0;
39598
39599 + pax_track_stack();
39600 +
39601 /* needed to zero any padding within an entry (there shouldn't be
39602 * any, but C is fun!
39603 */
39604 @@ -1381,22 +1383,27 @@ static ssize_t aio_fsync(struct kiocb *i
39605 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
39606 {
39607 ssize_t ret;
39608 + struct iovec iovstack;
39609
39610 #ifdef CONFIG_COMPAT
39611 if (compat)
39612 ret = compat_rw_copy_check_uvector(type,
39613 (struct compat_iovec __user *)kiocb->ki_buf,
39614 - kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
39615 + kiocb->ki_nbytes, 1, &iovstack,
39616 &kiocb->ki_iovec);
39617 else
39618 #endif
39619 ret = rw_copy_check_uvector(type,
39620 (struct iovec __user *)kiocb->ki_buf,
39621 - kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
39622 + kiocb->ki_nbytes, 1, &iovstack,
39623 &kiocb->ki_iovec);
39624 if (ret < 0)
39625 goto out;
39626
39627 + if (kiocb->ki_iovec == &iovstack) {
39628 + kiocb->ki_inline_vec = iovstack;
39629 + kiocb->ki_iovec = &kiocb->ki_inline_vec;
39630 + }
39631 kiocb->ki_nr_segs = kiocb->ki_nbytes;
39632 kiocb->ki_cur_seg = 0;
39633 /* ki_nbytes/left now reflect bytes instead of segs */
39634 diff -urNp linux-3.0.8/fs/attr.c linux-3.0.8/fs/attr.c
39635 --- linux-3.0.8/fs/attr.c 2011-07-21 22:17:23.000000000 -0400
39636 +++ linux-3.0.8/fs/attr.c 2011-08-23 21:48:14.000000000 -0400
39637 @@ -98,6 +98,7 @@ int inode_newsize_ok(const struct inode
39638 unsigned long limit;
39639
39640 limit = rlimit(RLIMIT_FSIZE);
39641 + gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
39642 if (limit != RLIM_INFINITY && offset > limit)
39643 goto out_sig;
39644 if (offset > inode->i_sb->s_maxbytes)
39645 diff -urNp linux-3.0.8/fs/autofs4/waitq.c linux-3.0.8/fs/autofs4/waitq.c
39646 --- linux-3.0.8/fs/autofs4/waitq.c 2011-07-21 22:17:23.000000000 -0400
39647 +++ linux-3.0.8/fs/autofs4/waitq.c 2011-10-06 04:17:55.000000000 -0400
39648 @@ -60,7 +60,7 @@ static int autofs4_write(struct file *fi
39649 {
39650 unsigned long sigpipe, flags;
39651 mm_segment_t fs;
39652 - const char *data = (const char *)addr;
39653 + const char __user *data = (const char __force_user *)addr;
39654 ssize_t wr = 0;
39655
39656 /** WARNING: this is not safe for writing more than PIPE_BUF bytes! **/
39657 diff -urNp linux-3.0.8/fs/befs/linuxvfs.c linux-3.0.8/fs/befs/linuxvfs.c
39658 --- linux-3.0.8/fs/befs/linuxvfs.c 2011-10-24 08:05:23.000000000 -0400
39659 +++ linux-3.0.8/fs/befs/linuxvfs.c 2011-08-29 23:26:27.000000000 -0400
39660 @@ -503,7 +503,7 @@ static void befs_put_link(struct dentry
39661 {
39662 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
39663 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
39664 - char *link = nd_get_link(nd);
39665 + const char *link = nd_get_link(nd);
39666 if (!IS_ERR(link))
39667 kfree(link);
39668 }
39669 diff -urNp linux-3.0.8/fs/binfmt_aout.c linux-3.0.8/fs/binfmt_aout.c
39670 --- linux-3.0.8/fs/binfmt_aout.c 2011-07-21 22:17:23.000000000 -0400
39671 +++ linux-3.0.8/fs/binfmt_aout.c 2011-08-23 21:48:14.000000000 -0400
39672 @@ -16,6 +16,7 @@
39673 #include <linux/string.h>
39674 #include <linux/fs.h>
39675 #include <linux/file.h>
39676 +#include <linux/security.h>
39677 #include <linux/stat.h>
39678 #include <linux/fcntl.h>
39679 #include <linux/ptrace.h>
39680 @@ -86,6 +87,8 @@ static int aout_core_dump(struct coredum
39681 #endif
39682 # define START_STACK(u) ((void __user *)u.start_stack)
39683
39684 + memset(&dump, 0, sizeof(dump));
39685 +
39686 fs = get_fs();
39687 set_fs(KERNEL_DS);
39688 has_dumped = 1;
39689 @@ -97,10 +100,12 @@ static int aout_core_dump(struct coredum
39690
39691 /* If the size of the dump file exceeds the rlimit, then see what would happen
39692 if we wrote the stack, but not the data area. */
39693 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
39694 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
39695 dump.u_dsize = 0;
39696
39697 /* Make sure we have enough room to write the stack and data areas. */
39698 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
39699 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
39700 dump.u_ssize = 0;
39701
39702 @@ -234,6 +239,8 @@ static int load_aout_binary(struct linux
39703 rlim = rlimit(RLIMIT_DATA);
39704 if (rlim >= RLIM_INFINITY)
39705 rlim = ~0;
39706 +
39707 + gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
39708 if (ex.a_data + ex.a_bss > rlim)
39709 return -ENOMEM;
39710
39711 @@ -262,6 +269,27 @@ static int load_aout_binary(struct linux
39712 install_exec_creds(bprm);
39713 current->flags &= ~PF_FORKNOEXEC;
39714
39715 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
39716 + current->mm->pax_flags = 0UL;
39717 +#endif
39718 +
39719 +#ifdef CONFIG_PAX_PAGEEXEC
39720 + if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
39721 + current->mm->pax_flags |= MF_PAX_PAGEEXEC;
39722 +
39723 +#ifdef CONFIG_PAX_EMUTRAMP
39724 + if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
39725 + current->mm->pax_flags |= MF_PAX_EMUTRAMP;
39726 +#endif
39727 +
39728 +#ifdef CONFIG_PAX_MPROTECT
39729 + if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
39730 + current->mm->pax_flags |= MF_PAX_MPROTECT;
39731 +#endif
39732 +
39733 + }
39734 +#endif
39735 +
39736 if (N_MAGIC(ex) == OMAGIC) {
39737 unsigned long text_addr, map_size;
39738 loff_t pos;
39739 @@ -334,7 +362,7 @@ static int load_aout_binary(struct linux
39740
39741 down_write(&current->mm->mmap_sem);
39742 error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
39743 - PROT_READ | PROT_WRITE | PROT_EXEC,
39744 + PROT_READ | PROT_WRITE,
39745 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
39746 fd_offset + ex.a_text);
39747 up_write(&current->mm->mmap_sem);
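The binfmt_aout hunks above insert gr_learn_resource() calls immediately before each rlimit comparison; in grsecurity this appears to feed the RBAC learning mode with the resource amounts a task actually requested, so generated policy can later pick sensible limits. A hypothetical userspace model of the record-then-enforce pattern:

/* Hypothetical model of "learn before enforce": record the largest amount
 * ever requested, then apply the configured limit unchanged. Not the
 * grsecurity implementation, just the shape of the pattern. */
#include <stdio.h>

static unsigned long learned_max;	/* what a learning pass would log */

static int check_limit(unsigned long wanted, unsigned long limit)
{
	if (wanted > learned_max)
		learned_max = wanted;	/* record for later policy tuning */
	return wanted <= limit;		/* enforcement itself is unchanged */
}

int main(void)
{
	printf("%d\n", check_limit(4096, 8192));	/* allowed          */
	printf("%d\n", check_limit(16384, 8192));	/* denied, but logged */
	printf("learned max = %lu\n", learned_max);
	return 0;
}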
39748 diff -urNp linux-3.0.8/fs/binfmt_elf.c linux-3.0.8/fs/binfmt_elf.c
39749 --- linux-3.0.8/fs/binfmt_elf.c 2011-07-21 22:17:23.000000000 -0400
39750 +++ linux-3.0.8/fs/binfmt_elf.c 2011-08-23 21:48:14.000000000 -0400
39751 @@ -51,6 +51,10 @@ static int elf_core_dump(struct coredump
39752 #define elf_core_dump NULL
39753 #endif
39754
39755 +#ifdef CONFIG_PAX_MPROTECT
39756 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
39757 +#endif
39758 +
39759 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
39760 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
39761 #else
39762 @@ -70,6 +74,11 @@ static struct linux_binfmt elf_format =
39763 .load_binary = load_elf_binary,
39764 .load_shlib = load_elf_library,
39765 .core_dump = elf_core_dump,
39766 +
39767 +#ifdef CONFIG_PAX_MPROTECT
39768 + .handle_mprotect= elf_handle_mprotect,
39769 +#endif
39770 +
39771 .min_coredump = ELF_EXEC_PAGESIZE,
39772 };
39773
39774 @@ -77,6 +86,8 @@ static struct linux_binfmt elf_format =
39775
39776 static int set_brk(unsigned long start, unsigned long end)
39777 {
39778 + unsigned long e = end;
39779 +
39780 start = ELF_PAGEALIGN(start);
39781 end = ELF_PAGEALIGN(end);
39782 if (end > start) {
39783 @@ -87,7 +98,7 @@ static int set_brk(unsigned long start,
39784 if (BAD_ADDR(addr))
39785 return addr;
39786 }
39787 - current->mm->start_brk = current->mm->brk = end;
39788 + current->mm->start_brk = current->mm->brk = e;
39789 return 0;
39790 }
39791
39792 @@ -148,12 +159,15 @@ create_elf_tables(struct linux_binprm *b
39793 elf_addr_t __user *u_rand_bytes;
39794 const char *k_platform = ELF_PLATFORM;
39795 const char *k_base_platform = ELF_BASE_PLATFORM;
39796 - unsigned char k_rand_bytes[16];
39797 + u32 k_rand_bytes[4];
39798 int items;
39799 elf_addr_t *elf_info;
39800 int ei_index = 0;
39801 const struct cred *cred = current_cred();
39802 struct vm_area_struct *vma;
39803 + unsigned long saved_auxv[AT_VECTOR_SIZE];
39804 +
39805 + pax_track_stack();
39806
39807 /*
39808 * In some cases (e.g. Hyper-Threading), we want to avoid L1
39809 @@ -195,8 +209,12 @@ create_elf_tables(struct linux_binprm *b
39810 * Generate 16 random bytes for userspace PRNG seeding.
39811 */
39812 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
39813 - u_rand_bytes = (elf_addr_t __user *)
39814 - STACK_ALLOC(p, sizeof(k_rand_bytes));
39815 + srandom32(k_rand_bytes[0] ^ random32());
39816 + srandom32(k_rand_bytes[1] ^ random32());
39817 + srandom32(k_rand_bytes[2] ^ random32());
39818 + srandom32(k_rand_bytes[3] ^ random32());
39819 + p = STACK_ROUND(p, sizeof(k_rand_bytes));
39820 + u_rand_bytes = (elf_addr_t __user *) p;
39821 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
39822 return -EFAULT;
39823
39824 @@ -308,9 +326,11 @@ create_elf_tables(struct linux_binprm *b
39825 return -EFAULT;
39826 current->mm->env_end = p;
39827
39828 + memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
39829 +
39830 /* Put the elf_info on the stack in the right place. */
39831 sp = (elf_addr_t __user *)envp + 1;
39832 - if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
39833 + if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
39834 return -EFAULT;
39835 return 0;
39836 }
39837 @@ -381,10 +401,10 @@ static unsigned long load_elf_interp(str
39838 {
39839 struct elf_phdr *elf_phdata;
39840 struct elf_phdr *eppnt;
39841 - unsigned long load_addr = 0;
39842 + unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
39843 int load_addr_set = 0;
39844 unsigned long last_bss = 0, elf_bss = 0;
39845 - unsigned long error = ~0UL;
39846 + unsigned long error = -EINVAL;
39847 unsigned long total_size;
39848 int retval, i, size;
39849
39850 @@ -430,6 +450,11 @@ static unsigned long load_elf_interp(str
39851 goto out_close;
39852 }
39853
39854 +#ifdef CONFIG_PAX_SEGMEXEC
39855 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
39856 + pax_task_size = SEGMEXEC_TASK_SIZE;
39857 +#endif
39858 +
39859 eppnt = elf_phdata;
39860 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
39861 if (eppnt->p_type == PT_LOAD) {
39862 @@ -473,8 +498,8 @@ static unsigned long load_elf_interp(str
39863 k = load_addr + eppnt->p_vaddr;
39864 if (BAD_ADDR(k) ||
39865 eppnt->p_filesz > eppnt->p_memsz ||
39866 - eppnt->p_memsz > TASK_SIZE ||
39867 - TASK_SIZE - eppnt->p_memsz < k) {
39868 + eppnt->p_memsz > pax_task_size ||
39869 + pax_task_size - eppnt->p_memsz < k) {
39870 error = -ENOMEM;
39871 goto out_close;
39872 }
39873 @@ -528,6 +553,193 @@ out:
39874 return error;
39875 }
39876
39877 +#if (defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)) && defined(CONFIG_PAX_SOFTMODE)
39878 +static unsigned long pax_parse_softmode(const struct elf_phdr * const elf_phdata)
39879 +{
39880 + unsigned long pax_flags = 0UL;
39881 +
39882 +#ifdef CONFIG_PAX_PAGEEXEC
39883 + if (elf_phdata->p_flags & PF_PAGEEXEC)
39884 + pax_flags |= MF_PAX_PAGEEXEC;
39885 +#endif
39886 +
39887 +#ifdef CONFIG_PAX_SEGMEXEC
39888 + if (elf_phdata->p_flags & PF_SEGMEXEC)
39889 + pax_flags |= MF_PAX_SEGMEXEC;
39890 +#endif
39891 +
39892 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
39893 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39894 + if ((__supported_pte_mask & _PAGE_NX))
39895 + pax_flags &= ~MF_PAX_SEGMEXEC;
39896 + else
39897 + pax_flags &= ~MF_PAX_PAGEEXEC;
39898 + }
39899 +#endif
39900 +
39901 +#ifdef CONFIG_PAX_EMUTRAMP
39902 + if (elf_phdata->p_flags & PF_EMUTRAMP)
39903 + pax_flags |= MF_PAX_EMUTRAMP;
39904 +#endif
39905 +
39906 +#ifdef CONFIG_PAX_MPROTECT
39907 + if (elf_phdata->p_flags & PF_MPROTECT)
39908 + pax_flags |= MF_PAX_MPROTECT;
39909 +#endif
39910 +
39911 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
39912 + if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
39913 + pax_flags |= MF_PAX_RANDMMAP;
39914 +#endif
39915 +
39916 + return pax_flags;
39917 +}
39918 +#endif
39919 +
39920 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
39921 +static unsigned long pax_parse_hardmode(const struct elf_phdr * const elf_phdata)
39922 +{
39923 + unsigned long pax_flags = 0UL;
39924 +
39925 +#ifdef CONFIG_PAX_PAGEEXEC
39926 + if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
39927 + pax_flags |= MF_PAX_PAGEEXEC;
39928 +#endif
39929 +
39930 +#ifdef CONFIG_PAX_SEGMEXEC
39931 + if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
39932 + pax_flags |= MF_PAX_SEGMEXEC;
39933 +#endif
39934 +
39935 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
39936 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39937 + if ((__supported_pte_mask & _PAGE_NX))
39938 + pax_flags &= ~MF_PAX_SEGMEXEC;
39939 + else
39940 + pax_flags &= ~MF_PAX_PAGEEXEC;
39941 + }
39942 +#endif
39943 +
39944 +#ifdef CONFIG_PAX_EMUTRAMP
39945 + if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
39946 + pax_flags |= MF_PAX_EMUTRAMP;
39947 +#endif
39948 +
39949 +#ifdef CONFIG_PAX_MPROTECT
39950 + if (!(elf_phdata->p_flags & PF_NOMPROTECT))
39951 + pax_flags |= MF_PAX_MPROTECT;
39952 +#endif
39953 +
39954 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
39955 + if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
39956 + pax_flags |= MF_PAX_RANDMMAP;
39957 +#endif
39958 +
39959 + return pax_flags;
39960 +}
39961 +#endif
39962 +
39963 +#ifdef CONFIG_PAX_EI_PAX
39964 +static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
39965 +{
39966 + unsigned long pax_flags = 0UL;
39967 +
39968 +#ifdef CONFIG_PAX_PAGEEXEC
39969 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
39970 + pax_flags |= MF_PAX_PAGEEXEC;
39971 +#endif
39972 +
39973 +#ifdef CONFIG_PAX_SEGMEXEC
39974 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
39975 + pax_flags |= MF_PAX_SEGMEXEC;
39976 +#endif
39977 +
39978 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
39979 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39980 + if ((__supported_pte_mask & _PAGE_NX))
39981 + pax_flags &= ~MF_PAX_SEGMEXEC;
39982 + else
39983 + pax_flags &= ~MF_PAX_PAGEEXEC;
39984 + }
39985 +#endif
39986 +
39987 +#ifdef CONFIG_PAX_EMUTRAMP
39988 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
39989 + pax_flags |= MF_PAX_EMUTRAMP;
39990 +#endif
39991 +
39992 +#ifdef CONFIG_PAX_MPROTECT
39993 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
39994 + pax_flags |= MF_PAX_MPROTECT;
39995 +#endif
39996 +
39997 +#ifdef CONFIG_PAX_ASLR
39998 + if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
39999 + pax_flags |= MF_PAX_RANDMMAP;
40000 +#endif
40001 +
40002 + return pax_flags;
40003 +}
40004 +#endif
40005 +
40006 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
40007 +static long pax_parse_elf_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
40008 +{
40009 + unsigned long pax_flags = 0UL;
40010 +
40011 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
40012 + unsigned long i;
40013 + int found_flags = 0;
40014 +#endif
40015 +
40016 +#ifdef CONFIG_PAX_EI_PAX
40017 + pax_flags = pax_parse_ei_pax(elf_ex);
40018 +#endif
40019 +
40020 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
40021 + for (i = 0UL; i < elf_ex->e_phnum; i++)
40022 + if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
40023 + if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
40024 + ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
40025 + ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
40026 + ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
40027 + ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
40028 + return -EINVAL;
40029 +
40030 +#ifdef CONFIG_PAX_SOFTMODE
40031 + if (pax_softmode)
40032 + pax_flags = pax_parse_softmode(&elf_phdata[i]);
40033 + else
40034 +#endif
40035 +
40036 + pax_flags = pax_parse_hardmode(&elf_phdata[i]);
40037 + found_flags = 1;
40038 + break;
40039 + }
40040 +#endif
40041 +
40042 +#if !defined(CONFIG_PAX_EI_PAX) && defined(CONFIG_PAX_PT_PAX_FLAGS)
40043 + if (found_flags == 0) {
40044 + struct elf_phdr phdr;
40045 + memset(&phdr, 0, sizeof(phdr));
40046 + phdr.p_flags = PF_NOEMUTRAMP;
40047 +#ifdef CONFIG_PAX_SOFTMODE
40048 + if (pax_softmode)
40049 + pax_flags = pax_parse_softmode(&phdr);
40050 + else
40051 +#endif
40052 + pax_flags = pax_parse_hardmode(&phdr);
40053 + }
40054 +#endif
40055 +
40056 + if (0 > pax_check_flags(&pax_flags))
40057 + return -EINVAL;
40058 +
40059 + current->mm->pax_flags = pax_flags;
40060 + return 0;
40061 +}
40062 +#endif
40063 +
40064 /*
40065 * These are the functions used to load ELF style executables and shared
40066 * libraries. There is no binary dependent code anywhere else.
40067 @@ -544,6 +756,11 @@ static unsigned long randomize_stack_top
40068 {
40069 unsigned int random_variable = 0;
40070
40071 +#ifdef CONFIG_PAX_RANDUSTACK
40072 + if (randomize_va_space)
40073 + return stack_top - current->mm->delta_stack;
40074 +#endif
40075 +
40076 if ((current->flags & PF_RANDOMIZE) &&
40077 !(current->personality & ADDR_NO_RANDOMIZE)) {
40078 random_variable = get_random_int() & STACK_RND_MASK;
40079 @@ -562,7 +779,7 @@ static int load_elf_binary(struct linux_
40080 unsigned long load_addr = 0, load_bias = 0;
40081 int load_addr_set = 0;
40082 char * elf_interpreter = NULL;
40083 - unsigned long error;
40084 + unsigned long error = 0;
40085 struct elf_phdr *elf_ppnt, *elf_phdata;
40086 unsigned long elf_bss, elf_brk;
40087 int retval, i;
40088 @@ -572,11 +789,11 @@ static int load_elf_binary(struct linux_
40089 unsigned long start_code, end_code, start_data, end_data;
40090 unsigned long reloc_func_desc __maybe_unused = 0;
40091 int executable_stack = EXSTACK_DEFAULT;
40092 - unsigned long def_flags = 0;
40093 struct {
40094 struct elfhdr elf_ex;
40095 struct elfhdr interp_elf_ex;
40096 } *loc;
40097 + unsigned long pax_task_size = TASK_SIZE;
40098
40099 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
40100 if (!loc) {
40101 @@ -714,11 +931,81 @@ static int load_elf_binary(struct linux_
40102
40103 /* OK, This is the point of no return */
40104 current->flags &= ~PF_FORKNOEXEC;
40105 - current->mm->def_flags = def_flags;
40106 +
40107 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
40108 + current->mm->pax_flags = 0UL;
40109 +#endif
40110 +
40111 +#ifdef CONFIG_PAX_DLRESOLVE
40112 + current->mm->call_dl_resolve = 0UL;
40113 +#endif
40114 +
40115 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
40116 + current->mm->call_syscall = 0UL;
40117 +#endif
40118 +
40119 +#ifdef CONFIG_PAX_ASLR
40120 + current->mm->delta_mmap = 0UL;
40121 + current->mm->delta_stack = 0UL;
40122 +#endif
40123 +
40124 + current->mm->def_flags = 0;
40125 +
40126 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
40127 + if (0 > pax_parse_elf_flags(&loc->elf_ex, elf_phdata)) {
40128 + send_sig(SIGKILL, current, 0);
40129 + goto out_free_dentry;
40130 + }
40131 +#endif
40132 +
40133 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
40134 + pax_set_initial_flags(bprm);
40135 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
40136 + if (pax_set_initial_flags_func)
40137 + (pax_set_initial_flags_func)(bprm);
40138 +#endif
40139 +
40140 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
40141 + if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
40142 + current->mm->context.user_cs_limit = PAGE_SIZE;
40143 + current->mm->def_flags |= VM_PAGEEXEC;
40144 + }
40145 +#endif
40146 +
40147 +#ifdef CONFIG_PAX_SEGMEXEC
40148 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
40149 + current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
40150 + current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
40151 + pax_task_size = SEGMEXEC_TASK_SIZE;
40152 + current->mm->def_flags |= VM_NOHUGEPAGE;
40153 + }
40154 +#endif
40155 +
40156 +#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
40157 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
40158 + set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
40159 + put_cpu();
40160 + }
40161 +#endif
40162
40163 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
40164 may depend on the personality. */
40165 SET_PERSONALITY(loc->elf_ex);
40166 +
40167 +#ifdef CONFIG_PAX_ASLR
40168 + if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
40169 + current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
40170 + current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
40171 + }
40172 +#endif
40173 +
40174 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
40175 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
40176 + executable_stack = EXSTACK_DISABLE_X;
40177 + current->personality &= ~READ_IMPLIES_EXEC;
40178 + } else
40179 +#endif
40180 +
40181 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
40182 current->personality |= READ_IMPLIES_EXEC;
40183
40184 @@ -800,6 +1087,20 @@ static int load_elf_binary(struct linux_
40185 #else
40186 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
40187 #endif
40188 +
40189 +#ifdef CONFIG_PAX_RANDMMAP
40190 + /* PaX: randomize base address at the default exe base if requested */
40191 + if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
40192 +#ifdef CONFIG_SPARC64
40193 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
40194 +#else
40195 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
40196 +#endif
40197 + load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
40198 + elf_flags |= MAP_FIXED;
40199 + }
40200 +#endif
40201 +
40202 }
40203
40204 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
40205 @@ -832,9 +1133,9 @@ static int load_elf_binary(struct linux_
40206 * allowed task size. Note that p_filesz must always be
40207 * <= p_memsz so it is only necessary to check p_memsz.
40208 */
40209 - if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
40210 - elf_ppnt->p_memsz > TASK_SIZE ||
40211 - TASK_SIZE - elf_ppnt->p_memsz < k) {
40212 + if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
40213 + elf_ppnt->p_memsz > pax_task_size ||
40214 + pax_task_size - elf_ppnt->p_memsz < k) {
40215 /* set_brk can never work. Avoid overflows. */
40216 send_sig(SIGKILL, current, 0);
40217 retval = -EINVAL;
40218 @@ -862,6 +1163,11 @@ static int load_elf_binary(struct linux_
40219 start_data += load_bias;
40220 end_data += load_bias;
40221
40222 +#ifdef CONFIG_PAX_RANDMMAP
40223 + if (current->mm->pax_flags & MF_PAX_RANDMMAP)
40224 + elf_brk += PAGE_SIZE + ((pax_get_random_long() & ~PAGE_MASK) << 4);
40225 +#endif
40226 +
40227 /* Calling set_brk effectively mmaps the pages that we need
40228 * for the bss and break sections. We must do this before
40229 * mapping in the interpreter, to make sure it doesn't wind
40230 @@ -873,9 +1179,11 @@ static int load_elf_binary(struct linux_
40231 goto out_free_dentry;
40232 }
40233 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
40234 - send_sig(SIGSEGV, current, 0);
40235 - retval = -EFAULT; /* Nobody gets to see this, but.. */
40236 - goto out_free_dentry;
40237 + /*
40238 + * This bss-zeroing can fail if the ELF
40239 + * file specifies odd protections. So
40240 + * we don't check the return value
40241 + */
40242 }
40243
40244 if (elf_interpreter) {
40245 @@ -1090,7 +1398,7 @@ out:
40246 * Decide what to dump of a segment, part, all or none.
40247 */
40248 static unsigned long vma_dump_size(struct vm_area_struct *vma,
40249 - unsigned long mm_flags)
40250 + unsigned long mm_flags, long signr)
40251 {
40252 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
40253
40254 @@ -1124,7 +1432,7 @@ static unsigned long vma_dump_size(struc
40255 if (vma->vm_file == NULL)
40256 return 0;
40257
40258 - if (FILTER(MAPPED_PRIVATE))
40259 + if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
40260 goto whole;
40261
40262 /*
40263 @@ -1346,9 +1654,9 @@ static void fill_auxv_note(struct memelf
40264 {
40265 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
40266 int i = 0;
40267 - do
40268 + do {
40269 i += 2;
40270 - while (auxv[i - 2] != AT_NULL);
40271 + } while (auxv[i - 2] != AT_NULL);
40272 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
40273 }
40274
40275 @@ -1854,14 +2162,14 @@ static void fill_extnum_info(struct elfh
40276 }
40277
40278 static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
40279 - unsigned long mm_flags)
40280 + struct coredump_params *cprm)
40281 {
40282 struct vm_area_struct *vma;
40283 size_t size = 0;
40284
40285 for (vma = first_vma(current, gate_vma); vma != NULL;
40286 vma = next_vma(vma, gate_vma))
40287 - size += vma_dump_size(vma, mm_flags);
40288 + size += vma_dump_size(vma, cprm->mm_flags, cprm->signr);
40289 return size;
40290 }
40291
40292 @@ -1955,7 +2263,7 @@ static int elf_core_dump(struct coredump
40293
40294 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
40295
40296 - offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
40297 + offset += elf_core_vma_data_size(gate_vma, cprm);
40298 offset += elf_core_extra_data_size();
40299 e_shoff = offset;
40300
40301 @@ -1969,10 +2277,12 @@ static int elf_core_dump(struct coredump
40302 offset = dataoff;
40303
40304 size += sizeof(*elf);
40305 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
40306 if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf)))
40307 goto end_coredump;
40308
40309 size += sizeof(*phdr4note);
40310 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
40311 if (size > cprm->limit
40312 || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
40313 goto end_coredump;
40314 @@ -1986,7 +2296,7 @@ static int elf_core_dump(struct coredump
40315 phdr.p_offset = offset;
40316 phdr.p_vaddr = vma->vm_start;
40317 phdr.p_paddr = 0;
40318 - phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
40319 + phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->signr);
40320 phdr.p_memsz = vma->vm_end - vma->vm_start;
40321 offset += phdr.p_filesz;
40322 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
40323 @@ -1997,6 +2307,7 @@ static int elf_core_dump(struct coredump
40324 phdr.p_align = ELF_EXEC_PAGESIZE;
40325
40326 size += sizeof(phdr);
40327 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
40328 if (size > cprm->limit
40329 || !dump_write(cprm->file, &phdr, sizeof(phdr)))
40330 goto end_coredump;
40331 @@ -2021,7 +2332,7 @@ static int elf_core_dump(struct coredump
40332 unsigned long addr;
40333 unsigned long end;
40334
40335 - end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
40336 + end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->signr);
40337
40338 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
40339 struct page *page;
40340 @@ -2030,6 +2341,7 @@ static int elf_core_dump(struct coredump
40341 page = get_dump_page(addr);
40342 if (page) {
40343 void *kaddr = kmap(page);
40344 + gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
40345 stop = ((size += PAGE_SIZE) > cprm->limit) ||
40346 !dump_write(cprm->file, kaddr,
40347 PAGE_SIZE);
40348 @@ -2047,6 +2359,7 @@ static int elf_core_dump(struct coredump
40349
40350 if (e_phnum == PN_XNUM) {
40351 size += sizeof(*shdr4extnum);
40352 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
40353 if (size > cprm->limit
40354 || !dump_write(cprm->file, shdr4extnum,
40355 sizeof(*shdr4extnum)))
40356 @@ -2067,6 +2380,97 @@ out:
40357
40358 #endif /* CONFIG_ELF_CORE */
40359
40360 +#ifdef CONFIG_PAX_MPROTECT
40361 +/* PaX: non-PIC ELF libraries need relocations on their executable segments
40362 + * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
40363 + * we'll remove VM_MAYWRITE for good on RELRO segments.
40364 + *
40365 + * The checks favour ld-linux.so behaviour which operates on a per ELF segment
40366 + * basis because we want to allow the common case and not the special ones.
40367 + */
40368 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
40369 +{
40370 + struct elfhdr elf_h;
40371 + struct elf_phdr elf_p;
40372 + unsigned long i;
40373 + unsigned long oldflags;
40374 + bool is_textrel_rw, is_textrel_rx, is_relro;
40375 +
40376 + if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
40377 + return;
40378 +
40379 + oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
40380 + newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
40381 +
40382 +#ifdef CONFIG_PAX_ELFRELOCS
40383 + /* possible TEXTREL */
40384 + is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
40385 + is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
40386 +#else
40387 + is_textrel_rw = false;
40388 + is_textrel_rx = false;
40389 +#endif
40390 +
40391 + /* possible RELRO */
40392 + is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
40393 +
40394 + if (!is_textrel_rw && !is_textrel_rx && !is_relro)
40395 + return;
40396 +
40397 + if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
40398 + memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
40399 +
40400 +#ifdef CONFIG_PAX_ETEXECRELOCS
40401 + ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
40402 +#else
40403 + ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
40404 +#endif
40405 +
40406 + (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
40407 + !elf_check_arch(&elf_h) ||
40408 + elf_h.e_phentsize != sizeof(struct elf_phdr) ||
40409 + elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
40410 + return;
40411 +
40412 + for (i = 0UL; i < elf_h.e_phnum; i++) {
40413 + if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
40414 + return;
40415 + switch (elf_p.p_type) {
40416 + case PT_DYNAMIC:
40417 + if (!is_textrel_rw && !is_textrel_rx)
40418 + continue;
40419 + i = 0UL;
40420 + while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
40421 + elf_dyn dyn;
40422 +
40423 + if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
40424 + return;
40425 + if (dyn.d_tag == DT_NULL)
40426 + return;
40427 + if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
40428 + gr_log_textrel(vma);
40429 + if (is_textrel_rw)
40430 + vma->vm_flags |= VM_MAYWRITE;
40431 + else
40432 +				/* PaX: disallow write access after relocs are done, hopefully no one else needs it... */
40433 + vma->vm_flags &= ~VM_MAYWRITE;
40434 + return;
40435 + }
40436 + i++;
40437 + }
40438 + return;
40439 +
40440 + case PT_GNU_RELRO:
40441 + if (!is_relro)
40442 + continue;
40443 + if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
40444 + vma->vm_flags &= ~VM_MAYWRITE;
40445 + return;
40446 + }
40447 + }
40448 +}
40449 +#endif
40450 +
40451 static int __init init_elf_binfmt(void)
40452 {
40453 return register_binfmt(&elf_format);
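A rough userspace analogue (not part of the patch) of the checks elf_handle_mprotect() performs above: it validates the ELF header, walks the program headers, looks for DT_TEXTREL/DF_TEXTREL inside PT_DYNAMIC and notes a PT_GNU_RELRO segment. It assumes a 64-bit ELF and uses only the standard <elf.h> definitions; the file-name handling and output format are illustrative.

/* sketch: report TEXTREL/RELRO status of an ELF file, 64-bit only */
#include <elf.h>
#include <stdio.h>
#include <string.h>

int main(int argc, char **argv)
{
	FILE *f;
	Elf64_Ehdr eh;
	Elf64_Phdr ph;
	Elf64_Dyn dyn;
	Elf64_Xword off;
	int i, textrel = 0, relro = 0;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <elf-file>\n", argv[0]);
		return 1;
	}
	f = fopen(argv[1], "rb");
	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fread(&eh, sizeof(eh), 1, f) != 1 ||
	    memcmp(eh.e_ident, ELFMAG, SELFMAG) != 0 ||
	    eh.e_ident[EI_CLASS] != ELFCLASS64 ||
	    eh.e_phentsize != sizeof(Elf64_Phdr)) {
		fprintf(stderr, "%s: not a 64-bit ELF\n", argv[1]);
		return 1;
	}
	for (i = 0; i < eh.e_phnum; i++) {
		if (fseek(f, (long)(eh.e_phoff + (Elf64_Off)i * sizeof(ph)), SEEK_SET) ||
		    fread(&ph, sizeof(ph), 1, f) != 1)
			break;
		if (ph.p_type == PT_GNU_RELRO)
			relro = 1;
		if (ph.p_type != PT_DYNAMIC)
			continue;
		/* walk the dynamic entries, bounded by p_filesz, until DT_NULL */
		for (off = 0; off + sizeof(dyn) <= ph.p_filesz; off += sizeof(dyn)) {
			if (fseek(f, (long)(ph.p_offset + off), SEEK_SET) ||
			    fread(&dyn, sizeof(dyn), 1, f) != 1 || dyn.d_tag == DT_NULL)
				break;
			if (dyn.d_tag == DT_TEXTREL ||
			    (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL)))
				textrel = 1;
		}
	}
	fclose(f);
	printf("%s: type=%s textrel=%d relro=%d\n", argv[1],
	       eh.e_type == ET_DYN ? "ET_DYN" :
	       eh.e_type == ET_EXEC ? "ET_EXEC" : "other",
	       textrel, relro);
	return 0;
}

Run against a typical distribution library this usually reports relro=1 and textrel=0, which is exactly the common case the per-segment checks above are tuned to allow.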
40454 diff -urNp linux-3.0.8/fs/binfmt_flat.c linux-3.0.8/fs/binfmt_flat.c
40455 --- linux-3.0.8/fs/binfmt_flat.c 2011-07-21 22:17:23.000000000 -0400
40456 +++ linux-3.0.8/fs/binfmt_flat.c 2011-08-23 21:47:56.000000000 -0400
40457 @@ -567,7 +567,9 @@ static int load_flat_file(struct linux_b
40458 realdatastart = (unsigned long) -ENOMEM;
40459 printk("Unable to allocate RAM for process data, errno %d\n",
40460 (int)-realdatastart);
40461 + down_write(&current->mm->mmap_sem);
40462 do_munmap(current->mm, textpos, text_len);
40463 + up_write(&current->mm->mmap_sem);
40464 ret = realdatastart;
40465 goto err;
40466 }
40467 @@ -591,8 +593,10 @@ static int load_flat_file(struct linux_b
40468 }
40469 if (IS_ERR_VALUE(result)) {
40470 printk("Unable to read data+bss, errno %d\n", (int)-result);
40471 + down_write(&current->mm->mmap_sem);
40472 do_munmap(current->mm, textpos, text_len);
40473 do_munmap(current->mm, realdatastart, len);
40474 + up_write(&current->mm->mmap_sem);
40475 ret = result;
40476 goto err;
40477 }
40478 @@ -661,8 +665,10 @@ static int load_flat_file(struct linux_b
40479 }
40480 if (IS_ERR_VALUE(result)) {
40481 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
40482 + down_write(&current->mm->mmap_sem);
40483 do_munmap(current->mm, textpos, text_len + data_len + extra +
40484 MAX_SHARED_LIBS * sizeof(unsigned long));
40485 + up_write(&current->mm->mmap_sem);
40486 ret = result;
40487 goto err;
40488 }
40489 diff -urNp linux-3.0.8/fs/bio.c linux-3.0.8/fs/bio.c
40490 --- linux-3.0.8/fs/bio.c 2011-07-21 22:17:23.000000000 -0400
40491 +++ linux-3.0.8/fs/bio.c 2011-10-06 04:17:55.000000000 -0400
40492 @@ -1233,7 +1233,7 @@ static void bio_copy_kern_endio(struct b
40493 const int read = bio_data_dir(bio) == READ;
40494 struct bio_map_data *bmd = bio->bi_private;
40495 int i;
40496 - char *p = bmd->sgvecs[0].iov_base;
40497 + char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
40498
40499 __bio_for_each_segment(bvec, bio, i, 0) {
40500 char *addr = page_address(bvec->bv_page);
40501 diff -urNp linux-3.0.8/fs/block_dev.c linux-3.0.8/fs/block_dev.c
40502 --- linux-3.0.8/fs/block_dev.c 2011-10-24 08:05:30.000000000 -0400
40503 +++ linux-3.0.8/fs/block_dev.c 2011-10-16 21:55:28.000000000 -0400
40504 @@ -671,7 +671,7 @@ static bool bd_may_claim(struct block_de
40505 else if (bdev->bd_contains == bdev)
40506 return true; /* is a whole device which isn't held */
40507
40508 - else if (whole->bd_holder == bd_may_claim)
40509 + else if (whole->bd_holder == (void *)bd_may_claim)
40510 return true; /* is a partition of a device that is being partitioned */
40511 else if (whole->bd_holder != NULL)
40512 return false; /* is a partition of a held device */
40513 diff -urNp linux-3.0.8/fs/btrfs/ctree.c linux-3.0.8/fs/btrfs/ctree.c
40514 --- linux-3.0.8/fs/btrfs/ctree.c 2011-07-21 22:17:23.000000000 -0400
40515 +++ linux-3.0.8/fs/btrfs/ctree.c 2011-08-23 21:47:56.000000000 -0400
40516 @@ -454,9 +454,12 @@ static noinline int __btrfs_cow_block(st
40517 free_extent_buffer(buf);
40518 add_root_to_dirty_list(root);
40519 } else {
40520 - if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
40521 - parent_start = parent->start;
40522 - else
40523 + if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
40524 + if (parent)
40525 + parent_start = parent->start;
40526 + else
40527 + parent_start = 0;
40528 + } else
40529 parent_start = 0;
40530
40531 WARN_ON(trans->transid != btrfs_header_generation(parent));
40532 diff -urNp linux-3.0.8/fs/btrfs/inode.c linux-3.0.8/fs/btrfs/inode.c
40533 --- linux-3.0.8/fs/btrfs/inode.c 2011-10-24 08:05:30.000000000 -0400
40534 +++ linux-3.0.8/fs/btrfs/inode.c 2011-10-16 21:55:28.000000000 -0400
40535 @@ -6896,7 +6896,7 @@ fail:
40536 return -ENOMEM;
40537 }
40538
40539 -static int btrfs_getattr(struct vfsmount *mnt,
40540 +int btrfs_getattr(struct vfsmount *mnt,
40541 struct dentry *dentry, struct kstat *stat)
40542 {
40543 struct inode *inode = dentry->d_inode;
40544 @@ -6908,6 +6908,14 @@ static int btrfs_getattr(struct vfsmount
40545 return 0;
40546 }
40547
40548 +EXPORT_SYMBOL(btrfs_getattr);
40549 +
40550 +dev_t get_btrfs_dev_from_inode(struct inode *inode)
40551 +{
40552 + return BTRFS_I(inode)->root->anon_super.s_dev;
40553 +}
40554 +EXPORT_SYMBOL(get_btrfs_dev_from_inode);
40555 +
40556 /*
40557 * If a file is moved, it will inherit the cow and compression flags of the new
40558 * directory.
40559 diff -urNp linux-3.0.8/fs/btrfs/ioctl.c linux-3.0.8/fs/btrfs/ioctl.c
40560 --- linux-3.0.8/fs/btrfs/ioctl.c 2011-07-21 22:17:23.000000000 -0400
40561 +++ linux-3.0.8/fs/btrfs/ioctl.c 2011-10-06 04:17:55.000000000 -0400
40562 @@ -2676,9 +2676,12 @@ long btrfs_ioctl_space_info(struct btrfs
40563 for (i = 0; i < num_types; i++) {
40564 struct btrfs_space_info *tmp;
40565
40566 + /* Don't copy in more than we allocated */
40567 if (!slot_count)
40568 break;
40569
40570 + slot_count--;
40571 +
40572 info = NULL;
40573 rcu_read_lock();
40574 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
40575 @@ -2700,15 +2703,12 @@ long btrfs_ioctl_space_info(struct btrfs
40576 memcpy(dest, &space, sizeof(space));
40577 dest++;
40578 space_args.total_spaces++;
40579 - slot_count--;
40580 }
40581 - if (!slot_count)
40582 - break;
40583 }
40584 up_read(&info->groups_sem);
40585 }
40586
40587 - user_dest = (struct btrfs_ioctl_space_info *)
40588 + user_dest = (struct btrfs_ioctl_space_info __user *)
40589 (arg + sizeof(struct btrfs_ioctl_space_args));
40590
40591 if (copy_to_user(user_dest, dest_orig, alloc_size))
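A minimal sketch (plain userspace C, not btrfs code) of the bounds pattern the hunk above introduces: consume one slot before each write so the buffer sized by the earlier slot_count computation can never be overrun, however many candidate entries the inner loop finds.

#include <stdio.h>
#include <stdlib.h>

struct entry { int type; long total; };

int main(void)
{
	size_t slot_count = 4;   /* size the destination was allocated with */
	struct entry *dest_orig = calloc(slot_count, sizeof(*dest_orig));
	struct entry *dest = dest_orig;
	size_t filled = 0;
	int type;

	if (!dest_orig)
		return 1;
	for (type = 0; type < 10; type++) {   /* more candidates than slots */
		if (!slot_count)
			break;        /* don't copy in more than we allocated */
		slot_count--;         /* consume the slot before filling it */
		dest->type = type;
		dest->total = type * 100L;
		dest++;
		filled++;
	}
	printf("filled %zu entries\n", filled);
	free(dest_orig);
	return 0;
}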
40592 diff -urNp linux-3.0.8/fs/btrfs/relocation.c linux-3.0.8/fs/btrfs/relocation.c
40593 --- linux-3.0.8/fs/btrfs/relocation.c 2011-07-21 22:17:23.000000000 -0400
40594 +++ linux-3.0.8/fs/btrfs/relocation.c 2011-08-23 21:47:56.000000000 -0400
40595 @@ -1242,7 +1242,7 @@ static int __update_reloc_root(struct bt
40596 }
40597 spin_unlock(&rc->reloc_root_tree.lock);
40598
40599 - BUG_ON((struct btrfs_root *)node->data != root);
40600 + BUG_ON(!node || (struct btrfs_root *)node->data != root);
40601
40602 if (!del) {
40603 spin_lock(&rc->reloc_root_tree.lock);
40604 diff -urNp linux-3.0.8/fs/cachefiles/bind.c linux-3.0.8/fs/cachefiles/bind.c
40605 --- linux-3.0.8/fs/cachefiles/bind.c 2011-07-21 22:17:23.000000000 -0400
40606 +++ linux-3.0.8/fs/cachefiles/bind.c 2011-08-23 21:47:56.000000000 -0400
40607 @@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachef
40608 args);
40609
40610 /* start by checking things over */
40611 - ASSERT(cache->fstop_percent >= 0 &&
40612 - cache->fstop_percent < cache->fcull_percent &&
40613 + ASSERT(cache->fstop_percent < cache->fcull_percent &&
40614 cache->fcull_percent < cache->frun_percent &&
40615 cache->frun_percent < 100);
40616
40617 - ASSERT(cache->bstop_percent >= 0 &&
40618 - cache->bstop_percent < cache->bcull_percent &&
40619 + ASSERT(cache->bstop_percent < cache->bcull_percent &&
40620 cache->bcull_percent < cache->brun_percent &&
40621 cache->brun_percent < 100);
40622
40623 diff -urNp linux-3.0.8/fs/cachefiles/daemon.c linux-3.0.8/fs/cachefiles/daemon.c
40624 --- linux-3.0.8/fs/cachefiles/daemon.c 2011-07-21 22:17:23.000000000 -0400
40625 +++ linux-3.0.8/fs/cachefiles/daemon.c 2011-08-23 21:47:56.000000000 -0400
40626 @@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(st
40627 if (n > buflen)
40628 return -EMSGSIZE;
40629
40630 - if (copy_to_user(_buffer, buffer, n) != 0)
40631 + if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
40632 return -EFAULT;
40633
40634 return n;
40635 @@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(s
40636 if (test_bit(CACHEFILES_DEAD, &cache->flags))
40637 return -EIO;
40638
40639 - if (datalen < 0 || datalen > PAGE_SIZE - 1)
40640 + if (datalen > PAGE_SIZE - 1)
40641 return -EOPNOTSUPP;
40642
40643 /* drag the command string into the kernel so we can parse it */
40644 @@ -386,7 +386,7 @@ static int cachefiles_daemon_fstop(struc
40645 if (args[0] != '%' || args[1] != '\0')
40646 return -EINVAL;
40647
40648 - if (fstop < 0 || fstop >= cache->fcull_percent)
40649 + if (fstop >= cache->fcull_percent)
40650 return cachefiles_daemon_range_error(cache, args);
40651
40652 cache->fstop_percent = fstop;
40653 @@ -458,7 +458,7 @@ static int cachefiles_daemon_bstop(struc
40654 if (args[0] != '%' || args[1] != '\0')
40655 return -EINVAL;
40656
40657 - if (bstop < 0 || bstop >= cache->bcull_percent)
40658 + if (bstop >= cache->bcull_percent)
40659 return cachefiles_daemon_range_error(cache, args);
40660
40661 cache->bstop_percent = bstop;
40662 diff -urNp linux-3.0.8/fs/cachefiles/internal.h linux-3.0.8/fs/cachefiles/internal.h
40663 --- linux-3.0.8/fs/cachefiles/internal.h 2011-07-21 22:17:23.000000000 -0400
40664 +++ linux-3.0.8/fs/cachefiles/internal.h 2011-08-23 21:47:56.000000000 -0400
40665 @@ -57,7 +57,7 @@ struct cachefiles_cache {
40666 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
40667 struct rb_root active_nodes; /* active nodes (can't be culled) */
40668 rwlock_t active_lock; /* lock for active_nodes */
40669 - atomic_t gravecounter; /* graveyard uniquifier */
40670 + atomic_unchecked_t gravecounter; /* graveyard uniquifier */
40671 unsigned frun_percent; /* when to stop culling (% files) */
40672 unsigned fcull_percent; /* when to start culling (% files) */
40673 unsigned fstop_percent; /* when to stop allocating (% files) */
40674 @@ -169,19 +169,19 @@ extern int cachefiles_check_in_use(struc
40675 * proc.c
40676 */
40677 #ifdef CONFIG_CACHEFILES_HISTOGRAM
40678 -extern atomic_t cachefiles_lookup_histogram[HZ];
40679 -extern atomic_t cachefiles_mkdir_histogram[HZ];
40680 -extern atomic_t cachefiles_create_histogram[HZ];
40681 +extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
40682 +extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
40683 +extern atomic_unchecked_t cachefiles_create_histogram[HZ];
40684
40685 extern int __init cachefiles_proc_init(void);
40686 extern void cachefiles_proc_cleanup(void);
40687 static inline
40688 -void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
40689 +void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
40690 {
40691 unsigned long jif = jiffies - start_jif;
40692 if (jif >= HZ)
40693 jif = HZ - 1;
40694 - atomic_inc(&histogram[jif]);
40695 + atomic_inc_unchecked(&histogram[jif]);
40696 }
40697
40698 #else
40699 diff -urNp linux-3.0.8/fs/cachefiles/namei.c linux-3.0.8/fs/cachefiles/namei.c
40700 --- linux-3.0.8/fs/cachefiles/namei.c 2011-07-21 22:17:23.000000000 -0400
40701 +++ linux-3.0.8/fs/cachefiles/namei.c 2011-08-23 21:47:56.000000000 -0400
40702 @@ -318,7 +318,7 @@ try_again:
40703 /* first step is to make up a grave dentry in the graveyard */
40704 sprintf(nbuffer, "%08x%08x",
40705 (uint32_t) get_seconds(),
40706 - (uint32_t) atomic_inc_return(&cache->gravecounter));
40707 + (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
40708
40709 /* do the multiway lock magic */
40710 trap = lock_rename(cache->graveyard, dir);
40711 diff -urNp linux-3.0.8/fs/cachefiles/proc.c linux-3.0.8/fs/cachefiles/proc.c
40712 --- linux-3.0.8/fs/cachefiles/proc.c 2011-07-21 22:17:23.000000000 -0400
40713 +++ linux-3.0.8/fs/cachefiles/proc.c 2011-08-23 21:47:56.000000000 -0400
40714 @@ -14,9 +14,9 @@
40715 #include <linux/seq_file.h>
40716 #include "internal.h"
40717
40718 -atomic_t cachefiles_lookup_histogram[HZ];
40719 -atomic_t cachefiles_mkdir_histogram[HZ];
40720 -atomic_t cachefiles_create_histogram[HZ];
40721 +atomic_unchecked_t cachefiles_lookup_histogram[HZ];
40722 +atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
40723 +atomic_unchecked_t cachefiles_create_histogram[HZ];
40724
40725 /*
40726 * display the latency histogram
40727 @@ -35,9 +35,9 @@ static int cachefiles_histogram_show(str
40728 return 0;
40729 default:
40730 index = (unsigned long) v - 3;
40731 - x = atomic_read(&cachefiles_lookup_histogram[index]);
40732 - y = atomic_read(&cachefiles_mkdir_histogram[index]);
40733 - z = atomic_read(&cachefiles_create_histogram[index]);
40734 + x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
40735 + y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
40736 + z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
40737 if (x == 0 && y == 0 && z == 0)
40738 return 0;
40739
40740 diff -urNp linux-3.0.8/fs/cachefiles/rdwr.c linux-3.0.8/fs/cachefiles/rdwr.c
40741 --- linux-3.0.8/fs/cachefiles/rdwr.c 2011-07-21 22:17:23.000000000 -0400
40742 +++ linux-3.0.8/fs/cachefiles/rdwr.c 2011-10-06 04:17:55.000000000 -0400
40743 @@ -945,7 +945,7 @@ int cachefiles_write_page(struct fscache
40744 old_fs = get_fs();
40745 set_fs(KERNEL_DS);
40746 ret = file->f_op->write(
40747 - file, (const void __user *) data, len, &pos);
40748 + file, (const void __force_user *) data, len, &pos);
40749 set_fs(old_fs);
40750 kunmap(page);
40751 if (ret != len)
40752 diff -urNp linux-3.0.8/fs/ceph/dir.c linux-3.0.8/fs/ceph/dir.c
40753 --- linux-3.0.8/fs/ceph/dir.c 2011-07-21 22:17:23.000000000 -0400
40754 +++ linux-3.0.8/fs/ceph/dir.c 2011-08-23 21:47:56.000000000 -0400
40755 @@ -226,7 +226,7 @@ static int ceph_readdir(struct file *fil
40756 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
40757 struct ceph_mds_client *mdsc = fsc->mdsc;
40758 unsigned frag = fpos_frag(filp->f_pos);
40759 - int off = fpos_off(filp->f_pos);
40760 + unsigned int off = fpos_off(filp->f_pos);
40761 int err;
40762 u32 ftype;
40763 struct ceph_mds_reply_info_parsed *rinfo;
40764 diff -urNp linux-3.0.8/fs/cifs/cifs_debug.c linux-3.0.8/fs/cifs/cifs_debug.c
40765 --- linux-3.0.8/fs/cifs/cifs_debug.c 2011-07-21 22:17:23.000000000 -0400
40766 +++ linux-3.0.8/fs/cifs/cifs_debug.c 2011-08-25 17:18:05.000000000 -0400
40767 @@ -265,8 +265,8 @@ static ssize_t cifs_stats_proc_write(str
40768
40769 if (c == '1' || c == 'y' || c == 'Y' || c == '0') {
40770 #ifdef CONFIG_CIFS_STATS2
40771 - atomic_set(&totBufAllocCount, 0);
40772 - atomic_set(&totSmBufAllocCount, 0);
40773 + atomic_set_unchecked(&totBufAllocCount, 0);
40774 + atomic_set_unchecked(&totSmBufAllocCount, 0);
40775 #endif /* CONFIG_CIFS_STATS2 */
40776 spin_lock(&cifs_tcp_ses_lock);
40777 list_for_each(tmp1, &cifs_tcp_ses_list) {
40778 @@ -279,25 +279,25 @@ static ssize_t cifs_stats_proc_write(str
40779 tcon = list_entry(tmp3,
40780 struct cifs_tcon,
40781 tcon_list);
40782 - atomic_set(&tcon->num_smbs_sent, 0);
40783 - atomic_set(&tcon->num_writes, 0);
40784 - atomic_set(&tcon->num_reads, 0);
40785 - atomic_set(&tcon->num_oplock_brks, 0);
40786 - atomic_set(&tcon->num_opens, 0);
40787 - atomic_set(&tcon->num_posixopens, 0);
40788 - atomic_set(&tcon->num_posixmkdirs, 0);
40789 - atomic_set(&tcon->num_closes, 0);
40790 - atomic_set(&tcon->num_deletes, 0);
40791 - atomic_set(&tcon->num_mkdirs, 0);
40792 - atomic_set(&tcon->num_rmdirs, 0);
40793 - atomic_set(&tcon->num_renames, 0);
40794 - atomic_set(&tcon->num_t2renames, 0);
40795 - atomic_set(&tcon->num_ffirst, 0);
40796 - atomic_set(&tcon->num_fnext, 0);
40797 - atomic_set(&tcon->num_fclose, 0);
40798 - atomic_set(&tcon->num_hardlinks, 0);
40799 - atomic_set(&tcon->num_symlinks, 0);
40800 - atomic_set(&tcon->num_locks, 0);
40801 + atomic_set_unchecked(&tcon->num_smbs_sent, 0);
40802 + atomic_set_unchecked(&tcon->num_writes, 0);
40803 + atomic_set_unchecked(&tcon->num_reads, 0);
40804 + atomic_set_unchecked(&tcon->num_oplock_brks, 0);
40805 + atomic_set_unchecked(&tcon->num_opens, 0);
40806 + atomic_set_unchecked(&tcon->num_posixopens, 0);
40807 + atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
40808 + atomic_set_unchecked(&tcon->num_closes, 0);
40809 + atomic_set_unchecked(&tcon->num_deletes, 0);
40810 + atomic_set_unchecked(&tcon->num_mkdirs, 0);
40811 + atomic_set_unchecked(&tcon->num_rmdirs, 0);
40812 + atomic_set_unchecked(&tcon->num_renames, 0);
40813 + atomic_set_unchecked(&tcon->num_t2renames, 0);
40814 + atomic_set_unchecked(&tcon->num_ffirst, 0);
40815 + atomic_set_unchecked(&tcon->num_fnext, 0);
40816 + atomic_set_unchecked(&tcon->num_fclose, 0);
40817 + atomic_set_unchecked(&tcon->num_hardlinks, 0);
40818 + atomic_set_unchecked(&tcon->num_symlinks, 0);
40819 + atomic_set_unchecked(&tcon->num_locks, 0);
40820 }
40821 }
40822 }
40823 @@ -327,8 +327,8 @@ static int cifs_stats_proc_show(struct s
40824 smBufAllocCount.counter, cifs_min_small);
40825 #ifdef CONFIG_CIFS_STATS2
40826 seq_printf(m, "Total Large %d Small %d Allocations\n",
40827 - atomic_read(&totBufAllocCount),
40828 - atomic_read(&totSmBufAllocCount));
40829 + atomic_read_unchecked(&totBufAllocCount),
40830 + atomic_read_unchecked(&totSmBufAllocCount));
40831 #endif /* CONFIG_CIFS_STATS2 */
40832
40833 seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
40834 @@ -357,41 +357,41 @@ static int cifs_stats_proc_show(struct s
40835 if (tcon->need_reconnect)
40836 seq_puts(m, "\tDISCONNECTED ");
40837 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
40838 - atomic_read(&tcon->num_smbs_sent),
40839 - atomic_read(&tcon->num_oplock_brks));
40840 + atomic_read_unchecked(&tcon->num_smbs_sent),
40841 + atomic_read_unchecked(&tcon->num_oplock_brks));
40842 seq_printf(m, "\nReads: %d Bytes: %lld",
40843 - atomic_read(&tcon->num_reads),
40844 + atomic_read_unchecked(&tcon->num_reads),
40845 (long long)(tcon->bytes_read));
40846 seq_printf(m, "\nWrites: %d Bytes: %lld",
40847 - atomic_read(&tcon->num_writes),
40848 + atomic_read_unchecked(&tcon->num_writes),
40849 (long long)(tcon->bytes_written));
40850 seq_printf(m, "\nFlushes: %d",
40851 - atomic_read(&tcon->num_flushes));
40852 + atomic_read_unchecked(&tcon->num_flushes));
40853 seq_printf(m, "\nLocks: %d HardLinks: %d "
40854 "Symlinks: %d",
40855 - atomic_read(&tcon->num_locks),
40856 - atomic_read(&tcon->num_hardlinks),
40857 - atomic_read(&tcon->num_symlinks));
40858 + atomic_read_unchecked(&tcon->num_locks),
40859 + atomic_read_unchecked(&tcon->num_hardlinks),
40860 + atomic_read_unchecked(&tcon->num_symlinks));
40861 seq_printf(m, "\nOpens: %d Closes: %d "
40862 "Deletes: %d",
40863 - atomic_read(&tcon->num_opens),
40864 - atomic_read(&tcon->num_closes),
40865 - atomic_read(&tcon->num_deletes));
40866 + atomic_read_unchecked(&tcon->num_opens),
40867 + atomic_read_unchecked(&tcon->num_closes),
40868 + atomic_read_unchecked(&tcon->num_deletes));
40869 seq_printf(m, "\nPosix Opens: %d "
40870 "Posix Mkdirs: %d",
40871 - atomic_read(&tcon->num_posixopens),
40872 - atomic_read(&tcon->num_posixmkdirs));
40873 + atomic_read_unchecked(&tcon->num_posixopens),
40874 + atomic_read_unchecked(&tcon->num_posixmkdirs));
40875 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
40876 - atomic_read(&tcon->num_mkdirs),
40877 - atomic_read(&tcon->num_rmdirs));
40878 + atomic_read_unchecked(&tcon->num_mkdirs),
40879 + atomic_read_unchecked(&tcon->num_rmdirs));
40880 seq_printf(m, "\nRenames: %d T2 Renames %d",
40881 - atomic_read(&tcon->num_renames),
40882 - atomic_read(&tcon->num_t2renames));
40883 + atomic_read_unchecked(&tcon->num_renames),
40884 + atomic_read_unchecked(&tcon->num_t2renames));
40885 seq_printf(m, "\nFindFirst: %d FNext %d "
40886 "FClose %d",
40887 - atomic_read(&tcon->num_ffirst),
40888 - atomic_read(&tcon->num_fnext),
40889 - atomic_read(&tcon->num_fclose));
40890 + atomic_read_unchecked(&tcon->num_ffirst),
40891 + atomic_read_unchecked(&tcon->num_fnext),
40892 + atomic_read_unchecked(&tcon->num_fclose));
40893 }
40894 }
40895 }
40896 diff -urNp linux-3.0.8/fs/cifs/cifsfs.c linux-3.0.8/fs/cifs/cifsfs.c
40897 --- linux-3.0.8/fs/cifs/cifsfs.c 2011-10-25 09:10:33.000000000 -0400
40898 +++ linux-3.0.8/fs/cifs/cifsfs.c 2011-10-25 09:10:41.000000000 -0400
40899 @@ -996,7 +996,7 @@ cifs_init_request_bufs(void)
40900 cifs_req_cachep = kmem_cache_create("cifs_request",
40901 CIFSMaxBufSize +
40902 MAX_CIFS_HDR_SIZE, 0,
40903 - SLAB_HWCACHE_ALIGN, NULL);
40904 + SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
40905 if (cifs_req_cachep == NULL)
40906 return -ENOMEM;
40907
40908 @@ -1023,7 +1023,7 @@ cifs_init_request_bufs(void)
40909 efficient to alloc 1 per page off the slab compared to 17K (5page)
40910 alloc of large cifs buffers even when page debugging is on */
40911 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
40912 - MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
40913 + MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
40914 NULL);
40915 if (cifs_sm_req_cachep == NULL) {
40916 mempool_destroy(cifs_req_poolp);
40917 @@ -1108,8 +1108,8 @@ init_cifs(void)
40918 atomic_set(&bufAllocCount, 0);
40919 atomic_set(&smBufAllocCount, 0);
40920 #ifdef CONFIG_CIFS_STATS2
40921 - atomic_set(&totBufAllocCount, 0);
40922 - atomic_set(&totSmBufAllocCount, 0);
40923 + atomic_set_unchecked(&totBufAllocCount, 0);
40924 + atomic_set_unchecked(&totSmBufAllocCount, 0);
40925 #endif /* CONFIG_CIFS_STATS2 */
40926
40927 atomic_set(&midCount, 0);
40928 diff -urNp linux-3.0.8/fs/cifs/cifsglob.h linux-3.0.8/fs/cifs/cifsglob.h
40929 --- linux-3.0.8/fs/cifs/cifsglob.h 2011-07-21 22:17:23.000000000 -0400
40930 +++ linux-3.0.8/fs/cifs/cifsglob.h 2011-08-25 17:18:05.000000000 -0400
40931 @@ -381,28 +381,28 @@ struct cifs_tcon {
40932 __u16 Flags; /* optional support bits */
40933 enum statusEnum tidStatus;
40934 #ifdef CONFIG_CIFS_STATS
40935 - atomic_t num_smbs_sent;
40936 - atomic_t num_writes;
40937 - atomic_t num_reads;
40938 - atomic_t num_flushes;
40939 - atomic_t num_oplock_brks;
40940 - atomic_t num_opens;
40941 - atomic_t num_closes;
40942 - atomic_t num_deletes;
40943 - atomic_t num_mkdirs;
40944 - atomic_t num_posixopens;
40945 - atomic_t num_posixmkdirs;
40946 - atomic_t num_rmdirs;
40947 - atomic_t num_renames;
40948 - atomic_t num_t2renames;
40949 - atomic_t num_ffirst;
40950 - atomic_t num_fnext;
40951 - atomic_t num_fclose;
40952 - atomic_t num_hardlinks;
40953 - atomic_t num_symlinks;
40954 - atomic_t num_locks;
40955 - atomic_t num_acl_get;
40956 - atomic_t num_acl_set;
40957 + atomic_unchecked_t num_smbs_sent;
40958 + atomic_unchecked_t num_writes;
40959 + atomic_unchecked_t num_reads;
40960 + atomic_unchecked_t num_flushes;
40961 + atomic_unchecked_t num_oplock_brks;
40962 + atomic_unchecked_t num_opens;
40963 + atomic_unchecked_t num_closes;
40964 + atomic_unchecked_t num_deletes;
40965 + atomic_unchecked_t num_mkdirs;
40966 + atomic_unchecked_t num_posixopens;
40967 + atomic_unchecked_t num_posixmkdirs;
40968 + atomic_unchecked_t num_rmdirs;
40969 + atomic_unchecked_t num_renames;
40970 + atomic_unchecked_t num_t2renames;
40971 + atomic_unchecked_t num_ffirst;
40972 + atomic_unchecked_t num_fnext;
40973 + atomic_unchecked_t num_fclose;
40974 + atomic_unchecked_t num_hardlinks;
40975 + atomic_unchecked_t num_symlinks;
40976 + atomic_unchecked_t num_locks;
40977 + atomic_unchecked_t num_acl_get;
40978 + atomic_unchecked_t num_acl_set;
40979 #ifdef CONFIG_CIFS_STATS2
40980 unsigned long long time_writes;
40981 unsigned long long time_reads;
40982 @@ -613,7 +613,7 @@ convert_delimiter(char *path, char delim
40983 }
40984
40985 #ifdef CONFIG_CIFS_STATS
40986 -#define cifs_stats_inc atomic_inc
40987 +#define cifs_stats_inc atomic_inc_unchecked
40988
40989 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
40990 unsigned int bytes)
40991 @@ -911,8 +911,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnect
40992 /* Various Debug counters */
40993 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
40994 #ifdef CONFIG_CIFS_STATS2
40995 -GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
40996 -GLOBAL_EXTERN atomic_t totSmBufAllocCount;
40997 +GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
40998 +GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
40999 #endif
41000 GLOBAL_EXTERN atomic_t smBufAllocCount;
41001 GLOBAL_EXTERN atomic_t midCount;
41002 diff -urNp linux-3.0.8/fs/cifs/link.c linux-3.0.8/fs/cifs/link.c
41003 --- linux-3.0.8/fs/cifs/link.c 2011-07-21 22:17:23.000000000 -0400
41004 +++ linux-3.0.8/fs/cifs/link.c 2011-08-23 21:47:56.000000000 -0400
41005 @@ -587,7 +587,7 @@ symlink_exit:
41006
41007 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
41008 {
41009 - char *p = nd_get_link(nd);
41010 + const char *p = nd_get_link(nd);
41011 if (!IS_ERR(p))
41012 kfree(p);
41013 }
41014 diff -urNp linux-3.0.8/fs/cifs/misc.c linux-3.0.8/fs/cifs/misc.c
41015 --- linux-3.0.8/fs/cifs/misc.c 2011-07-21 22:17:23.000000000 -0400
41016 +++ linux-3.0.8/fs/cifs/misc.c 2011-08-25 17:18:05.000000000 -0400
41017 @@ -156,7 +156,7 @@ cifs_buf_get(void)
41018 memset(ret_buf, 0, sizeof(struct smb_hdr) + 3);
41019 atomic_inc(&bufAllocCount);
41020 #ifdef CONFIG_CIFS_STATS2
41021 - atomic_inc(&totBufAllocCount);
41022 + atomic_inc_unchecked(&totBufAllocCount);
41023 #endif /* CONFIG_CIFS_STATS2 */
41024 }
41025
41026 @@ -191,7 +191,7 @@ cifs_small_buf_get(void)
41027 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
41028 atomic_inc(&smBufAllocCount);
41029 #ifdef CONFIG_CIFS_STATS2
41030 - atomic_inc(&totSmBufAllocCount);
41031 + atomic_inc_unchecked(&totSmBufAllocCount);
41032 #endif /* CONFIG_CIFS_STATS2 */
41033
41034 }
41035 diff -urNp linux-3.0.8/fs/coda/cache.c linux-3.0.8/fs/coda/cache.c
41036 --- linux-3.0.8/fs/coda/cache.c 2011-07-21 22:17:23.000000000 -0400
41037 +++ linux-3.0.8/fs/coda/cache.c 2011-08-23 21:47:56.000000000 -0400
41038 @@ -24,7 +24,7 @@
41039 #include "coda_linux.h"
41040 #include "coda_cache.h"
41041
41042 -static atomic_t permission_epoch = ATOMIC_INIT(0);
41043 +static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
41044
41045 /* replace or extend an acl cache hit */
41046 void coda_cache_enter(struct inode *inode, int mask)
41047 @@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inod
41048 struct coda_inode_info *cii = ITOC(inode);
41049
41050 spin_lock(&cii->c_lock);
41051 - cii->c_cached_epoch = atomic_read(&permission_epoch);
41052 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
41053 if (cii->c_uid != current_fsuid()) {
41054 cii->c_uid = current_fsuid();
41055 cii->c_cached_perm = mask;
41056 @@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode
41057 {
41058 struct coda_inode_info *cii = ITOC(inode);
41059 spin_lock(&cii->c_lock);
41060 - cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
41061 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
41062 spin_unlock(&cii->c_lock);
41063 }
41064
41065 /* remove all acl caches */
41066 void coda_cache_clear_all(struct super_block *sb)
41067 {
41068 - atomic_inc(&permission_epoch);
41069 + atomic_inc_unchecked(&permission_epoch);
41070 }
41071
41072
41073 @@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode
41074 spin_lock(&cii->c_lock);
41075 hit = (mask & cii->c_cached_perm) == mask &&
41076 cii->c_uid == current_fsuid() &&
41077 - cii->c_cached_epoch == atomic_read(&permission_epoch);
41078 + cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
41079 spin_unlock(&cii->c_lock);
41080
41081 return hit;
41082 diff -urNp linux-3.0.8/fs/compat_binfmt_elf.c linux-3.0.8/fs/compat_binfmt_elf.c
41083 --- linux-3.0.8/fs/compat_binfmt_elf.c 2011-07-21 22:17:23.000000000 -0400
41084 +++ linux-3.0.8/fs/compat_binfmt_elf.c 2011-08-23 21:47:56.000000000 -0400
41085 @@ -30,11 +30,13 @@
41086 #undef elf_phdr
41087 #undef elf_shdr
41088 #undef elf_note
41089 +#undef elf_dyn
41090 #undef elf_addr_t
41091 #define elfhdr elf32_hdr
41092 #define elf_phdr elf32_phdr
41093 #define elf_shdr elf32_shdr
41094 #define elf_note elf32_note
41095 +#define elf_dyn Elf32_Dyn
41096 #define elf_addr_t Elf32_Addr
41097
41098 /*
41099 diff -urNp linux-3.0.8/fs/compat.c linux-3.0.8/fs/compat.c
41100 --- linux-3.0.8/fs/compat.c 2011-07-21 22:17:23.000000000 -0400
41101 +++ linux-3.0.8/fs/compat.c 2011-10-06 04:17:55.000000000 -0400
41102 @@ -133,8 +133,8 @@ asmlinkage long compat_sys_utimes(const
41103 static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
41104 {
41105 compat_ino_t ino = stat->ino;
41106 - typeof(ubuf->st_uid) uid = 0;
41107 - typeof(ubuf->st_gid) gid = 0;
41108 + typeof(((struct compat_stat *)0)->st_uid) uid = 0;
41109 + typeof(((struct compat_stat *)0)->st_gid) gid = 0;
41110 int err;
41111
41112 SET_UID(uid, stat->uid);
41113 @@ -508,7 +508,7 @@ compat_sys_io_setup(unsigned nr_reqs, u3
41114
41115 set_fs(KERNEL_DS);
41116 /* The __user pointer cast is valid because of the set_fs() */
41117 - ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
41118 + ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
41119 set_fs(oldfs);
41120 /* truncating is ok because it's a user address */
41121 if (!ret)
41122 @@ -566,7 +566,7 @@ ssize_t compat_rw_copy_check_uvector(int
41123 goto out;
41124
41125 ret = -EINVAL;
41126 - if (nr_segs > UIO_MAXIOV || nr_segs < 0)
41127 + if (nr_segs > UIO_MAXIOV)
41128 goto out;
41129 if (nr_segs > fast_segs) {
41130 ret = -ENOMEM;
41131 @@ -848,6 +848,7 @@ struct compat_old_linux_dirent {
41132
41133 struct compat_readdir_callback {
41134 struct compat_old_linux_dirent __user *dirent;
41135 + struct file * file;
41136 int result;
41137 };
41138
41139 @@ -865,6 +866,10 @@ static int compat_fillonedir(void *__buf
41140 buf->result = -EOVERFLOW;
41141 return -EOVERFLOW;
41142 }
41143 +
41144 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
41145 + return 0;
41146 +
41147 buf->result++;
41148 dirent = buf->dirent;
41149 if (!access_ok(VERIFY_WRITE, dirent,
41150 @@ -897,6 +902,7 @@ asmlinkage long compat_sys_old_readdir(u
41151
41152 buf.result = 0;
41153 buf.dirent = dirent;
41154 + buf.file = file;
41155
41156 error = vfs_readdir(file, compat_fillonedir, &buf);
41157 if (buf.result)
41158 @@ -917,6 +923,7 @@ struct compat_linux_dirent {
41159 struct compat_getdents_callback {
41160 struct compat_linux_dirent __user *current_dir;
41161 struct compat_linux_dirent __user *previous;
41162 + struct file * file;
41163 int count;
41164 int error;
41165 };
41166 @@ -938,6 +945,10 @@ static int compat_filldir(void *__buf, c
41167 buf->error = -EOVERFLOW;
41168 return -EOVERFLOW;
41169 }
41170 +
41171 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
41172 + return 0;
41173 +
41174 dirent = buf->previous;
41175 if (dirent) {
41176 if (__put_user(offset, &dirent->d_off))
41177 @@ -985,6 +996,7 @@ asmlinkage long compat_sys_getdents(unsi
41178 buf.previous = NULL;
41179 buf.count = count;
41180 buf.error = 0;
41181 + buf.file = file;
41182
41183 error = vfs_readdir(file, compat_filldir, &buf);
41184 if (error >= 0)
41185 @@ -1006,6 +1018,7 @@ out:
41186 struct compat_getdents_callback64 {
41187 struct linux_dirent64 __user *current_dir;
41188 struct linux_dirent64 __user *previous;
41189 + struct file * file;
41190 int count;
41191 int error;
41192 };
41193 @@ -1022,6 +1035,10 @@ static int compat_filldir64(void * __buf
41194 buf->error = -EINVAL; /* only used if we fail.. */
41195 if (reclen > buf->count)
41196 return -EINVAL;
41197 +
41198 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
41199 + return 0;
41200 +
41201 dirent = buf->previous;
41202
41203 if (dirent) {
41204 @@ -1073,13 +1090,14 @@ asmlinkage long compat_sys_getdents64(un
41205 buf.previous = NULL;
41206 buf.count = count;
41207 buf.error = 0;
41208 + buf.file = file;
41209
41210 error = vfs_readdir(file, compat_filldir64, &buf);
41211 if (error >= 0)
41212 error = buf.error;
41213 lastdirent = buf.previous;
41214 if (lastdirent) {
41215 - typeof(lastdirent->d_off) d_off = file->f_pos;
41216 + typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
41217 if (__put_user_unaligned(d_off, &lastdirent->d_off))
41218 error = -EFAULT;
41219 else
41220 @@ -1446,6 +1464,8 @@ int compat_core_sys_select(int n, compat
41221 struct fdtable *fdt;
41222 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
41223
41224 + pax_track_stack();
41225 +
41226 if (n < 0)
41227 goto out_nofds;
41228
41229 @@ -1904,7 +1924,7 @@ asmlinkage long compat_sys_nfsservctl(in
41230 oldfs = get_fs();
41231 set_fs(KERNEL_DS);
41232 /* The __user pointer casts are valid because of the set_fs() */
41233 - err = sys_nfsservctl(cmd, (void __user *) karg, (void __user *) kres);
41234 + err = sys_nfsservctl(cmd, (void __force_user *) karg, (void __force_user *) kres);
41235 set_fs(oldfs);
41236
41237 if (err)
41238 diff -urNp linux-3.0.8/fs/compat_ioctl.c linux-3.0.8/fs/compat_ioctl.c
41239 --- linux-3.0.8/fs/compat_ioctl.c 2011-07-21 22:17:23.000000000 -0400
41240 +++ linux-3.0.8/fs/compat_ioctl.c 2011-10-06 04:17:55.000000000 -0400
41241 @@ -208,6 +208,8 @@ static int do_video_set_spu_palette(unsi
41242
41243 err = get_user(palp, &up->palette);
41244 err |= get_user(length, &up->length);
41245 + if (err)
41246 + return -EFAULT;
41247
41248 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
41249 err = put_user(compat_ptr(palp), &up_native->palette);
41250 @@ -619,7 +621,7 @@ static int serial_struct_ioctl(unsigned
41251 return -EFAULT;
41252 if (__get_user(udata, &ss32->iomem_base))
41253 return -EFAULT;
41254 - ss.iomem_base = compat_ptr(udata);
41255 + ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
41256 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
41257 __get_user(ss.port_high, &ss32->port_high))
41258 return -EFAULT;
41259 @@ -794,7 +796,7 @@ static int compat_ioctl_preallocate(stru
41260 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
41261 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
41262 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
41263 - copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
41264 + copy_in_user(p->l_pad, &p32->l_pad, 4*sizeof(u32)))
41265 return -EFAULT;
41266
41267 return ioctl_preallocate(file, p);
41268 @@ -1638,8 +1640,8 @@ asmlinkage long compat_sys_ioctl(unsigne
41269 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
41270 {
41271 unsigned int a, b;
41272 - a = *(unsigned int *)p;
41273 - b = *(unsigned int *)q;
41274 + a = *(const unsigned int *)p;
41275 + b = *(const unsigned int *)q;
41276 if (a > b)
41277 return 1;
41278 if (a < b)
41279 diff -urNp linux-3.0.8/fs/configfs/dir.c linux-3.0.8/fs/configfs/dir.c
41280 --- linux-3.0.8/fs/configfs/dir.c 2011-07-21 22:17:23.000000000 -0400
41281 +++ linux-3.0.8/fs/configfs/dir.c 2011-08-23 21:47:56.000000000 -0400
41282 @@ -1575,7 +1575,8 @@ static int configfs_readdir(struct file
41283 }
41284 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
41285 struct configfs_dirent *next;
41286 - const char * name;
41287 + const unsigned char * name;
41288 + char d_name[sizeof(next->s_dentry->d_iname)];
41289 int len;
41290 struct inode *inode = NULL;
41291
41292 @@ -1585,7 +1586,12 @@ static int configfs_readdir(struct file
41293 continue;
41294
41295 name = configfs_get_name(next);
41296 - len = strlen(name);
41297 + if (next->s_dentry && name == next->s_dentry->d_iname) {
41298 + len = next->s_dentry->d_name.len;
41299 + memcpy(d_name, name, len);
41300 + name = d_name;
41301 + } else
41302 + len = strlen(name);
41303
41304 /*
41305 * We'll have a dentry and an inode for
41306 diff -urNp linux-3.0.8/fs/dcache.c linux-3.0.8/fs/dcache.c
41307 --- linux-3.0.8/fs/dcache.c 2011-07-21 22:17:23.000000000 -0400
41308 +++ linux-3.0.8/fs/dcache.c 2011-08-23 21:47:56.000000000 -0400
41309 @@ -3089,7 +3089,7 @@ void __init vfs_caches_init(unsigned lon
41310 mempages -= reserve;
41311
41312 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
41313 - SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
41314 + SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
41315
41316 dcache_init();
41317 inode_init();
41318 diff -urNp linux-3.0.8/fs/ecryptfs/inode.c linux-3.0.8/fs/ecryptfs/inode.c
41319 --- linux-3.0.8/fs/ecryptfs/inode.c 2011-10-24 08:05:21.000000000 -0400
41320 +++ linux-3.0.8/fs/ecryptfs/inode.c 2011-10-06 04:17:55.000000000 -0400
41321 @@ -704,7 +704,7 @@ static int ecryptfs_readlink_lower(struc
41322 old_fs = get_fs();
41323 set_fs(get_ds());
41324 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
41325 - (char __user *)lower_buf,
41326 + (char __force_user *)lower_buf,
41327 lower_bufsiz);
41328 set_fs(old_fs);
41329 if (rc < 0)
41330 @@ -750,7 +750,7 @@ static void *ecryptfs_follow_link(struct
41331 }
41332 old_fs = get_fs();
41333 set_fs(get_ds());
41334 - rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
41335 + rc = dentry->d_inode->i_op->readlink(dentry, (char __force_user *)buf, len);
41336 set_fs(old_fs);
41337 if (rc < 0) {
41338 kfree(buf);
41339 @@ -765,7 +765,7 @@ out:
41340 static void
41341 ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
41342 {
41343 - char *buf = nd_get_link(nd);
41344 + const char *buf = nd_get_link(nd);
41345 if (!IS_ERR(buf)) {
41346 /* Free the char* */
41347 kfree(buf);
41348 diff -urNp linux-3.0.8/fs/ecryptfs/miscdev.c linux-3.0.8/fs/ecryptfs/miscdev.c
41349 --- linux-3.0.8/fs/ecryptfs/miscdev.c 2011-07-21 22:17:23.000000000 -0400
41350 +++ linux-3.0.8/fs/ecryptfs/miscdev.c 2011-08-23 21:47:56.000000000 -0400
41351 @@ -328,7 +328,7 @@ check_list:
41352 goto out_unlock_msg_ctx;
41353 i = 5;
41354 if (msg_ctx->msg) {
41355 - if (copy_to_user(&buf[i], packet_length, packet_length_size))
41356 + if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
41357 goto out_unlock_msg_ctx;
41358 i += packet_length_size;
41359 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
41360 diff -urNp linux-3.0.8/fs/ecryptfs/read_write.c linux-3.0.8/fs/ecryptfs/read_write.c
41361 --- linux-3.0.8/fs/ecryptfs/read_write.c 2011-10-24 08:05:21.000000000 -0400
41362 +++ linux-3.0.8/fs/ecryptfs/read_write.c 2011-10-06 04:17:55.000000000 -0400
41363 @@ -48,7 +48,7 @@ int ecryptfs_write_lower(struct inode *e
41364 return -EIO;
41365 fs_save = get_fs();
41366 set_fs(get_ds());
41367 - rc = vfs_write(lower_file, data, size, &offset);
41368 + rc = vfs_write(lower_file, (const char __force_user *)data, size, &offset);
41369 set_fs(fs_save);
41370 mark_inode_dirty_sync(ecryptfs_inode);
41371 return rc;
41372 @@ -235,7 +235,7 @@ int ecryptfs_read_lower(char *data, loff
41373 return -EIO;
41374 fs_save = get_fs();
41375 set_fs(get_ds());
41376 - rc = vfs_read(lower_file, data, size, &offset);
41377 + rc = vfs_read(lower_file, (char __force_user *)data, size, &offset);
41378 set_fs(fs_save);
41379 return rc;
41380 }
41381 diff -urNp linux-3.0.8/fs/exec.c linux-3.0.8/fs/exec.c
41382 --- linux-3.0.8/fs/exec.c 2011-10-24 08:05:32.000000000 -0400
41383 +++ linux-3.0.8/fs/exec.c 2011-10-17 23:17:19.000000000 -0400
41384 @@ -55,12 +55,24 @@
41385 #include <linux/pipe_fs_i.h>
41386 #include <linux/oom.h>
41387 #include <linux/compat.h>
41388 +#include <linux/random.h>
41389 +#include <linux/seq_file.h>
41390 +
41391 +#ifdef CONFIG_PAX_REFCOUNT
41392 +#include <linux/kallsyms.h>
41393 +#include <linux/kdebug.h>
41394 +#endif
41395
41396 #include <asm/uaccess.h>
41397 #include <asm/mmu_context.h>
41398 #include <asm/tlb.h>
41399 #include "internal.h"
41400
41401 +#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
41402 +void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
41403 +EXPORT_SYMBOL(pax_set_initial_flags_func);
41404 +#endif
41405 +
41406 int core_uses_pid;
41407 char core_pattern[CORENAME_MAX_SIZE] = "core";
41408 unsigned int core_pipe_limit;
41409 @@ -70,7 +82,7 @@ struct core_name {
41410 char *corename;
41411 int used, size;
41412 };
41413 -static atomic_t call_count = ATOMIC_INIT(1);
41414 +static atomic_unchecked_t call_count = ATOMIC_INIT(1);
41415
41416 /* The maximal length of core_pattern is also specified in sysctl.c */
41417
41418 @@ -116,7 +128,7 @@ SYSCALL_DEFINE1(uselib, const char __use
41419 char *tmp = getname(library);
41420 int error = PTR_ERR(tmp);
41421 static const struct open_flags uselib_flags = {
41422 - .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
41423 + .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC | FMODE_GREXEC,
41424 .acc_mode = MAY_READ | MAY_EXEC | MAY_OPEN,
41425 .intent = LOOKUP_OPEN
41426 };
41427 @@ -195,18 +207,10 @@ static struct page *get_arg_page(struct
41428 int write)
41429 {
41430 struct page *page;
41431 - int ret;
41432
41433 -#ifdef CONFIG_STACK_GROWSUP
41434 - if (write) {
41435 - ret = expand_downwards(bprm->vma, pos);
41436 - if (ret < 0)
41437 - return NULL;
41438 - }
41439 -#endif
41440 - ret = get_user_pages(current, bprm->mm, pos,
41441 - 1, write, 1, &page, NULL);
41442 - if (ret <= 0)
41443 + if (0 > expand_downwards(bprm->vma, pos))
41444 + return NULL;
41445 + if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
41446 return NULL;
41447
41448 if (write) {
41449 @@ -281,6 +285,11 @@ static int __bprm_mm_init(struct linux_b
41450 vma->vm_end = STACK_TOP_MAX;
41451 vma->vm_start = vma->vm_end - PAGE_SIZE;
41452 vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
41453 +
41454 +#ifdef CONFIG_PAX_SEGMEXEC
41455 + vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
41456 +#endif
41457 +
41458 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
41459 INIT_LIST_HEAD(&vma->anon_vma_chain);
41460
41461 @@ -295,6 +304,12 @@ static int __bprm_mm_init(struct linux_b
41462 mm->stack_vm = mm->total_vm = 1;
41463 up_write(&mm->mmap_sem);
41464 bprm->p = vma->vm_end - sizeof(void *);
41465 +
41466 +#ifdef CONFIG_PAX_RANDUSTACK
41467 + if (randomize_va_space)
41468 + bprm->p ^= (pax_get_random_long() & ~15) & ~PAGE_MASK;
41469 +#endif
41470 +
41471 return 0;
41472 err:
41473 up_write(&mm->mmap_sem);
41474 @@ -403,19 +418,7 @@ err:
41475 return err;
41476 }
41477
41478 -struct user_arg_ptr {
41479 -#ifdef CONFIG_COMPAT
41480 - bool is_compat;
41481 -#endif
41482 - union {
41483 - const char __user *const __user *native;
41484 -#ifdef CONFIG_COMPAT
41485 - compat_uptr_t __user *compat;
41486 -#endif
41487 - } ptr;
41488 -};
41489 -
41490 -static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
41491 +const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
41492 {
41493 const char __user *native;
41494
41495 @@ -424,14 +427,14 @@ static const char __user *get_user_arg_p
41496 compat_uptr_t compat;
41497
41498 if (get_user(compat, argv.ptr.compat + nr))
41499 - return ERR_PTR(-EFAULT);
41500 + return (const char __force_user *)ERR_PTR(-EFAULT);
41501
41502 return compat_ptr(compat);
41503 }
41504 #endif
41505
41506 if (get_user(native, argv.ptr.native + nr))
41507 - return ERR_PTR(-EFAULT);
41508 + return (const char __force_user *)ERR_PTR(-EFAULT);
41509
41510 return native;
41511 }
41512 @@ -450,7 +453,7 @@ static int count(struct user_arg_ptr arg
41513 if (!p)
41514 break;
41515
41516 - if (IS_ERR(p))
41517 + if (IS_ERR((const char __force_kernel *)p))
41518 return -EFAULT;
41519
41520 if (i++ >= max)
41521 @@ -484,7 +487,7 @@ static int copy_strings(int argc, struct
41522
41523 ret = -EFAULT;
41524 str = get_user_arg_ptr(argv, argc);
41525 - if (IS_ERR(str))
41526 + if (IS_ERR((const char __force_kernel *)str))
41527 goto out;
41528
41529 len = strnlen_user(str, MAX_ARG_STRLEN);
41530 @@ -566,7 +569,7 @@ int copy_strings_kernel(int argc, const
41531 int r;
41532 mm_segment_t oldfs = get_fs();
41533 struct user_arg_ptr argv = {
41534 - .ptr.native = (const char __user *const __user *)__argv,
41535 + .ptr.native = (const char __force_user *const __force_user *)__argv,
41536 };
41537
41538 set_fs(KERNEL_DS);
41539 @@ -601,7 +604,8 @@ static int shift_arg_pages(struct vm_are
41540 unsigned long new_end = old_end - shift;
41541 struct mmu_gather tlb;
41542
41543 - BUG_ON(new_start > new_end);
41544 + if (new_start >= new_end || new_start < mmap_min_addr)
41545 + return -ENOMEM;
41546
41547 /*
41548 * ensure there are no vmas between where we want to go
41549 @@ -610,6 +614,10 @@ static int shift_arg_pages(struct vm_are
41550 if (vma != find_vma(mm, new_start))
41551 return -EFAULT;
41552
41553 +#ifdef CONFIG_PAX_SEGMEXEC
41554 + BUG_ON(pax_find_mirror_vma(vma));
41555 +#endif
41556 +
41557 /*
41558 * cover the whole range: [new_start, old_end)
41559 */
41560 @@ -690,10 +698,6 @@ int setup_arg_pages(struct linux_binprm
41561 stack_top = arch_align_stack(stack_top);
41562 stack_top = PAGE_ALIGN(stack_top);
41563
41564 - if (unlikely(stack_top < mmap_min_addr) ||
41565 - unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
41566 - return -ENOMEM;
41567 -
41568 stack_shift = vma->vm_end - stack_top;
41569
41570 bprm->p -= stack_shift;
41571 @@ -705,8 +709,28 @@ int setup_arg_pages(struct linux_binprm
41572 bprm->exec -= stack_shift;
41573
41574 down_write(&mm->mmap_sem);
41575 +
41576 + /* Move stack pages down in memory. */
41577 + if (stack_shift) {
41578 + ret = shift_arg_pages(vma, stack_shift);
41579 + if (ret)
41580 + goto out_unlock;
41581 + }
41582 +
41583 vm_flags = VM_STACK_FLAGS;
41584
41585 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
41586 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
41587 + vm_flags &= ~VM_EXEC;
41588 +
41589 +#ifdef CONFIG_PAX_MPROTECT
41590 + if (mm->pax_flags & MF_PAX_MPROTECT)
41591 + vm_flags &= ~VM_MAYEXEC;
41592 +#endif
41593 +
41594 + }
41595 +#endif
41596 +
41597 /*
41598 * Adjust stack execute permissions; explicitly enable for
41599 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
41600 @@ -725,13 +749,6 @@ int setup_arg_pages(struct linux_binprm
41601 goto out_unlock;
41602 BUG_ON(prev != vma);
41603
41604 - /* Move stack pages down in memory. */
41605 - if (stack_shift) {
41606 - ret = shift_arg_pages(vma, stack_shift);
41607 - if (ret)
41608 - goto out_unlock;
41609 - }
41610 -
41611 /* mprotect_fixup is overkill to remove the temporary stack flags */
41612 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
41613
41614 @@ -771,7 +788,7 @@ struct file *open_exec(const char *name)
41615 struct file *file;
41616 int err;
41617 static const struct open_flags open_exec_flags = {
41618 - .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
41619 + .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC | FMODE_GREXEC,
41620 .acc_mode = MAY_EXEC | MAY_OPEN,
41621 .intent = LOOKUP_OPEN
41622 };
41623 @@ -812,7 +829,7 @@ int kernel_read(struct file *file, loff_
41624 old_fs = get_fs();
41625 set_fs(get_ds());
41626 /* The cast to a user pointer is valid due to the set_fs() */
41627 - result = vfs_read(file, (void __user *)addr, count, &pos);
41628 + result = vfs_read(file, (void __force_user *)addr, count, &pos);
41629 set_fs(old_fs);
41630 return result;
41631 }
41632 @@ -1236,7 +1253,7 @@ int check_unsafe_exec(struct linux_binpr
41633 }
41634 rcu_read_unlock();
41635
41636 - if (p->fs->users > n_fs) {
41637 + if (atomic_read(&p->fs->users) > n_fs) {
41638 bprm->unsafe |= LSM_UNSAFE_SHARE;
41639 } else {
41640 res = -EAGAIN;
41641 @@ -1430,11 +1447,35 @@ static int do_execve_common(const char *
41642 struct user_arg_ptr envp,
41643 struct pt_regs *regs)
41644 {
41645 +#ifdef CONFIG_GRKERNSEC
41646 + struct file *old_exec_file;
41647 + struct acl_subject_label *old_acl;
41648 + struct rlimit old_rlim[RLIM_NLIMITS];
41649 +#endif
41650 struct linux_binprm *bprm;
41651 struct file *file;
41652 struct files_struct *displaced;
41653 bool clear_in_exec;
41654 int retval;
41655 + const struct cred *cred = current_cred();
41656 +
41657 + gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
41658 +
41659 + /*
41660 + * We move the actual failure in case of RLIMIT_NPROC excess from
41661 + * set*uid() to execve() because too many poorly written programs
41662 + * don't check setuid() return code. Here we additionally recheck
41663 + * whether NPROC limit is still exceeded.
41664 + */
41665 + if ((current->flags & PF_NPROC_EXCEEDED) &&
41666 + atomic_read(&cred->user->processes) > rlimit(RLIMIT_NPROC)) {
41667 + retval = -EAGAIN;
41668 + goto out_ret;
41669 + }
41670 +
41671 + /* We're below the limit (still or again), so we don't want to make
41672 + * further execve() calls fail. */
41673 + current->flags &= ~PF_NPROC_EXCEEDED;
41674
41675 retval = unshare_files(&displaced);
41676 if (retval)
41677 @@ -1466,6 +1507,16 @@ static int do_execve_common(const char *
41678 bprm->filename = filename;
41679 bprm->interp = filename;
41680
41681 + if (gr_process_user_ban()) {
41682 + retval = -EPERM;
41683 + goto out_file;
41684 + }
41685 +
41686 + if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
41687 + retval = -EACCES;
41688 + goto out_file;
41689 + }
41690 +
41691 retval = bprm_mm_init(bprm);
41692 if (retval)
41693 goto out_file;
41694 @@ -1495,9 +1546,40 @@ static int do_execve_common(const char *
41695 if (retval < 0)
41696 goto out;
41697
41698 + if (!gr_tpe_allow(file)) {
41699 + retval = -EACCES;
41700 + goto out;
41701 + }
41702 +
41703 + if (gr_check_crash_exec(file)) {
41704 + retval = -EACCES;
41705 + goto out;
41706 + }
41707 +
41708 + gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
41709 +
41710 + gr_handle_exec_args(bprm, argv);
41711 +
41712 +#ifdef CONFIG_GRKERNSEC
41713 + old_acl = current->acl;
41714 + memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
41715 + old_exec_file = current->exec_file;
41716 + get_file(file);
41717 + current->exec_file = file;
41718 +#endif
41719 +
41720 + retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
41721 + bprm->unsafe & LSM_UNSAFE_SHARE);
41722 + if (retval < 0)
41723 + goto out_fail;
41724 +
41725 retval = search_binary_handler(bprm,regs);
41726 if (retval < 0)
41727 - goto out;
41728 + goto out_fail;
41729 +#ifdef CONFIG_GRKERNSEC
41730 + if (old_exec_file)
41731 + fput(old_exec_file);
41732 +#endif
41733
41734 /* execve succeeded */
41735 current->fs->in_exec = 0;
41736 @@ -1508,6 +1590,14 @@ static int do_execve_common(const char *
41737 put_files_struct(displaced);
41738 return retval;
41739
41740 +out_fail:
41741 +#ifdef CONFIG_GRKERNSEC
41742 + current->acl = old_acl;
41743 + memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
41744 + fput(current->exec_file);
41745 + current->exec_file = old_exec_file;
41746 +#endif
41747 +
41748 out:
41749 if (bprm->mm) {
41750 acct_arg_size(bprm, 0);
41751 @@ -1581,7 +1671,7 @@ static int expand_corename(struct core_n
41752 {
41753 char *old_corename = cn->corename;
41754
41755 - cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
41756 + cn->size = CORENAME_MAX_SIZE * atomic_inc_return_unchecked(&call_count);
41757 cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);
41758
41759 if (!cn->corename) {
41760 @@ -1669,7 +1759,7 @@ static int format_corename(struct core_n
41761 int pid_in_pattern = 0;
41762 int err = 0;
41763
41764 - cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
41765 + cn->size = CORENAME_MAX_SIZE * atomic_read_unchecked(&call_count);
41766 cn->corename = kmalloc(cn->size, GFP_KERNEL);
41767 cn->used = 0;
41768
41769 @@ -1760,6 +1850,219 @@ out:
41770 return ispipe;
41771 }
41772
41773 +int pax_check_flags(unsigned long *flags)
41774 +{
41775 + int retval = 0;
41776 +
41777 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
41778 + if (*flags & MF_PAX_SEGMEXEC)
41779 + {
41780 + *flags &= ~MF_PAX_SEGMEXEC;
41781 + retval = -EINVAL;
41782 + }
41783 +#endif
41784 +
41785 + if ((*flags & MF_PAX_PAGEEXEC)
41786 +
41787 +#ifdef CONFIG_PAX_PAGEEXEC
41788 + && (*flags & MF_PAX_SEGMEXEC)
41789 +#endif
41790 +
41791 + )
41792 + {
41793 + *flags &= ~MF_PAX_PAGEEXEC;
41794 + retval = -EINVAL;
41795 + }
41796 +
41797 + if ((*flags & MF_PAX_MPROTECT)
41798 +
41799 +#ifdef CONFIG_PAX_MPROTECT
41800 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
41801 +#endif
41802 +
41803 + )
41804 + {
41805 + *flags &= ~MF_PAX_MPROTECT;
41806 + retval = -EINVAL;
41807 + }
41808 +
41809 + if ((*flags & MF_PAX_EMUTRAMP)
41810 +
41811 +#ifdef CONFIG_PAX_EMUTRAMP
41812 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
41813 +#endif
41814 +
41815 + )
41816 + {
41817 + *flags &= ~MF_PAX_EMUTRAMP;
41818 + retval = -EINVAL;
41819 + }
41820 +
41821 + return retval;
41822 +}
41823 +
41824 +EXPORT_SYMBOL(pax_check_flags);
41825 +
41826 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
41827 +void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
41828 +{
41829 + struct task_struct *tsk = current;
41830 + struct mm_struct *mm = current->mm;
41831 + char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
41832 + char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
41833 + char *path_exec = NULL;
41834 + char *path_fault = NULL;
41835 + unsigned long start = 0UL, end = 0UL, offset = 0UL;
41836 +
41837 + if (buffer_exec && buffer_fault) {
41838 + struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
41839 +
41840 + down_read(&mm->mmap_sem);
41841 + vma = mm->mmap;
41842 + while (vma && (!vma_exec || !vma_fault)) {
41843 + if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
41844 + vma_exec = vma;
41845 + if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
41846 + vma_fault = vma;
41847 + vma = vma->vm_next;
41848 + }
41849 + if (vma_exec) {
41850 + path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
41851 + if (IS_ERR(path_exec))
41852 + path_exec = "<path too long>";
41853 + else {
41854 + path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
41855 + if (path_exec) {
41856 + *path_exec = 0;
41857 + path_exec = buffer_exec;
41858 + } else
41859 + path_exec = "<path too long>";
41860 + }
41861 + }
41862 + if (vma_fault) {
41863 + start = vma_fault->vm_start;
41864 + end = vma_fault->vm_end;
41865 + offset = vma_fault->vm_pgoff << PAGE_SHIFT;
41866 + if (vma_fault->vm_file) {
41867 + path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
41868 + if (IS_ERR(path_fault))
41869 + path_fault = "<path too long>";
41870 + else {
41871 + path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
41872 + if (path_fault) {
41873 + *path_fault = 0;
41874 + path_fault = buffer_fault;
41875 + } else
41876 + path_fault = "<path too long>";
41877 + }
41878 + } else
41879 + path_fault = "<anonymous mapping>";
41880 + }
41881 + up_read(&mm->mmap_sem);
41882 + }
41883 + if (tsk->signal->curr_ip)
41884 + printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
41885 + else
41886 + printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
41887 + printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
41888 + "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
41889 + task_uid(tsk), task_euid(tsk), pc, sp);
41890 + free_page((unsigned long)buffer_exec);
41891 + free_page((unsigned long)buffer_fault);
41892 + pax_report_insns(pc, sp);
41893 + do_coredump(SIGKILL, SIGKILL, regs);
41894 +}
41895 +#endif
41896 +
41897 +#ifdef CONFIG_PAX_REFCOUNT
41898 +void pax_report_refcount_overflow(struct pt_regs *regs)
41899 +{
41900 + if (current->signal->curr_ip)
41901 + printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
41902 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
41903 + else
41904 + printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
41905 + current->comm, task_pid_nr(current), current_uid(), current_euid());
41906 +	print_symbol(KERN_ERR "PAX: refcount overflow occurred at: %s\n", instruction_pointer(regs));
41907 + show_regs(regs);
41908 + force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
41909 +}
41910 +#endif
41911 +
41912 +#ifdef CONFIG_PAX_USERCOPY
41913 +/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
41914 +int object_is_on_stack(const void *obj, unsigned long len)
41915 +{
41916 + const void * const stack = task_stack_page(current);
41917 + const void * const stackend = stack + THREAD_SIZE;
41918 +
41919 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
41920 + const void *frame = NULL;
41921 + const void *oldframe;
41922 +#endif
41923 +
41924 + if (obj + len < obj)
41925 + return -1;
41926 +
41927 + if (obj + len <= stack || stackend <= obj)
41928 + return 0;
41929 +
41930 + if (obj < stack || stackend < obj + len)
41931 + return -1;
41932 +
41933 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
41934 + oldframe = __builtin_frame_address(1);
41935 + if (oldframe)
41936 + frame = __builtin_frame_address(2);
41937 + /*
41938 + low ----------------------------------------------> high
41939 + [saved bp][saved ip][args][local vars][saved bp][saved ip]
41940 + ^----------------^
41941 + allow copies only within here
41942 + */
41943 + while (stack <= frame && frame < stackend) {
41944 + /* if obj + len extends past the last frame, this
41945 + check won't pass and the next frame will be 0,
41946 + causing us to bail out and correctly report
41947 + the copy as invalid
41948 + */
41949 + if (obj + len <= frame)
41950 + return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
41951 + oldframe = frame;
41952 + frame = *(const void * const *)frame;
41953 + }
41954 + return -1;
41955 +#else
41956 + return 1;
41957 +#endif
41958 +}
41959 +
41960 +
41961 +NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
41962 +{
41963 + if (current->signal->curr_ip)
41964 + printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
41965 + &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
41966 + else
41967 + printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
41968 + to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
41969 + dump_stack();
41970 + gr_handle_kernel_exploit();
41971 + do_group_exit(SIGKILL);
41972 +}
41973 +#endif
41974 +
41975 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
41976 +void pax_track_stack(void)
41977 +{
41978 + unsigned long sp = (unsigned long)&sp;
41979 + if (sp < current_thread_info()->lowest_stack &&
41980 + sp > (unsigned long)task_stack_page(current))
41981 + current_thread_info()->lowest_stack = sp;
41982 +}
41983 +EXPORT_SYMBOL(pax_track_stack);
41984 +#endif
41985 +
41986 static int zap_process(struct task_struct *start, int exit_code)
41987 {
41988 struct task_struct *t;
41989 @@ -1971,17 +2274,17 @@ static void wait_for_dump_helpers(struct
41990 pipe = file->f_path.dentry->d_inode->i_pipe;
41991
41992 pipe_lock(pipe);
41993 - pipe->readers++;
41994 - pipe->writers--;
41995 + atomic_inc(&pipe->readers);
41996 + atomic_dec(&pipe->writers);
41997
41998 - while ((pipe->readers > 1) && (!signal_pending(current))) {
41999 + while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
42000 wake_up_interruptible_sync(&pipe->wait);
42001 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
42002 pipe_wait(pipe);
42003 }
42004
42005 - pipe->readers--;
42006 - pipe->writers++;
42007 + atomic_dec(&pipe->readers);
42008 + atomic_inc(&pipe->writers);
42009 pipe_unlock(pipe);
42010
42011 }
42012 @@ -2042,7 +2345,7 @@ void do_coredump(long signr, int exit_co
42013 int retval = 0;
42014 int flag = 0;
42015 int ispipe;
42016 - static atomic_t core_dump_count = ATOMIC_INIT(0);
42017 + static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
42018 struct coredump_params cprm = {
42019 .signr = signr,
42020 .regs = regs,
42021 @@ -2057,6 +2360,9 @@ void do_coredump(long signr, int exit_co
42022
42023 audit_core_dumps(signr);
42024
42025 + if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
42026 + gr_handle_brute_attach(current, cprm.mm_flags);
42027 +
42028 binfmt = mm->binfmt;
42029 if (!binfmt || !binfmt->core_dump)
42030 goto fail;
42031 @@ -2097,6 +2403,8 @@ void do_coredump(long signr, int exit_co
42032 goto fail_corename;
42033 }
42034
42035 + gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
42036 +
42037 if (ispipe) {
42038 int dump_count;
42039 char **helper_argv;
42040 @@ -2124,7 +2432,7 @@ void do_coredump(long signr, int exit_co
42041 }
42042 cprm.limit = RLIM_INFINITY;
42043
42044 - dump_count = atomic_inc_return(&core_dump_count);
42045 + dump_count = atomic_inc_return_unchecked(&core_dump_count);
42046 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
42047 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
42048 task_tgid_vnr(current), current->comm);
42049 @@ -2194,7 +2502,7 @@ close_fail:
42050 filp_close(cprm.file, NULL);
42051 fail_dropcount:
42052 if (ispipe)
42053 - atomic_dec(&core_dump_count);
42054 + atomic_dec_unchecked(&core_dump_count);
42055 fail_unlock:
42056 kfree(cn.corename);
42057 fail_corename:
42058 @@ -2213,7 +2521,7 @@ fail:
42059 */
42060 int dump_write(struct file *file, const void *addr, int nr)
42061 {
42062 - return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, addr, nr, &file->f_pos) == nr;
42063 + return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, (const char __force_user *)addr, nr, &file->f_pos) == nr;
42064 }
42065 EXPORT_SYMBOL(dump_write);
42066
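
The fs/exec.c additions above lean on two small tricks: pax_track_stack() approximates the current stack pointer by taking the address of a local, and object_is_on_stack() classifies a buffer as fully on, partially on, or off the stack before a usercopy proceeds. The user-space sketch below reproduces only that classification under an assumed window of +/- 4 KiB around a local variable; the window size, helper names and main() driver are illustrative, and the frame-pointer walk that yields the stricter "2" result is omitted.

#include <stdio.h>
#include <stdint.h>

/* Same 0 / 1 / -1 convention as object_is_on_stack():
 * 0 = not on the stack, 1 = fully inside, -1 = partial overlap (error). */
static int on_stack(const void *obj, unsigned long len,
		    const char *stack, const char *stackend)
{
	const char *o = obj;

	if (o + len < o)				/* length wraps around */
		return -1;
	if (o + len <= stack || stackend <= o)		/* disjoint */
		return 0;
	if (o < stack || stackend < o + len)		/* straddles an edge */
		return -1;
	return 1;
}

int main(void)
{
	char local[64];
	static char not_stack[64];
	/* pax_track_stack()'s trick: the address of a local is a usable
	 * approximation of the current stack pointer. */
	uintptr_t sp = (uintptr_t)&sp;
	/* Illustrative window only; the kernel derives the real bounds from
	 * task_stack_page() and THREAD_SIZE. */
	const char *stack = (const char *)(sp - 4096);
	const char *stackend = (const char *)(sp + 4096);

	printf("local buffer : %d\n", on_stack(local, sizeof(local), stack, stackend));
	printf("static buffer: %d\n", on_stack(not_stack, sizeof(not_stack), stack, stackend));
	return 0;
}
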
42067 diff -urNp linux-3.0.8/fs/ext2/balloc.c linux-3.0.8/fs/ext2/balloc.c
42068 --- linux-3.0.8/fs/ext2/balloc.c 2011-07-21 22:17:23.000000000 -0400
42069 +++ linux-3.0.8/fs/ext2/balloc.c 2011-08-23 21:48:14.000000000 -0400
42070 @@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct e
42071
42072 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
42073 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
42074 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
42075 + if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
42076 sbi->s_resuid != current_fsuid() &&
42077 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
42078 return 0;
42079 diff -urNp linux-3.0.8/fs/ext3/balloc.c linux-3.0.8/fs/ext3/balloc.c
42080 --- linux-3.0.8/fs/ext3/balloc.c 2011-07-21 22:17:23.000000000 -0400
42081 +++ linux-3.0.8/fs/ext3/balloc.c 2011-08-23 21:48:14.000000000 -0400
42082 @@ -1441,7 +1441,7 @@ static int ext3_has_free_blocks(struct e
42083
42084 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
42085 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
42086 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
42087 + if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
42088 sbi->s_resuid != current_fsuid() &&
42089 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
42090 return 0;
42091 diff -urNp linux-3.0.8/fs/ext3/ioctl.c linux-3.0.8/fs/ext3/ioctl.c
42092 --- linux-3.0.8/fs/ext3/ioctl.c 2011-07-21 22:17:23.000000000 -0400
42093 +++ linux-3.0.8/fs/ext3/ioctl.c 2011-10-06 04:17:55.000000000 -0400
42094 @@ -285,7 +285,7 @@ group_add_out:
42095 if (!capable(CAP_SYS_ADMIN))
42096 return -EPERM;
42097
42098 - if (copy_from_user(&range, (struct fstrim_range *)arg,
42099 + if (copy_from_user(&range, (struct fstrim_range __user *)arg,
42100 sizeof(range)))
42101 return -EFAULT;
42102
42103 @@ -293,7 +293,7 @@ group_add_out:
42104 if (ret < 0)
42105 return ret;
42106
42107 - if (copy_to_user((struct fstrim_range *)arg, &range,
42108 + if (copy_to_user((struct fstrim_range __user *)arg, &range,
42109 sizeof(range)))
42110 return -EFAULT;
42111
42112 diff -urNp linux-3.0.8/fs/ext4/balloc.c linux-3.0.8/fs/ext4/balloc.c
42113 --- linux-3.0.8/fs/ext4/balloc.c 2011-07-21 22:17:23.000000000 -0400
42114 +++ linux-3.0.8/fs/ext4/balloc.c 2011-08-23 21:48:14.000000000 -0400
42115 @@ -394,8 +394,8 @@ static int ext4_has_free_blocks(struct e
42116 /* Hm, nope. Are (enough) root reserved blocks available? */
42117 if (sbi->s_resuid == current_fsuid() ||
42118 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
42119 - capable(CAP_SYS_RESOURCE) ||
42120 - (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
42121 + (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
42122 + capable_nolog(CAP_SYS_RESOURCE)) {
42123
42124 if (free_blocks >= (nblocks + dirty_blocks))
42125 return 1;
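
The ext4_has_free_blocks() hunk does two things at once: it switches to the grsecurity capable_nolog() helper and it moves the capability test behind the EXT4_MB_USE_ROOT_BLOCKS flag in the || chain, so the audited check is only evaluated when the cheaper owner/group/flag tests have already failed. A minimal sketch of that short-circuit ordering, with a stand-in predicate instead of the real capability machinery:

#include <stdio.h>
#include <stdbool.h>

/* Stand-in for capable_nolog(CAP_SYS_RESOURCE); the puts() call models the
 * side effect (an audit/log event) that the reordering tries to avoid. */
static bool capability_check(void)
{
	puts("capability check evaluated");
	return true;
}

int main(void)
{
	bool is_reserved_owner = true;	/* sbi->s_resuid == current_fsuid() */
	bool use_root_blocks   = false;	/* EXT4_MB_USE_ROOT_BLOCKS */

	/* Because || short-circuits left to right, the capability check at
	 * the end of the chain never runs in the common cases. */
	if (is_reserved_owner || use_root_blocks || capability_check())
		puts("root-reserved blocks may be used");
	return 0;
}
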
42126 diff -urNp linux-3.0.8/fs/ext4/ext4.h linux-3.0.8/fs/ext4/ext4.h
42127 --- linux-3.0.8/fs/ext4/ext4.h 2011-10-24 08:05:21.000000000 -0400
42128 +++ linux-3.0.8/fs/ext4/ext4.h 2011-08-23 21:47:56.000000000 -0400
42129 @@ -1177,19 +1177,19 @@ struct ext4_sb_info {
42130 unsigned long s_mb_last_start;
42131
42132 /* stats for buddy allocator */
42133 - atomic_t s_bal_reqs; /* number of reqs with len > 1 */
42134 - atomic_t s_bal_success; /* we found long enough chunks */
42135 - atomic_t s_bal_allocated; /* in blocks */
42136 - atomic_t s_bal_ex_scanned; /* total extents scanned */
42137 - atomic_t s_bal_goals; /* goal hits */
42138 - atomic_t s_bal_breaks; /* too long searches */
42139 - atomic_t s_bal_2orders; /* 2^order hits */
42140 + atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
42141 + atomic_unchecked_t s_bal_success; /* we found long enough chunks */
42142 + atomic_unchecked_t s_bal_allocated; /* in blocks */
42143 + atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
42144 + atomic_unchecked_t s_bal_goals; /* goal hits */
42145 + atomic_unchecked_t s_bal_breaks; /* too long searches */
42146 + atomic_unchecked_t s_bal_2orders; /* 2^order hits */
42147 spinlock_t s_bal_lock;
42148 unsigned long s_mb_buddies_generated;
42149 unsigned long long s_mb_generation_time;
42150 - atomic_t s_mb_lost_chunks;
42151 - atomic_t s_mb_preallocated;
42152 - atomic_t s_mb_discarded;
42153 + atomic_unchecked_t s_mb_lost_chunks;
42154 + atomic_unchecked_t s_mb_preallocated;
42155 + atomic_unchecked_t s_mb_discarded;
42156 atomic_t s_lock_busy;
42157
42158 /* locality groups */
42159 diff -urNp linux-3.0.8/fs/ext4/file.c linux-3.0.8/fs/ext4/file.c
42160 --- linux-3.0.8/fs/ext4/file.c 2011-07-21 22:17:23.000000000 -0400
42161 +++ linux-3.0.8/fs/ext4/file.c 2011-10-17 02:30:30.000000000 -0400
42162 @@ -181,8 +181,8 @@ static int ext4_file_open(struct inode *
42163 path.dentry = mnt->mnt_root;
42164 cp = d_path(&path, buf, sizeof(buf));
42165 if (!IS_ERR(cp)) {
42166 - memcpy(sbi->s_es->s_last_mounted, cp,
42167 - sizeof(sbi->s_es->s_last_mounted));
42168 + strlcpy(sbi->s_es->s_last_mounted, cp,
42169 + sizeof(sbi->s_es->s_last_mounted));
42170 ext4_mark_super_dirty(sb);
42171 }
42172 }
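
The ext4_file_open() hunk replaces a fixed-size memcpy() of the d_path() result into s_last_mounted with strlcpy(), so the copy is bounded by the destination, stops at the source's terminator instead of reading 64 bytes past a shorter string, and always NUL-terminates the field. A user-space sketch of the same bounded copy; a local implementation with the usual strlcpy semantics is used here since older glibc lacks one:

#include <stdio.h>
#include <string.h>

/* Usual strlcpy semantics: copy at most size-1 bytes, always terminate,
 * return the length of the source string. */
static size_t strlcpy_local(char *dst, const char *src, size_t size)
{
	size_t len = strlen(src);

	if (size) {
		size_t n = len < size - 1 ? len : size - 1;

		memcpy(dst, src, n);
		dst[n] = '\0';
	}
	return len;
}

int main(void)
{
	char s_last_mounted[64];
	const char *cp = "/mnt/scratch";	/* stands in for the d_path() result */

	/* The old memcpy(dst, cp, sizeof(dst)) copied all 64 bytes regardless
	 * of the string's length; the bounded copy reads only the string. */
	strlcpy_local(s_last_mounted, cp, sizeof(s_last_mounted));
	printf("s_last_mounted = \"%s\"\n", s_last_mounted);
	return 0;
}
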
42173 diff -urNp linux-3.0.8/fs/ext4/ioctl.c linux-3.0.8/fs/ext4/ioctl.c
42174 --- linux-3.0.8/fs/ext4/ioctl.c 2011-07-21 22:17:23.000000000 -0400
42175 +++ linux-3.0.8/fs/ext4/ioctl.c 2011-10-06 04:17:55.000000000 -0400
42176 @@ -344,7 +344,7 @@ mext_out:
42177 if (!blk_queue_discard(q))
42178 return -EOPNOTSUPP;
42179
42180 - if (copy_from_user(&range, (struct fstrim_range *)arg,
42181 + if (copy_from_user(&range, (struct fstrim_range __user *)arg,
42182 sizeof(range)))
42183 return -EFAULT;
42184
42185 @@ -354,7 +354,7 @@ mext_out:
42186 if (ret < 0)
42187 return ret;
42188
42189 - if (copy_to_user((struct fstrim_range *)arg, &range,
42190 + if (copy_to_user((struct fstrim_range __user *)arg, &range,
42191 sizeof(range)))
42192 return -EFAULT;
42193
42194 diff -urNp linux-3.0.8/fs/ext4/mballoc.c linux-3.0.8/fs/ext4/mballoc.c
42195 --- linux-3.0.8/fs/ext4/mballoc.c 2011-10-24 08:05:21.000000000 -0400
42196 +++ linux-3.0.8/fs/ext4/mballoc.c 2011-08-23 21:48:14.000000000 -0400
42197 @@ -1793,7 +1793,7 @@ void ext4_mb_simple_scan_group(struct ex
42198 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
42199
42200 if (EXT4_SB(sb)->s_mb_stats)
42201 - atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
42202 + atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
42203
42204 break;
42205 }
42206 @@ -2087,7 +2087,7 @@ repeat:
42207 ac->ac_status = AC_STATUS_CONTINUE;
42208 ac->ac_flags |= EXT4_MB_HINT_FIRST;
42209 cr = 3;
42210 - atomic_inc(&sbi->s_mb_lost_chunks);
42211 + atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
42212 goto repeat;
42213 }
42214 }
42215 @@ -2130,6 +2130,8 @@ static int ext4_mb_seq_groups_show(struc
42216 ext4_grpblk_t counters[16];
42217 } sg;
42218
42219 + pax_track_stack();
42220 +
42221 group--;
42222 if (group == 0)
42223 seq_printf(seq, "#%-5s: %-5s %-5s %-5s "
42224 @@ -2553,25 +2555,25 @@ int ext4_mb_release(struct super_block *
42225 if (sbi->s_mb_stats) {
42226 printk(KERN_INFO
42227 "EXT4-fs: mballoc: %u blocks %u reqs (%u success)\n",
42228 - atomic_read(&sbi->s_bal_allocated),
42229 - atomic_read(&sbi->s_bal_reqs),
42230 - atomic_read(&sbi->s_bal_success));
42231 + atomic_read_unchecked(&sbi->s_bal_allocated),
42232 + atomic_read_unchecked(&sbi->s_bal_reqs),
42233 + atomic_read_unchecked(&sbi->s_bal_success));
42234 printk(KERN_INFO
42235 "EXT4-fs: mballoc: %u extents scanned, %u goal hits, "
42236 "%u 2^N hits, %u breaks, %u lost\n",
42237 - atomic_read(&sbi->s_bal_ex_scanned),
42238 - atomic_read(&sbi->s_bal_goals),
42239 - atomic_read(&sbi->s_bal_2orders),
42240 - atomic_read(&sbi->s_bal_breaks),
42241 - atomic_read(&sbi->s_mb_lost_chunks));
42242 + atomic_read_unchecked(&sbi->s_bal_ex_scanned),
42243 + atomic_read_unchecked(&sbi->s_bal_goals),
42244 + atomic_read_unchecked(&sbi->s_bal_2orders),
42245 + atomic_read_unchecked(&sbi->s_bal_breaks),
42246 + atomic_read_unchecked(&sbi->s_mb_lost_chunks));
42247 printk(KERN_INFO
42248 "EXT4-fs: mballoc: %lu generated and it took %Lu\n",
42249 sbi->s_mb_buddies_generated++,
42250 sbi->s_mb_generation_time);
42251 printk(KERN_INFO
42252 "EXT4-fs: mballoc: %u preallocated, %u discarded\n",
42253 - atomic_read(&sbi->s_mb_preallocated),
42254 - atomic_read(&sbi->s_mb_discarded));
42255 + atomic_read_unchecked(&sbi->s_mb_preallocated),
42256 + atomic_read_unchecked(&sbi->s_mb_discarded));
42257 }
42258
42259 free_percpu(sbi->s_locality_groups);
42260 @@ -3041,16 +3043,16 @@ static void ext4_mb_collect_stats(struct
42261 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
42262
42263 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
42264 - atomic_inc(&sbi->s_bal_reqs);
42265 - atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
42266 + atomic_inc_unchecked(&sbi->s_bal_reqs);
42267 + atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
42268 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
42269 - atomic_inc(&sbi->s_bal_success);
42270 - atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
42271 + atomic_inc_unchecked(&sbi->s_bal_success);
42272 + atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
42273 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
42274 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
42275 - atomic_inc(&sbi->s_bal_goals);
42276 + atomic_inc_unchecked(&sbi->s_bal_goals);
42277 if (ac->ac_found > sbi->s_mb_max_to_scan)
42278 - atomic_inc(&sbi->s_bal_breaks);
42279 + atomic_inc_unchecked(&sbi->s_bal_breaks);
42280 }
42281
42282 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
42283 @@ -3448,7 +3450,7 @@ ext4_mb_new_inode_pa(struct ext4_allocat
42284 trace_ext4_mb_new_inode_pa(ac, pa);
42285
42286 ext4_mb_use_inode_pa(ac, pa);
42287 - atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
42288 + atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
42289
42290 ei = EXT4_I(ac->ac_inode);
42291 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
42292 @@ -3508,7 +3510,7 @@ ext4_mb_new_group_pa(struct ext4_allocat
42293 trace_ext4_mb_new_group_pa(ac, pa);
42294
42295 ext4_mb_use_group_pa(ac, pa);
42296 - atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
42297 + atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
42298
42299 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
42300 lg = ac->ac_lg;
42301 @@ -3595,7 +3597,7 @@ ext4_mb_release_inode_pa(struct ext4_bud
42302 * from the bitmap and continue.
42303 */
42304 }
42305 - atomic_add(free, &sbi->s_mb_discarded);
42306 + atomic_add_unchecked(free, &sbi->s_mb_discarded);
42307
42308 return err;
42309 }
42310 @@ -3613,7 +3615,7 @@ ext4_mb_release_group_pa(struct ext4_bud
42311 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
42312 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
42313 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
42314 - atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
42315 + atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
42316 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
42317
42318 return 0;
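
The ext4 statistics conversions above (s_bal_*, s_mb_*) and the core_dump_count change in do_coredump() all follow the same recipe: pure bookkeeping counters that are allowed to wrap become atomic_unchecked_t and use the *_unchecked operations, so CONFIG_PAX_REFCOUNT's overflow detection stays focused on real reference counts. A user-space sketch of that two-type split, using C11 atomics in place of the kernel's atomic_t; the names mirror the patch, the bodies are illustrative only:

#include <stdatomic.h>
#include <stdio.h>

/* A distinct type for wrap-tolerant counters: an overflow-checking build
 * can instrument the ordinary atomic ops and leave these alone. */
typedef struct { atomic_int counter; } atomic_unchecked_t;

static void atomic_inc_unchecked(atomic_unchecked_t *v)
{
	atomic_fetch_add_explicit(&v->counter, 1, memory_order_relaxed);
}

static void atomic_add_unchecked(int i, atomic_unchecked_t *v)
{
	atomic_fetch_add_explicit(&v->counter, i, memory_order_relaxed);
}

static int atomic_read_unchecked(atomic_unchecked_t *v)
{
	return atomic_load_explicit(&v->counter, memory_order_relaxed);
}

int main(void)
{
	atomic_unchecked_t s_bal_reqs = { 0 };
	atomic_unchecked_t s_bal_allocated = { 0 };

	atomic_inc_unchecked(&s_bal_reqs);
	atomic_add_unchecked(8, &s_bal_allocated);

	printf("reqs=%d allocated=%d\n",
	       atomic_read_unchecked(&s_bal_reqs),
	       atomic_read_unchecked(&s_bal_allocated));
	return 0;
}
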
42319 diff -urNp linux-3.0.8/fs/fcntl.c linux-3.0.8/fs/fcntl.c
42320 --- linux-3.0.8/fs/fcntl.c 2011-07-21 22:17:23.000000000 -0400
42321 +++ linux-3.0.8/fs/fcntl.c 2011-10-06 04:17:55.000000000 -0400
42322 @@ -224,6 +224,11 @@ int __f_setown(struct file *filp, struct
42323 if (err)
42324 return err;
42325
42326 + if (gr_handle_chroot_fowner(pid, type))
42327 + return -ENOENT;
42328 + if (gr_check_protected_task_fowner(pid, type))
42329 + return -EACCES;
42330 +
42331 f_modown(filp, pid, type, force);
42332 return 0;
42333 }
42334 @@ -266,7 +271,7 @@ pid_t f_getown(struct file *filp)
42335
42336 static int f_setown_ex(struct file *filp, unsigned long arg)
42337 {
42338 - struct f_owner_ex * __user owner_p = (void * __user)arg;
42339 + struct f_owner_ex __user *owner_p = (void __user *)arg;
42340 struct f_owner_ex owner;
42341 struct pid *pid;
42342 int type;
42343 @@ -306,7 +311,7 @@ static int f_setown_ex(struct file *filp
42344
42345 static int f_getown_ex(struct file *filp, unsigned long arg)
42346 {
42347 - struct f_owner_ex * __user owner_p = (void * __user)arg;
42348 + struct f_owner_ex __user *owner_p = (void __user *)arg;
42349 struct f_owner_ex owner;
42350 int ret = 0;
42351
42352 @@ -348,6 +353,7 @@ static long do_fcntl(int fd, unsigned in
42353 switch (cmd) {
42354 case F_DUPFD:
42355 case F_DUPFD_CLOEXEC:
42356 + gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
42357 if (arg >= rlimit(RLIMIT_NOFILE))
42358 break;
42359 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
42360 @@ -835,14 +841,14 @@ static int __init fcntl_init(void)
42361 * Exceptions: O_NONBLOCK is a two bit define on parisc; O_NDELAY
42362 * is defined as O_NONBLOCK on some platforms and not on others.
42363 */
42364 - BUILD_BUG_ON(19 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
42365 + BUILD_BUG_ON(20 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
42366 O_RDONLY | O_WRONLY | O_RDWR |
42367 O_CREAT | O_EXCL | O_NOCTTY |
42368 O_TRUNC | O_APPEND | /* O_NONBLOCK | */
42369 __O_SYNC | O_DSYNC | FASYNC |
42370 O_DIRECT | O_LARGEFILE | O_DIRECTORY |
42371 O_NOFOLLOW | O_NOATIME | O_CLOEXEC |
42372 - __FMODE_EXEC | O_PATH
42373 + __FMODE_EXEC | O_PATH | FMODE_GREXEC
42374 ));
42375
42376 fasync_cache = kmem_cache_create("fasync_cache",
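
The fcntl_init() hunk adds FMODE_GREXEC to the sanity check and bumps the expected bit count from 19 to 20; BUILD_BUG_ON() with HWEIGHT32() fails the build if two of the O_* flags ever share a bit or the count goes stale. The same compile-time technique in plain C11, with made-up flag values and a hand-rolled constant-expression popcount in the spirit of the kernel's HWEIGHT macros:

#include <stdio.h>

/* Constant-expression population count, usable inside _Static_assert. */
#define HW4(w)  ((((w) >> 0) & 1) + (((w) >> 1) & 1) + \
		 (((w) >> 2) & 1) + (((w) >> 3) & 1))
#define HW8(w)  (HW4(w) + HW4((w) >> 4))
#define HW16(w) (HW8(w) + HW8((w) >> 8))
#define HW32(w) (HW16(w) + HW16((w) >> 16))

/* Illustrative flags only; each must occupy its own bit. */
#define FLAG_READ    0x001u
#define FLAG_WRITE   0x002u
#define FLAG_CREATE  0x004u
#define FLAG_EXEC    0x100u

#define ALL_FLAGS (FLAG_READ | FLAG_WRITE | FLAG_CREATE | FLAG_EXEC)

/* Adding a flag without updating the count, or reusing a bit, breaks the build. */
_Static_assert(HW32(ALL_FLAGS) == 4, "flag bits overlap or expected count is stale");

int main(void)
{
	printf("ALL_FLAGS=0x%x, distinct bits=%d\n", ALL_FLAGS, HW32(ALL_FLAGS));
	return 0;
}
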
42377 diff -urNp linux-3.0.8/fs/fifo.c linux-3.0.8/fs/fifo.c
42378 --- linux-3.0.8/fs/fifo.c 2011-07-21 22:17:23.000000000 -0400
42379 +++ linux-3.0.8/fs/fifo.c 2011-08-23 21:47:56.000000000 -0400
42380 @@ -58,10 +58,10 @@ static int fifo_open(struct inode *inode
42381 */
42382 filp->f_op = &read_pipefifo_fops;
42383 pipe->r_counter++;
42384 - if (pipe->readers++ == 0)
42385 + if (atomic_inc_return(&pipe->readers) == 1)
42386 wake_up_partner(inode);
42387
42388 - if (!pipe->writers) {
42389 + if (!atomic_read(&pipe->writers)) {
42390 if ((filp->f_flags & O_NONBLOCK)) {
42391 /* suppress POLLHUP until we have
42392 * seen a writer */
42393 @@ -81,15 +81,15 @@ static int fifo_open(struct inode *inode
42394 * errno=ENXIO when there is no process reading the FIFO.
42395 */
42396 ret = -ENXIO;
42397 - if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
42398 + if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
42399 goto err;
42400
42401 filp->f_op = &write_pipefifo_fops;
42402 pipe->w_counter++;
42403 - if (!pipe->writers++)
42404 + if (atomic_inc_return(&pipe->writers) == 1)
42405 wake_up_partner(inode);
42406
42407 - if (!pipe->readers) {
42408 + if (!atomic_read(&pipe->readers)) {
42409 wait_for_partner(inode, &pipe->r_counter);
42410 if (signal_pending(current))
42411 goto err_wr;
42412 @@ -105,11 +105,11 @@ static int fifo_open(struct inode *inode
42413 */
42414 filp->f_op = &rdwr_pipefifo_fops;
42415
42416 - pipe->readers++;
42417 - pipe->writers++;
42418 + atomic_inc(&pipe->readers);
42419 + atomic_inc(&pipe->writers);
42420 pipe->r_counter++;
42421 pipe->w_counter++;
42422 - if (pipe->readers == 1 || pipe->writers == 1)
42423 + if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
42424 wake_up_partner(inode);
42425 break;
42426
42427 @@ -123,19 +123,19 @@ static int fifo_open(struct inode *inode
42428 return 0;
42429
42430 err_rd:
42431 - if (!--pipe->readers)
42432 + if (atomic_dec_and_test(&pipe->readers))
42433 wake_up_interruptible(&pipe->wait);
42434 ret = -ERESTARTSYS;
42435 goto err;
42436
42437 err_wr:
42438 - if (!--pipe->writers)
42439 + if (atomic_dec_and_test(&pipe->writers))
42440 wake_up_interruptible(&pipe->wait);
42441 ret = -ERESTARTSYS;
42442 goto err;
42443
42444 err:
42445 - if (!pipe->readers && !pipe->writers)
42446 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
42447 free_pipe_info(inode);
42448
42449 err_nocleanup:
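
The fifo_open() hunks are a mechanical translation of plain-int counter idioms into the atomic API: "readers++ == 0" becomes "atomic_inc_return(&readers) == 1", "!--writers" becomes "atomic_dec_and_test(&writers)", and bare reads go through atomic_read(). A small C11 check that the rewritten idioms keep the original meaning; the helpers echo the kernel API but are local sketches:

#include <stdatomic.h>
#include <stdio.h>

/* Local stand-ins for atomic_inc_return() and atomic_dec_and_test(). */
static int inc_return(atomic_int *v)   { return atomic_fetch_add(v, 1) + 1; }
static int dec_and_test(atomic_int *v) { return atomic_fetch_sub(v, 1) - 1 == 0; }

int main(void)
{
	atomic_int readers = 0;

	/* old: "if (readers++ == 0) wake_up_partner();"
	 * new: "if (atomic_inc_return(&readers) == 1) wake_up_partner();" */
	if (inc_return(&readers) == 1)
		puts("first reader: wake up partner");
	if (inc_return(&readers) == 1)
		puts("unexpected: second reader also woke the partner");

	/* old: "if (!--readers) wake_up_interruptible();"
	 * new: "if (atomic_dec_and_test(&readers)) wake_up_interruptible();" */
	if (dec_and_test(&readers))
		puts("unexpected: still one reader left");
	if (dec_and_test(&readers))
		puts("last reader gone: wake up waiters");

	printf("final readers = %d\n", atomic_load(&readers));
	return 0;
}
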
42450 diff -urNp linux-3.0.8/fs/file.c linux-3.0.8/fs/file.c
42451 --- linux-3.0.8/fs/file.c 2011-07-21 22:17:23.000000000 -0400
42452 +++ linux-3.0.8/fs/file.c 2011-08-23 21:48:14.000000000 -0400
42453 @@ -15,6 +15,7 @@
42454 #include <linux/slab.h>
42455 #include <linux/vmalloc.h>
42456 #include <linux/file.h>
42457 +#include <linux/security.h>
42458 #include <linux/fdtable.h>
42459 #include <linux/bitops.h>
42460 #include <linux/interrupt.h>
42461 @@ -254,6 +255,7 @@ int expand_files(struct files_struct *fi
42462 * N.B. For clone tasks sharing a files structure, this test
42463 * will limit the total number of files that can be opened.
42464 */
42465 + gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
42466 if (nr >= rlimit(RLIMIT_NOFILE))
42467 return -EMFILE;
42468
42469 diff -urNp linux-3.0.8/fs/filesystems.c linux-3.0.8/fs/filesystems.c
42470 --- linux-3.0.8/fs/filesystems.c 2011-07-21 22:17:23.000000000 -0400
42471 +++ linux-3.0.8/fs/filesystems.c 2011-08-23 21:48:14.000000000 -0400
42472 @@ -274,7 +274,12 @@ struct file_system_type *get_fs_type(con
42473 int len = dot ? dot - name : strlen(name);
42474
42475 fs = __get_fs_type(name, len);
42476 +
42477 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
42478 + if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
42479 +#else
42480 if (!fs && (request_module("%.*s", len, name) == 0))
42481 +#endif
42482 fs = __get_fs_type(name, len);
42483
42484 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
42485 diff -urNp linux-3.0.8/fs/fscache/cookie.c linux-3.0.8/fs/fscache/cookie.c
42486 --- linux-3.0.8/fs/fscache/cookie.c 2011-07-21 22:17:23.000000000 -0400
42487 +++ linux-3.0.8/fs/fscache/cookie.c 2011-08-23 21:47:56.000000000 -0400
42488 @@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire
42489 parent ? (char *) parent->def->name : "<no-parent>",
42490 def->name, netfs_data);
42491
42492 - fscache_stat(&fscache_n_acquires);
42493 + fscache_stat_unchecked(&fscache_n_acquires);
42494
42495 /* if there's no parent cookie, then we don't create one here either */
42496 if (!parent) {
42497 - fscache_stat(&fscache_n_acquires_null);
42498 + fscache_stat_unchecked(&fscache_n_acquires_null);
42499 _leave(" [no parent]");
42500 return NULL;
42501 }
42502 @@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire
42503 /* allocate and initialise a cookie */
42504 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
42505 if (!cookie) {
42506 - fscache_stat(&fscache_n_acquires_oom);
42507 + fscache_stat_unchecked(&fscache_n_acquires_oom);
42508 _leave(" [ENOMEM]");
42509 return NULL;
42510 }
42511 @@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire
42512
42513 switch (cookie->def->type) {
42514 case FSCACHE_COOKIE_TYPE_INDEX:
42515 - fscache_stat(&fscache_n_cookie_index);
42516 + fscache_stat_unchecked(&fscache_n_cookie_index);
42517 break;
42518 case FSCACHE_COOKIE_TYPE_DATAFILE:
42519 - fscache_stat(&fscache_n_cookie_data);
42520 + fscache_stat_unchecked(&fscache_n_cookie_data);
42521 break;
42522 default:
42523 - fscache_stat(&fscache_n_cookie_special);
42524 + fscache_stat_unchecked(&fscache_n_cookie_special);
42525 break;
42526 }
42527
42528 @@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire
42529 if (fscache_acquire_non_index_cookie(cookie) < 0) {
42530 atomic_dec(&parent->n_children);
42531 __fscache_cookie_put(cookie);
42532 - fscache_stat(&fscache_n_acquires_nobufs);
42533 + fscache_stat_unchecked(&fscache_n_acquires_nobufs);
42534 _leave(" = NULL");
42535 return NULL;
42536 }
42537 }
42538
42539 - fscache_stat(&fscache_n_acquires_ok);
42540 + fscache_stat_unchecked(&fscache_n_acquires_ok);
42541 _leave(" = %p", cookie);
42542 return cookie;
42543 }
42544 @@ -168,7 +168,7 @@ static int fscache_acquire_non_index_coo
42545 cache = fscache_select_cache_for_object(cookie->parent);
42546 if (!cache) {
42547 up_read(&fscache_addremove_sem);
42548 - fscache_stat(&fscache_n_acquires_no_cache);
42549 + fscache_stat_unchecked(&fscache_n_acquires_no_cache);
42550 _leave(" = -ENOMEDIUM [no cache]");
42551 return -ENOMEDIUM;
42552 }
42553 @@ -256,12 +256,12 @@ static int fscache_alloc_object(struct f
42554 object = cache->ops->alloc_object(cache, cookie);
42555 fscache_stat_d(&fscache_n_cop_alloc_object);
42556 if (IS_ERR(object)) {
42557 - fscache_stat(&fscache_n_object_no_alloc);
42558 + fscache_stat_unchecked(&fscache_n_object_no_alloc);
42559 ret = PTR_ERR(object);
42560 goto error;
42561 }
42562
42563 - fscache_stat(&fscache_n_object_alloc);
42564 + fscache_stat_unchecked(&fscache_n_object_alloc);
42565
42566 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
42567
42568 @@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fsca
42569 struct fscache_object *object;
42570 struct hlist_node *_p;
42571
42572 - fscache_stat(&fscache_n_updates);
42573 + fscache_stat_unchecked(&fscache_n_updates);
42574
42575 if (!cookie) {
42576 - fscache_stat(&fscache_n_updates_null);
42577 + fscache_stat_unchecked(&fscache_n_updates_null);
42578 _leave(" [no cookie]");
42579 return;
42580 }
42581 @@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct
42582 struct fscache_object *object;
42583 unsigned long event;
42584
42585 - fscache_stat(&fscache_n_relinquishes);
42586 + fscache_stat_unchecked(&fscache_n_relinquishes);
42587 if (retire)
42588 - fscache_stat(&fscache_n_relinquishes_retire);
42589 + fscache_stat_unchecked(&fscache_n_relinquishes_retire);
42590
42591 if (!cookie) {
42592 - fscache_stat(&fscache_n_relinquishes_null);
42593 + fscache_stat_unchecked(&fscache_n_relinquishes_null);
42594 _leave(" [no cookie]");
42595 return;
42596 }
42597 @@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct
42598
42599 /* wait for the cookie to finish being instantiated (or to fail) */
42600 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
42601 - fscache_stat(&fscache_n_relinquishes_waitcrt);
42602 + fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
42603 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
42604 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
42605 }
42606 diff -urNp linux-3.0.8/fs/fscache/internal.h linux-3.0.8/fs/fscache/internal.h
42607 --- linux-3.0.8/fs/fscache/internal.h 2011-07-21 22:17:23.000000000 -0400
42608 +++ linux-3.0.8/fs/fscache/internal.h 2011-08-23 21:47:56.000000000 -0400
42609 @@ -144,94 +144,94 @@ extern void fscache_proc_cleanup(void);
42610 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
42611 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
42612
42613 -extern atomic_t fscache_n_op_pend;
42614 -extern atomic_t fscache_n_op_run;
42615 -extern atomic_t fscache_n_op_enqueue;
42616 -extern atomic_t fscache_n_op_deferred_release;
42617 -extern atomic_t fscache_n_op_release;
42618 -extern atomic_t fscache_n_op_gc;
42619 -extern atomic_t fscache_n_op_cancelled;
42620 -extern atomic_t fscache_n_op_rejected;
42621 -
42622 -extern atomic_t fscache_n_attr_changed;
42623 -extern atomic_t fscache_n_attr_changed_ok;
42624 -extern atomic_t fscache_n_attr_changed_nobufs;
42625 -extern atomic_t fscache_n_attr_changed_nomem;
42626 -extern atomic_t fscache_n_attr_changed_calls;
42627 -
42628 -extern atomic_t fscache_n_allocs;
42629 -extern atomic_t fscache_n_allocs_ok;
42630 -extern atomic_t fscache_n_allocs_wait;
42631 -extern atomic_t fscache_n_allocs_nobufs;
42632 -extern atomic_t fscache_n_allocs_intr;
42633 -extern atomic_t fscache_n_allocs_object_dead;
42634 -extern atomic_t fscache_n_alloc_ops;
42635 -extern atomic_t fscache_n_alloc_op_waits;
42636 -
42637 -extern atomic_t fscache_n_retrievals;
42638 -extern atomic_t fscache_n_retrievals_ok;
42639 -extern atomic_t fscache_n_retrievals_wait;
42640 -extern atomic_t fscache_n_retrievals_nodata;
42641 -extern atomic_t fscache_n_retrievals_nobufs;
42642 -extern atomic_t fscache_n_retrievals_intr;
42643 -extern atomic_t fscache_n_retrievals_nomem;
42644 -extern atomic_t fscache_n_retrievals_object_dead;
42645 -extern atomic_t fscache_n_retrieval_ops;
42646 -extern atomic_t fscache_n_retrieval_op_waits;
42647 -
42648 -extern atomic_t fscache_n_stores;
42649 -extern atomic_t fscache_n_stores_ok;
42650 -extern atomic_t fscache_n_stores_again;
42651 -extern atomic_t fscache_n_stores_nobufs;
42652 -extern atomic_t fscache_n_stores_oom;
42653 -extern atomic_t fscache_n_store_ops;
42654 -extern atomic_t fscache_n_store_calls;
42655 -extern atomic_t fscache_n_store_pages;
42656 -extern atomic_t fscache_n_store_radix_deletes;
42657 -extern atomic_t fscache_n_store_pages_over_limit;
42658 -
42659 -extern atomic_t fscache_n_store_vmscan_not_storing;
42660 -extern atomic_t fscache_n_store_vmscan_gone;
42661 -extern atomic_t fscache_n_store_vmscan_busy;
42662 -extern atomic_t fscache_n_store_vmscan_cancelled;
42663 -
42664 -extern atomic_t fscache_n_marks;
42665 -extern atomic_t fscache_n_uncaches;
42666 -
42667 -extern atomic_t fscache_n_acquires;
42668 -extern atomic_t fscache_n_acquires_null;
42669 -extern atomic_t fscache_n_acquires_no_cache;
42670 -extern atomic_t fscache_n_acquires_ok;
42671 -extern atomic_t fscache_n_acquires_nobufs;
42672 -extern atomic_t fscache_n_acquires_oom;
42673 -
42674 -extern atomic_t fscache_n_updates;
42675 -extern atomic_t fscache_n_updates_null;
42676 -extern atomic_t fscache_n_updates_run;
42677 -
42678 -extern atomic_t fscache_n_relinquishes;
42679 -extern atomic_t fscache_n_relinquishes_null;
42680 -extern atomic_t fscache_n_relinquishes_waitcrt;
42681 -extern atomic_t fscache_n_relinquishes_retire;
42682 -
42683 -extern atomic_t fscache_n_cookie_index;
42684 -extern atomic_t fscache_n_cookie_data;
42685 -extern atomic_t fscache_n_cookie_special;
42686 -
42687 -extern atomic_t fscache_n_object_alloc;
42688 -extern atomic_t fscache_n_object_no_alloc;
42689 -extern atomic_t fscache_n_object_lookups;
42690 -extern atomic_t fscache_n_object_lookups_negative;
42691 -extern atomic_t fscache_n_object_lookups_positive;
42692 -extern atomic_t fscache_n_object_lookups_timed_out;
42693 -extern atomic_t fscache_n_object_created;
42694 -extern atomic_t fscache_n_object_avail;
42695 -extern atomic_t fscache_n_object_dead;
42696 -
42697 -extern atomic_t fscache_n_checkaux_none;
42698 -extern atomic_t fscache_n_checkaux_okay;
42699 -extern atomic_t fscache_n_checkaux_update;
42700 -extern atomic_t fscache_n_checkaux_obsolete;
42701 +extern atomic_unchecked_t fscache_n_op_pend;
42702 +extern atomic_unchecked_t fscache_n_op_run;
42703 +extern atomic_unchecked_t fscache_n_op_enqueue;
42704 +extern atomic_unchecked_t fscache_n_op_deferred_release;
42705 +extern atomic_unchecked_t fscache_n_op_release;
42706 +extern atomic_unchecked_t fscache_n_op_gc;
42707 +extern atomic_unchecked_t fscache_n_op_cancelled;
42708 +extern atomic_unchecked_t fscache_n_op_rejected;
42709 +
42710 +extern atomic_unchecked_t fscache_n_attr_changed;
42711 +extern atomic_unchecked_t fscache_n_attr_changed_ok;
42712 +extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
42713 +extern atomic_unchecked_t fscache_n_attr_changed_nomem;
42714 +extern atomic_unchecked_t fscache_n_attr_changed_calls;
42715 +
42716 +extern atomic_unchecked_t fscache_n_allocs;
42717 +extern atomic_unchecked_t fscache_n_allocs_ok;
42718 +extern atomic_unchecked_t fscache_n_allocs_wait;
42719 +extern atomic_unchecked_t fscache_n_allocs_nobufs;
42720 +extern atomic_unchecked_t fscache_n_allocs_intr;
42721 +extern atomic_unchecked_t fscache_n_allocs_object_dead;
42722 +extern atomic_unchecked_t fscache_n_alloc_ops;
42723 +extern atomic_unchecked_t fscache_n_alloc_op_waits;
42724 +
42725 +extern atomic_unchecked_t fscache_n_retrievals;
42726 +extern atomic_unchecked_t fscache_n_retrievals_ok;
42727 +extern atomic_unchecked_t fscache_n_retrievals_wait;
42728 +extern atomic_unchecked_t fscache_n_retrievals_nodata;
42729 +extern atomic_unchecked_t fscache_n_retrievals_nobufs;
42730 +extern atomic_unchecked_t fscache_n_retrievals_intr;
42731 +extern atomic_unchecked_t fscache_n_retrievals_nomem;
42732 +extern atomic_unchecked_t fscache_n_retrievals_object_dead;
42733 +extern atomic_unchecked_t fscache_n_retrieval_ops;
42734 +extern atomic_unchecked_t fscache_n_retrieval_op_waits;
42735 +
42736 +extern atomic_unchecked_t fscache_n_stores;
42737 +extern atomic_unchecked_t fscache_n_stores_ok;
42738 +extern atomic_unchecked_t fscache_n_stores_again;
42739 +extern atomic_unchecked_t fscache_n_stores_nobufs;
42740 +extern atomic_unchecked_t fscache_n_stores_oom;
42741 +extern atomic_unchecked_t fscache_n_store_ops;
42742 +extern atomic_unchecked_t fscache_n_store_calls;
42743 +extern atomic_unchecked_t fscache_n_store_pages;
42744 +extern atomic_unchecked_t fscache_n_store_radix_deletes;
42745 +extern atomic_unchecked_t fscache_n_store_pages_over_limit;
42746 +
42747 +extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
42748 +extern atomic_unchecked_t fscache_n_store_vmscan_gone;
42749 +extern atomic_unchecked_t fscache_n_store_vmscan_busy;
42750 +extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
42751 +
42752 +extern atomic_unchecked_t fscache_n_marks;
42753 +extern atomic_unchecked_t fscache_n_uncaches;
42754 +
42755 +extern atomic_unchecked_t fscache_n_acquires;
42756 +extern atomic_unchecked_t fscache_n_acquires_null;
42757 +extern atomic_unchecked_t fscache_n_acquires_no_cache;
42758 +extern atomic_unchecked_t fscache_n_acquires_ok;
42759 +extern atomic_unchecked_t fscache_n_acquires_nobufs;
42760 +extern atomic_unchecked_t fscache_n_acquires_oom;
42761 +
42762 +extern atomic_unchecked_t fscache_n_updates;
42763 +extern atomic_unchecked_t fscache_n_updates_null;
42764 +extern atomic_unchecked_t fscache_n_updates_run;
42765 +
42766 +extern atomic_unchecked_t fscache_n_relinquishes;
42767 +extern atomic_unchecked_t fscache_n_relinquishes_null;
42768 +extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
42769 +extern atomic_unchecked_t fscache_n_relinquishes_retire;
42770 +
42771 +extern atomic_unchecked_t fscache_n_cookie_index;
42772 +extern atomic_unchecked_t fscache_n_cookie_data;
42773 +extern atomic_unchecked_t fscache_n_cookie_special;
42774 +
42775 +extern atomic_unchecked_t fscache_n_object_alloc;
42776 +extern atomic_unchecked_t fscache_n_object_no_alloc;
42777 +extern atomic_unchecked_t fscache_n_object_lookups;
42778 +extern atomic_unchecked_t fscache_n_object_lookups_negative;
42779 +extern atomic_unchecked_t fscache_n_object_lookups_positive;
42780 +extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
42781 +extern atomic_unchecked_t fscache_n_object_created;
42782 +extern atomic_unchecked_t fscache_n_object_avail;
42783 +extern atomic_unchecked_t fscache_n_object_dead;
42784 +
42785 +extern atomic_unchecked_t fscache_n_checkaux_none;
42786 +extern atomic_unchecked_t fscache_n_checkaux_okay;
42787 +extern atomic_unchecked_t fscache_n_checkaux_update;
42788 +extern atomic_unchecked_t fscache_n_checkaux_obsolete;
42789
42790 extern atomic_t fscache_n_cop_alloc_object;
42791 extern atomic_t fscache_n_cop_lookup_object;
42792 @@ -255,6 +255,11 @@ static inline void fscache_stat(atomic_t
42793 atomic_inc(stat);
42794 }
42795
42796 +static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
42797 +{
42798 + atomic_inc_unchecked(stat);
42799 +}
42800 +
42801 static inline void fscache_stat_d(atomic_t *stat)
42802 {
42803 atomic_dec(stat);
42804 @@ -267,6 +272,7 @@ extern const struct file_operations fsca
42805
42806 #define __fscache_stat(stat) (NULL)
42807 #define fscache_stat(stat) do {} while (0)
42808 +#define fscache_stat_unchecked(stat) do {} while (0)
42809 #define fscache_stat_d(stat) do {} while (0)
42810 #endif
42811
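
Alongside the counter declarations, the internal.h hunk adds an fscache_stat_unchecked() inline for the stats build and a matching "do {} while (0)" macro for the no-stats build, so every call site stays unconditional and the unused counter argument simply disappears when statistics are compiled out. A compact sketch of that pattern; CONFIG_STATS and the counter name are placeholders:

#include <stdio.h>

#define CONFIG_STATS 1		/* flip to 0 to compile the stats out */

#if CONFIG_STATS
static long n_acquires;
# define stat_inc(v) ((*(v))++)
#else
/* The argument is dropped entirely, so call sites need no #ifdef and may
 * even name counters that only exist in the stats build. */
# define stat_inc(v) do {} while (0)
#endif

int main(void)
{
	stat_inc(&n_acquires);
	stat_inc(&n_acquires);
#if CONFIG_STATS
	printf("acquires = %ld\n", n_acquires);
#else
	puts("stats compiled out");
#endif
	return 0;
}
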
42812 diff -urNp linux-3.0.8/fs/fscache/object.c linux-3.0.8/fs/fscache/object.c
42813 --- linux-3.0.8/fs/fscache/object.c 2011-07-21 22:17:23.000000000 -0400
42814 +++ linux-3.0.8/fs/fscache/object.c 2011-08-23 21:47:56.000000000 -0400
42815 @@ -128,7 +128,7 @@ static void fscache_object_state_machine
42816 /* update the object metadata on disk */
42817 case FSCACHE_OBJECT_UPDATING:
42818 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
42819 - fscache_stat(&fscache_n_updates_run);
42820 + fscache_stat_unchecked(&fscache_n_updates_run);
42821 fscache_stat(&fscache_n_cop_update_object);
42822 object->cache->ops->update_object(object);
42823 fscache_stat_d(&fscache_n_cop_update_object);
42824 @@ -217,7 +217,7 @@ static void fscache_object_state_machine
42825 spin_lock(&object->lock);
42826 object->state = FSCACHE_OBJECT_DEAD;
42827 spin_unlock(&object->lock);
42828 - fscache_stat(&fscache_n_object_dead);
42829 + fscache_stat_unchecked(&fscache_n_object_dead);
42830 goto terminal_transit;
42831
42832 /* handle the parent cache of this object being withdrawn from
42833 @@ -232,7 +232,7 @@ static void fscache_object_state_machine
42834 spin_lock(&object->lock);
42835 object->state = FSCACHE_OBJECT_DEAD;
42836 spin_unlock(&object->lock);
42837 - fscache_stat(&fscache_n_object_dead);
42838 + fscache_stat_unchecked(&fscache_n_object_dead);
42839 goto terminal_transit;
42840
42841 /* complain about the object being woken up once it is
42842 @@ -461,7 +461,7 @@ static void fscache_lookup_object(struct
42843 parent->cookie->def->name, cookie->def->name,
42844 object->cache->tag->name);
42845
42846 - fscache_stat(&fscache_n_object_lookups);
42847 + fscache_stat_unchecked(&fscache_n_object_lookups);
42848 fscache_stat(&fscache_n_cop_lookup_object);
42849 ret = object->cache->ops->lookup_object(object);
42850 fscache_stat_d(&fscache_n_cop_lookup_object);
42851 @@ -472,7 +472,7 @@ static void fscache_lookup_object(struct
42852 if (ret == -ETIMEDOUT) {
42853 /* probably stuck behind another object, so move this one to
42854 * the back of the queue */
42855 - fscache_stat(&fscache_n_object_lookups_timed_out);
42856 + fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
42857 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
42858 }
42859
42860 @@ -495,7 +495,7 @@ void fscache_object_lookup_negative(stru
42861
42862 spin_lock(&object->lock);
42863 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
42864 - fscache_stat(&fscache_n_object_lookups_negative);
42865 + fscache_stat_unchecked(&fscache_n_object_lookups_negative);
42866
42867 /* transit here to allow write requests to begin stacking up
42868 * and read requests to begin returning ENODATA */
42869 @@ -541,7 +541,7 @@ void fscache_obtained_object(struct fsca
42870 * result, in which case there may be data available */
42871 spin_lock(&object->lock);
42872 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
42873 - fscache_stat(&fscache_n_object_lookups_positive);
42874 + fscache_stat_unchecked(&fscache_n_object_lookups_positive);
42875
42876 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
42877
42878 @@ -555,7 +555,7 @@ void fscache_obtained_object(struct fsca
42879 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
42880 } else {
42881 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
42882 - fscache_stat(&fscache_n_object_created);
42883 + fscache_stat_unchecked(&fscache_n_object_created);
42884
42885 object->state = FSCACHE_OBJECT_AVAILABLE;
42886 spin_unlock(&object->lock);
42887 @@ -602,7 +602,7 @@ static void fscache_object_available(str
42888 fscache_enqueue_dependents(object);
42889
42890 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
42891 - fscache_stat(&fscache_n_object_avail);
42892 + fscache_stat_unchecked(&fscache_n_object_avail);
42893
42894 _leave("");
42895 }
42896 @@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(
42897 enum fscache_checkaux result;
42898
42899 if (!object->cookie->def->check_aux) {
42900 - fscache_stat(&fscache_n_checkaux_none);
42901 + fscache_stat_unchecked(&fscache_n_checkaux_none);
42902 return FSCACHE_CHECKAUX_OKAY;
42903 }
42904
42905 @@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(
42906 switch (result) {
42907 /* entry okay as is */
42908 case FSCACHE_CHECKAUX_OKAY:
42909 - fscache_stat(&fscache_n_checkaux_okay);
42910 + fscache_stat_unchecked(&fscache_n_checkaux_okay);
42911 break;
42912
42913 /* entry requires update */
42914 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
42915 - fscache_stat(&fscache_n_checkaux_update);
42916 + fscache_stat_unchecked(&fscache_n_checkaux_update);
42917 break;
42918
42919 /* entry requires deletion */
42920 case FSCACHE_CHECKAUX_OBSOLETE:
42921 - fscache_stat(&fscache_n_checkaux_obsolete);
42922 + fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
42923 break;
42924
42925 default:
42926 diff -urNp linux-3.0.8/fs/fscache/operation.c linux-3.0.8/fs/fscache/operation.c
42927 --- linux-3.0.8/fs/fscache/operation.c 2011-07-21 22:17:23.000000000 -0400
42928 +++ linux-3.0.8/fs/fscache/operation.c 2011-08-23 21:47:56.000000000 -0400
42929 @@ -17,7 +17,7 @@
42930 #include <linux/slab.h>
42931 #include "internal.h"
42932
42933 -atomic_t fscache_op_debug_id;
42934 +atomic_unchecked_t fscache_op_debug_id;
42935 EXPORT_SYMBOL(fscache_op_debug_id);
42936
42937 /**
42938 @@ -38,7 +38,7 @@ void fscache_enqueue_operation(struct fs
42939 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
42940 ASSERTCMP(atomic_read(&op->usage), >, 0);
42941
42942 - fscache_stat(&fscache_n_op_enqueue);
42943 + fscache_stat_unchecked(&fscache_n_op_enqueue);
42944 switch (op->flags & FSCACHE_OP_TYPE) {
42945 case FSCACHE_OP_ASYNC:
42946 _debug("queue async");
42947 @@ -69,7 +69,7 @@ static void fscache_run_op(struct fscach
42948 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
42949 if (op->processor)
42950 fscache_enqueue_operation(op);
42951 - fscache_stat(&fscache_n_op_run);
42952 + fscache_stat_unchecked(&fscache_n_op_run);
42953 }
42954
42955 /*
42956 @@ -98,11 +98,11 @@ int fscache_submit_exclusive_op(struct f
42957 if (object->n_ops > 1) {
42958 atomic_inc(&op->usage);
42959 list_add_tail(&op->pend_link, &object->pending_ops);
42960 - fscache_stat(&fscache_n_op_pend);
42961 + fscache_stat_unchecked(&fscache_n_op_pend);
42962 } else if (!list_empty(&object->pending_ops)) {
42963 atomic_inc(&op->usage);
42964 list_add_tail(&op->pend_link, &object->pending_ops);
42965 - fscache_stat(&fscache_n_op_pend);
42966 + fscache_stat_unchecked(&fscache_n_op_pend);
42967 fscache_start_operations(object);
42968 } else {
42969 ASSERTCMP(object->n_in_progress, ==, 0);
42970 @@ -118,7 +118,7 @@ int fscache_submit_exclusive_op(struct f
42971 object->n_exclusive++; /* reads and writes must wait */
42972 atomic_inc(&op->usage);
42973 list_add_tail(&op->pend_link, &object->pending_ops);
42974 - fscache_stat(&fscache_n_op_pend);
42975 + fscache_stat_unchecked(&fscache_n_op_pend);
42976 ret = 0;
42977 } else {
42978 /* not allowed to submit ops in any other state */
42979 @@ -203,11 +203,11 @@ int fscache_submit_op(struct fscache_obj
42980 if (object->n_exclusive > 0) {
42981 atomic_inc(&op->usage);
42982 list_add_tail(&op->pend_link, &object->pending_ops);
42983 - fscache_stat(&fscache_n_op_pend);
42984 + fscache_stat_unchecked(&fscache_n_op_pend);
42985 } else if (!list_empty(&object->pending_ops)) {
42986 atomic_inc(&op->usage);
42987 list_add_tail(&op->pend_link, &object->pending_ops);
42988 - fscache_stat(&fscache_n_op_pend);
42989 + fscache_stat_unchecked(&fscache_n_op_pend);
42990 fscache_start_operations(object);
42991 } else {
42992 ASSERTCMP(object->n_exclusive, ==, 0);
42993 @@ -219,12 +219,12 @@ int fscache_submit_op(struct fscache_obj
42994 object->n_ops++;
42995 atomic_inc(&op->usage);
42996 list_add_tail(&op->pend_link, &object->pending_ops);
42997 - fscache_stat(&fscache_n_op_pend);
42998 + fscache_stat_unchecked(&fscache_n_op_pend);
42999 ret = 0;
43000 } else if (object->state == FSCACHE_OBJECT_DYING ||
43001 object->state == FSCACHE_OBJECT_LC_DYING ||
43002 object->state == FSCACHE_OBJECT_WITHDRAWING) {
43003 - fscache_stat(&fscache_n_op_rejected);
43004 + fscache_stat_unchecked(&fscache_n_op_rejected);
43005 ret = -ENOBUFS;
43006 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
43007 fscache_report_unexpected_submission(object, op, ostate);
43008 @@ -294,7 +294,7 @@ int fscache_cancel_op(struct fscache_ope
43009
43010 ret = -EBUSY;
43011 if (!list_empty(&op->pend_link)) {
43012 - fscache_stat(&fscache_n_op_cancelled);
43013 + fscache_stat_unchecked(&fscache_n_op_cancelled);
43014 list_del_init(&op->pend_link);
43015 object->n_ops--;
43016 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
43017 @@ -331,7 +331,7 @@ void fscache_put_operation(struct fscach
43018 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
43019 BUG();
43020
43021 - fscache_stat(&fscache_n_op_release);
43022 + fscache_stat_unchecked(&fscache_n_op_release);
43023
43024 if (op->release) {
43025 op->release(op);
43026 @@ -348,7 +348,7 @@ void fscache_put_operation(struct fscach
43027 * lock, and defer it otherwise */
43028 if (!spin_trylock(&object->lock)) {
43029 _debug("defer put");
43030 - fscache_stat(&fscache_n_op_deferred_release);
43031 + fscache_stat_unchecked(&fscache_n_op_deferred_release);
43032
43033 cache = object->cache;
43034 spin_lock(&cache->op_gc_list_lock);
43035 @@ -410,7 +410,7 @@ void fscache_operation_gc(struct work_st
43036
43037 _debug("GC DEFERRED REL OBJ%x OP%x",
43038 object->debug_id, op->debug_id);
43039 - fscache_stat(&fscache_n_op_gc);
43040 + fscache_stat_unchecked(&fscache_n_op_gc);
43041
43042 ASSERTCMP(atomic_read(&op->usage), ==, 0);
43043
43044 diff -urNp linux-3.0.8/fs/fscache/page.c linux-3.0.8/fs/fscache/page.c
43045 --- linux-3.0.8/fs/fscache/page.c 2011-07-21 22:17:23.000000000 -0400
43046 +++ linux-3.0.8/fs/fscache/page.c 2011-08-23 21:47:56.000000000 -0400
43047 @@ -60,7 +60,7 @@ bool __fscache_maybe_release_page(struct
43048 val = radix_tree_lookup(&cookie->stores, page->index);
43049 if (!val) {
43050 rcu_read_unlock();
43051 - fscache_stat(&fscache_n_store_vmscan_not_storing);
43052 + fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
43053 __fscache_uncache_page(cookie, page);
43054 return true;
43055 }
43056 @@ -90,11 +90,11 @@ bool __fscache_maybe_release_page(struct
43057 spin_unlock(&cookie->stores_lock);
43058
43059 if (xpage) {
43060 - fscache_stat(&fscache_n_store_vmscan_cancelled);
43061 - fscache_stat(&fscache_n_store_radix_deletes);
43062 + fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
43063 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
43064 ASSERTCMP(xpage, ==, page);
43065 } else {
43066 - fscache_stat(&fscache_n_store_vmscan_gone);
43067 + fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
43068 }
43069
43070 wake_up_bit(&cookie->flags, 0);
43071 @@ -107,7 +107,7 @@ page_busy:
43072 /* we might want to wait here, but that could deadlock the allocator as
43073 * the work threads writing to the cache may all end up sleeping
43074 * on memory allocation */
43075 - fscache_stat(&fscache_n_store_vmscan_busy);
43076 + fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
43077 return false;
43078 }
43079 EXPORT_SYMBOL(__fscache_maybe_release_page);
43080 @@ -131,7 +131,7 @@ static void fscache_end_page_write(struc
43081 FSCACHE_COOKIE_STORING_TAG);
43082 if (!radix_tree_tag_get(&cookie->stores, page->index,
43083 FSCACHE_COOKIE_PENDING_TAG)) {
43084 - fscache_stat(&fscache_n_store_radix_deletes);
43085 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
43086 xpage = radix_tree_delete(&cookie->stores, page->index);
43087 }
43088 spin_unlock(&cookie->stores_lock);
43089 @@ -152,7 +152,7 @@ static void fscache_attr_changed_op(stru
43090
43091 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
43092
43093 - fscache_stat(&fscache_n_attr_changed_calls);
43094 + fscache_stat_unchecked(&fscache_n_attr_changed_calls);
43095
43096 if (fscache_object_is_active(object)) {
43097 fscache_stat(&fscache_n_cop_attr_changed);
43098 @@ -177,11 +177,11 @@ int __fscache_attr_changed(struct fscach
43099
43100 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
43101
43102 - fscache_stat(&fscache_n_attr_changed);
43103 + fscache_stat_unchecked(&fscache_n_attr_changed);
43104
43105 op = kzalloc(sizeof(*op), GFP_KERNEL);
43106 if (!op) {
43107 - fscache_stat(&fscache_n_attr_changed_nomem);
43108 + fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
43109 _leave(" = -ENOMEM");
43110 return -ENOMEM;
43111 }
43112 @@ -199,7 +199,7 @@ int __fscache_attr_changed(struct fscach
43113 if (fscache_submit_exclusive_op(object, op) < 0)
43114 goto nobufs;
43115 spin_unlock(&cookie->lock);
43116 - fscache_stat(&fscache_n_attr_changed_ok);
43117 + fscache_stat_unchecked(&fscache_n_attr_changed_ok);
43118 fscache_put_operation(op);
43119 _leave(" = 0");
43120 return 0;
43121 @@ -207,7 +207,7 @@ int __fscache_attr_changed(struct fscach
43122 nobufs:
43123 spin_unlock(&cookie->lock);
43124 kfree(op);
43125 - fscache_stat(&fscache_n_attr_changed_nobufs);
43126 + fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
43127 _leave(" = %d", -ENOBUFS);
43128 return -ENOBUFS;
43129 }
43130 @@ -243,7 +243,7 @@ static struct fscache_retrieval *fscache
43131 /* allocate a retrieval operation and attempt to submit it */
43132 op = kzalloc(sizeof(*op), GFP_NOIO);
43133 if (!op) {
43134 - fscache_stat(&fscache_n_retrievals_nomem);
43135 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
43136 return NULL;
43137 }
43138
43139 @@ -271,13 +271,13 @@ static int fscache_wait_for_deferred_loo
43140 return 0;
43141 }
43142
43143 - fscache_stat(&fscache_n_retrievals_wait);
43144 + fscache_stat_unchecked(&fscache_n_retrievals_wait);
43145
43146 jif = jiffies;
43147 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
43148 fscache_wait_bit_interruptible,
43149 TASK_INTERRUPTIBLE) != 0) {
43150 - fscache_stat(&fscache_n_retrievals_intr);
43151 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
43152 _leave(" = -ERESTARTSYS");
43153 return -ERESTARTSYS;
43154 }
43155 @@ -295,8 +295,8 @@ static int fscache_wait_for_deferred_loo
43156 */
43157 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
43158 struct fscache_retrieval *op,
43159 - atomic_t *stat_op_waits,
43160 - atomic_t *stat_object_dead)
43161 + atomic_unchecked_t *stat_op_waits,
43162 + atomic_unchecked_t *stat_object_dead)
43163 {
43164 int ret;
43165
43166 @@ -304,7 +304,7 @@ static int fscache_wait_for_retrieval_ac
43167 goto check_if_dead;
43168
43169 _debug(">>> WT");
43170 - fscache_stat(stat_op_waits);
43171 + fscache_stat_unchecked(stat_op_waits);
43172 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
43173 fscache_wait_bit_interruptible,
43174 TASK_INTERRUPTIBLE) < 0) {
43175 @@ -321,7 +321,7 @@ static int fscache_wait_for_retrieval_ac
43176
43177 check_if_dead:
43178 if (unlikely(fscache_object_is_dead(object))) {
43179 - fscache_stat(stat_object_dead);
43180 + fscache_stat_unchecked(stat_object_dead);
43181 return -ENOBUFS;
43182 }
43183 return 0;
43184 @@ -348,7 +348,7 @@ int __fscache_read_or_alloc_page(struct
43185
43186 _enter("%p,%p,,,", cookie, page);
43187
43188 - fscache_stat(&fscache_n_retrievals);
43189 + fscache_stat_unchecked(&fscache_n_retrievals);
43190
43191 if (hlist_empty(&cookie->backing_objects))
43192 goto nobufs;
43193 @@ -381,7 +381,7 @@ int __fscache_read_or_alloc_page(struct
43194 goto nobufs_unlock;
43195 spin_unlock(&cookie->lock);
43196
43197 - fscache_stat(&fscache_n_retrieval_ops);
43198 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
43199
43200 /* pin the netfs read context in case we need to do the actual netfs
43201 * read because we've encountered a cache read failure */
43202 @@ -411,15 +411,15 @@ int __fscache_read_or_alloc_page(struct
43203
43204 error:
43205 if (ret == -ENOMEM)
43206 - fscache_stat(&fscache_n_retrievals_nomem);
43207 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
43208 else if (ret == -ERESTARTSYS)
43209 - fscache_stat(&fscache_n_retrievals_intr);
43210 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
43211 else if (ret == -ENODATA)
43212 - fscache_stat(&fscache_n_retrievals_nodata);
43213 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
43214 else if (ret < 0)
43215 - fscache_stat(&fscache_n_retrievals_nobufs);
43216 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
43217 else
43218 - fscache_stat(&fscache_n_retrievals_ok);
43219 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
43220
43221 fscache_put_retrieval(op);
43222 _leave(" = %d", ret);
43223 @@ -429,7 +429,7 @@ nobufs_unlock:
43224 spin_unlock(&cookie->lock);
43225 kfree(op);
43226 nobufs:
43227 - fscache_stat(&fscache_n_retrievals_nobufs);
43228 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
43229 _leave(" = -ENOBUFS");
43230 return -ENOBUFS;
43231 }
43232 @@ -467,7 +467,7 @@ int __fscache_read_or_alloc_pages(struct
43233
43234 _enter("%p,,%d,,,", cookie, *nr_pages);
43235
43236 - fscache_stat(&fscache_n_retrievals);
43237 + fscache_stat_unchecked(&fscache_n_retrievals);
43238
43239 if (hlist_empty(&cookie->backing_objects))
43240 goto nobufs;
43241 @@ -497,7 +497,7 @@ int __fscache_read_or_alloc_pages(struct
43242 goto nobufs_unlock;
43243 spin_unlock(&cookie->lock);
43244
43245 - fscache_stat(&fscache_n_retrieval_ops);
43246 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
43247
43248 /* pin the netfs read context in case we need to do the actual netfs
43249 * read because we've encountered a cache read failure */
43250 @@ -527,15 +527,15 @@ int __fscache_read_or_alloc_pages(struct
43251
43252 error:
43253 if (ret == -ENOMEM)
43254 - fscache_stat(&fscache_n_retrievals_nomem);
43255 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
43256 else if (ret == -ERESTARTSYS)
43257 - fscache_stat(&fscache_n_retrievals_intr);
43258 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
43259 else if (ret == -ENODATA)
43260 - fscache_stat(&fscache_n_retrievals_nodata);
43261 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
43262 else if (ret < 0)
43263 - fscache_stat(&fscache_n_retrievals_nobufs);
43264 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
43265 else
43266 - fscache_stat(&fscache_n_retrievals_ok);
43267 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
43268
43269 fscache_put_retrieval(op);
43270 _leave(" = %d", ret);
43271 @@ -545,7 +545,7 @@ nobufs_unlock:
43272 spin_unlock(&cookie->lock);
43273 kfree(op);
43274 nobufs:
43275 - fscache_stat(&fscache_n_retrievals_nobufs);
43276 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
43277 _leave(" = -ENOBUFS");
43278 return -ENOBUFS;
43279 }
43280 @@ -569,7 +569,7 @@ int __fscache_alloc_page(struct fscache_
43281
43282 _enter("%p,%p,,,", cookie, page);
43283
43284 - fscache_stat(&fscache_n_allocs);
43285 + fscache_stat_unchecked(&fscache_n_allocs);
43286
43287 if (hlist_empty(&cookie->backing_objects))
43288 goto nobufs;
43289 @@ -595,7 +595,7 @@ int __fscache_alloc_page(struct fscache_
43290 goto nobufs_unlock;
43291 spin_unlock(&cookie->lock);
43292
43293 - fscache_stat(&fscache_n_alloc_ops);
43294 + fscache_stat_unchecked(&fscache_n_alloc_ops);
43295
43296 ret = fscache_wait_for_retrieval_activation(
43297 object, op,
43298 @@ -611,11 +611,11 @@ int __fscache_alloc_page(struct fscache_
43299
43300 error:
43301 if (ret == -ERESTARTSYS)
43302 - fscache_stat(&fscache_n_allocs_intr);
43303 + fscache_stat_unchecked(&fscache_n_allocs_intr);
43304 else if (ret < 0)
43305 - fscache_stat(&fscache_n_allocs_nobufs);
43306 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
43307 else
43308 - fscache_stat(&fscache_n_allocs_ok);
43309 + fscache_stat_unchecked(&fscache_n_allocs_ok);
43310
43311 fscache_put_retrieval(op);
43312 _leave(" = %d", ret);
43313 @@ -625,7 +625,7 @@ nobufs_unlock:
43314 spin_unlock(&cookie->lock);
43315 kfree(op);
43316 nobufs:
43317 - fscache_stat(&fscache_n_allocs_nobufs);
43318 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
43319 _leave(" = -ENOBUFS");
43320 return -ENOBUFS;
43321 }
43322 @@ -666,7 +666,7 @@ static void fscache_write_op(struct fsca
43323
43324 spin_lock(&cookie->stores_lock);
43325
43326 - fscache_stat(&fscache_n_store_calls);
43327 + fscache_stat_unchecked(&fscache_n_store_calls);
43328
43329 /* find a page to store */
43330 page = NULL;
43331 @@ -677,7 +677,7 @@ static void fscache_write_op(struct fsca
43332 page = results[0];
43333 _debug("gang %d [%lx]", n, page->index);
43334 if (page->index > op->store_limit) {
43335 - fscache_stat(&fscache_n_store_pages_over_limit);
43336 + fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
43337 goto superseded;
43338 }
43339
43340 @@ -689,7 +689,7 @@ static void fscache_write_op(struct fsca
43341 spin_unlock(&cookie->stores_lock);
43342 spin_unlock(&object->lock);
43343
43344 - fscache_stat(&fscache_n_store_pages);
43345 + fscache_stat_unchecked(&fscache_n_store_pages);
43346 fscache_stat(&fscache_n_cop_write_page);
43347 ret = object->cache->ops->write_page(op, page);
43348 fscache_stat_d(&fscache_n_cop_write_page);
43349 @@ -757,7 +757,7 @@ int __fscache_write_page(struct fscache_
43350 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
43351 ASSERT(PageFsCache(page));
43352
43353 - fscache_stat(&fscache_n_stores);
43354 + fscache_stat_unchecked(&fscache_n_stores);
43355
43356 op = kzalloc(sizeof(*op), GFP_NOIO);
43357 if (!op)
43358 @@ -808,7 +808,7 @@ int __fscache_write_page(struct fscache_
43359 spin_unlock(&cookie->stores_lock);
43360 spin_unlock(&object->lock);
43361
43362 - op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
43363 + op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
43364 op->store_limit = object->store_limit;
43365
43366 if (fscache_submit_op(object, &op->op) < 0)
43367 @@ -816,8 +816,8 @@ int __fscache_write_page(struct fscache_
43368
43369 spin_unlock(&cookie->lock);
43370 radix_tree_preload_end();
43371 - fscache_stat(&fscache_n_store_ops);
43372 - fscache_stat(&fscache_n_stores_ok);
43373 + fscache_stat_unchecked(&fscache_n_store_ops);
43374 + fscache_stat_unchecked(&fscache_n_stores_ok);
43375
43376 /* the work queue now carries its own ref on the object */
43377 fscache_put_operation(&op->op);
43378 @@ -825,14 +825,14 @@ int __fscache_write_page(struct fscache_
43379 return 0;
43380
43381 already_queued:
43382 - fscache_stat(&fscache_n_stores_again);
43383 + fscache_stat_unchecked(&fscache_n_stores_again);
43384 already_pending:
43385 spin_unlock(&cookie->stores_lock);
43386 spin_unlock(&object->lock);
43387 spin_unlock(&cookie->lock);
43388 radix_tree_preload_end();
43389 kfree(op);
43390 - fscache_stat(&fscache_n_stores_ok);
43391 + fscache_stat_unchecked(&fscache_n_stores_ok);
43392 _leave(" = 0");
43393 return 0;
43394
43395 @@ -851,14 +851,14 @@ nobufs:
43396 spin_unlock(&cookie->lock);
43397 radix_tree_preload_end();
43398 kfree(op);
43399 - fscache_stat(&fscache_n_stores_nobufs);
43400 + fscache_stat_unchecked(&fscache_n_stores_nobufs);
43401 _leave(" = -ENOBUFS");
43402 return -ENOBUFS;
43403
43404 nomem_free:
43405 kfree(op);
43406 nomem:
43407 - fscache_stat(&fscache_n_stores_oom);
43408 + fscache_stat_unchecked(&fscache_n_stores_oom);
43409 _leave(" = -ENOMEM");
43410 return -ENOMEM;
43411 }
43412 @@ -876,7 +876,7 @@ void __fscache_uncache_page(struct fscac
43413 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
43414 ASSERTCMP(page, !=, NULL);
43415
43416 - fscache_stat(&fscache_n_uncaches);
43417 + fscache_stat_unchecked(&fscache_n_uncaches);
43418
43419 /* cache withdrawal may beat us to it */
43420 if (!PageFsCache(page))
43421 @@ -929,7 +929,7 @@ void fscache_mark_pages_cached(struct fs
43422 unsigned long loop;
43423
43424 #ifdef CONFIG_FSCACHE_STATS
43425 - atomic_add(pagevec->nr, &fscache_n_marks);
43426 + atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
43427 #endif
43428
43429 for (loop = 0; loop < pagevec->nr; loop++) {
43430 diff -urNp linux-3.0.8/fs/fscache/stats.c linux-3.0.8/fs/fscache/stats.c
43431 --- linux-3.0.8/fs/fscache/stats.c 2011-07-21 22:17:23.000000000 -0400
43432 +++ linux-3.0.8/fs/fscache/stats.c 2011-08-23 21:47:56.000000000 -0400
43433 @@ -18,95 +18,95 @@
43434 /*
43435 * operation counters
43436 */
43437 -atomic_t fscache_n_op_pend;
43438 -atomic_t fscache_n_op_run;
43439 -atomic_t fscache_n_op_enqueue;
43440 -atomic_t fscache_n_op_requeue;
43441 -atomic_t fscache_n_op_deferred_release;
43442 -atomic_t fscache_n_op_release;
43443 -atomic_t fscache_n_op_gc;
43444 -atomic_t fscache_n_op_cancelled;
43445 -atomic_t fscache_n_op_rejected;
43446 -
43447 -atomic_t fscache_n_attr_changed;
43448 -atomic_t fscache_n_attr_changed_ok;
43449 -atomic_t fscache_n_attr_changed_nobufs;
43450 -atomic_t fscache_n_attr_changed_nomem;
43451 -atomic_t fscache_n_attr_changed_calls;
43452 -
43453 -atomic_t fscache_n_allocs;
43454 -atomic_t fscache_n_allocs_ok;
43455 -atomic_t fscache_n_allocs_wait;
43456 -atomic_t fscache_n_allocs_nobufs;
43457 -atomic_t fscache_n_allocs_intr;
43458 -atomic_t fscache_n_allocs_object_dead;
43459 -atomic_t fscache_n_alloc_ops;
43460 -atomic_t fscache_n_alloc_op_waits;
43461 -
43462 -atomic_t fscache_n_retrievals;
43463 -atomic_t fscache_n_retrievals_ok;
43464 -atomic_t fscache_n_retrievals_wait;
43465 -atomic_t fscache_n_retrievals_nodata;
43466 -atomic_t fscache_n_retrievals_nobufs;
43467 -atomic_t fscache_n_retrievals_intr;
43468 -atomic_t fscache_n_retrievals_nomem;
43469 -atomic_t fscache_n_retrievals_object_dead;
43470 -atomic_t fscache_n_retrieval_ops;
43471 -atomic_t fscache_n_retrieval_op_waits;
43472 -
43473 -atomic_t fscache_n_stores;
43474 -atomic_t fscache_n_stores_ok;
43475 -atomic_t fscache_n_stores_again;
43476 -atomic_t fscache_n_stores_nobufs;
43477 -atomic_t fscache_n_stores_oom;
43478 -atomic_t fscache_n_store_ops;
43479 -atomic_t fscache_n_store_calls;
43480 -atomic_t fscache_n_store_pages;
43481 -atomic_t fscache_n_store_radix_deletes;
43482 -atomic_t fscache_n_store_pages_over_limit;
43483 -
43484 -atomic_t fscache_n_store_vmscan_not_storing;
43485 -atomic_t fscache_n_store_vmscan_gone;
43486 -atomic_t fscache_n_store_vmscan_busy;
43487 -atomic_t fscache_n_store_vmscan_cancelled;
43488 -
43489 -atomic_t fscache_n_marks;
43490 -atomic_t fscache_n_uncaches;
43491 -
43492 -atomic_t fscache_n_acquires;
43493 -atomic_t fscache_n_acquires_null;
43494 -atomic_t fscache_n_acquires_no_cache;
43495 -atomic_t fscache_n_acquires_ok;
43496 -atomic_t fscache_n_acquires_nobufs;
43497 -atomic_t fscache_n_acquires_oom;
43498 -
43499 -atomic_t fscache_n_updates;
43500 -atomic_t fscache_n_updates_null;
43501 -atomic_t fscache_n_updates_run;
43502 -
43503 -atomic_t fscache_n_relinquishes;
43504 -atomic_t fscache_n_relinquishes_null;
43505 -atomic_t fscache_n_relinquishes_waitcrt;
43506 -atomic_t fscache_n_relinquishes_retire;
43507 -
43508 -atomic_t fscache_n_cookie_index;
43509 -atomic_t fscache_n_cookie_data;
43510 -atomic_t fscache_n_cookie_special;
43511 -
43512 -atomic_t fscache_n_object_alloc;
43513 -atomic_t fscache_n_object_no_alloc;
43514 -atomic_t fscache_n_object_lookups;
43515 -atomic_t fscache_n_object_lookups_negative;
43516 -atomic_t fscache_n_object_lookups_positive;
43517 -atomic_t fscache_n_object_lookups_timed_out;
43518 -atomic_t fscache_n_object_created;
43519 -atomic_t fscache_n_object_avail;
43520 -atomic_t fscache_n_object_dead;
43521 -
43522 -atomic_t fscache_n_checkaux_none;
43523 -atomic_t fscache_n_checkaux_okay;
43524 -atomic_t fscache_n_checkaux_update;
43525 -atomic_t fscache_n_checkaux_obsolete;
43526 +atomic_unchecked_t fscache_n_op_pend;
43527 +atomic_unchecked_t fscache_n_op_run;
43528 +atomic_unchecked_t fscache_n_op_enqueue;
43529 +atomic_unchecked_t fscache_n_op_requeue;
43530 +atomic_unchecked_t fscache_n_op_deferred_release;
43531 +atomic_unchecked_t fscache_n_op_release;
43532 +atomic_unchecked_t fscache_n_op_gc;
43533 +atomic_unchecked_t fscache_n_op_cancelled;
43534 +atomic_unchecked_t fscache_n_op_rejected;
43535 +
43536 +atomic_unchecked_t fscache_n_attr_changed;
43537 +atomic_unchecked_t fscache_n_attr_changed_ok;
43538 +atomic_unchecked_t fscache_n_attr_changed_nobufs;
43539 +atomic_unchecked_t fscache_n_attr_changed_nomem;
43540 +atomic_unchecked_t fscache_n_attr_changed_calls;
43541 +
43542 +atomic_unchecked_t fscache_n_allocs;
43543 +atomic_unchecked_t fscache_n_allocs_ok;
43544 +atomic_unchecked_t fscache_n_allocs_wait;
43545 +atomic_unchecked_t fscache_n_allocs_nobufs;
43546 +atomic_unchecked_t fscache_n_allocs_intr;
43547 +atomic_unchecked_t fscache_n_allocs_object_dead;
43548 +atomic_unchecked_t fscache_n_alloc_ops;
43549 +atomic_unchecked_t fscache_n_alloc_op_waits;
43550 +
43551 +atomic_unchecked_t fscache_n_retrievals;
43552 +atomic_unchecked_t fscache_n_retrievals_ok;
43553 +atomic_unchecked_t fscache_n_retrievals_wait;
43554 +atomic_unchecked_t fscache_n_retrievals_nodata;
43555 +atomic_unchecked_t fscache_n_retrievals_nobufs;
43556 +atomic_unchecked_t fscache_n_retrievals_intr;
43557 +atomic_unchecked_t fscache_n_retrievals_nomem;
43558 +atomic_unchecked_t fscache_n_retrievals_object_dead;
43559 +atomic_unchecked_t fscache_n_retrieval_ops;
43560 +atomic_unchecked_t fscache_n_retrieval_op_waits;
43561 +
43562 +atomic_unchecked_t fscache_n_stores;
43563 +atomic_unchecked_t fscache_n_stores_ok;
43564 +atomic_unchecked_t fscache_n_stores_again;
43565 +atomic_unchecked_t fscache_n_stores_nobufs;
43566 +atomic_unchecked_t fscache_n_stores_oom;
43567 +atomic_unchecked_t fscache_n_store_ops;
43568 +atomic_unchecked_t fscache_n_store_calls;
43569 +atomic_unchecked_t fscache_n_store_pages;
43570 +atomic_unchecked_t fscache_n_store_radix_deletes;
43571 +atomic_unchecked_t fscache_n_store_pages_over_limit;
43572 +
43573 +atomic_unchecked_t fscache_n_store_vmscan_not_storing;
43574 +atomic_unchecked_t fscache_n_store_vmscan_gone;
43575 +atomic_unchecked_t fscache_n_store_vmscan_busy;
43576 +atomic_unchecked_t fscache_n_store_vmscan_cancelled;
43577 +
43578 +atomic_unchecked_t fscache_n_marks;
43579 +atomic_unchecked_t fscache_n_uncaches;
43580 +
43581 +atomic_unchecked_t fscache_n_acquires;
43582 +atomic_unchecked_t fscache_n_acquires_null;
43583 +atomic_unchecked_t fscache_n_acquires_no_cache;
43584 +atomic_unchecked_t fscache_n_acquires_ok;
43585 +atomic_unchecked_t fscache_n_acquires_nobufs;
43586 +atomic_unchecked_t fscache_n_acquires_oom;
43587 +
43588 +atomic_unchecked_t fscache_n_updates;
43589 +atomic_unchecked_t fscache_n_updates_null;
43590 +atomic_unchecked_t fscache_n_updates_run;
43591 +
43592 +atomic_unchecked_t fscache_n_relinquishes;
43593 +atomic_unchecked_t fscache_n_relinquishes_null;
43594 +atomic_unchecked_t fscache_n_relinquishes_waitcrt;
43595 +atomic_unchecked_t fscache_n_relinquishes_retire;
43596 +
43597 +atomic_unchecked_t fscache_n_cookie_index;
43598 +atomic_unchecked_t fscache_n_cookie_data;
43599 +atomic_unchecked_t fscache_n_cookie_special;
43600 +
43601 +atomic_unchecked_t fscache_n_object_alloc;
43602 +atomic_unchecked_t fscache_n_object_no_alloc;
43603 +atomic_unchecked_t fscache_n_object_lookups;
43604 +atomic_unchecked_t fscache_n_object_lookups_negative;
43605 +atomic_unchecked_t fscache_n_object_lookups_positive;
43606 +atomic_unchecked_t fscache_n_object_lookups_timed_out;
43607 +atomic_unchecked_t fscache_n_object_created;
43608 +atomic_unchecked_t fscache_n_object_avail;
43609 +atomic_unchecked_t fscache_n_object_dead;
43610 +
43611 +atomic_unchecked_t fscache_n_checkaux_none;
43612 +atomic_unchecked_t fscache_n_checkaux_okay;
43613 +atomic_unchecked_t fscache_n_checkaux_update;
43614 +atomic_unchecked_t fscache_n_checkaux_obsolete;
43615
43616 atomic_t fscache_n_cop_alloc_object;
43617 atomic_t fscache_n_cop_lookup_object;
43618 @@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq
43619 seq_puts(m, "FS-Cache statistics\n");
43620
43621 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
43622 - atomic_read(&fscache_n_cookie_index),
43623 - atomic_read(&fscache_n_cookie_data),
43624 - atomic_read(&fscache_n_cookie_special));
43625 + atomic_read_unchecked(&fscache_n_cookie_index),
43626 + atomic_read_unchecked(&fscache_n_cookie_data),
43627 + atomic_read_unchecked(&fscache_n_cookie_special));
43628
43629 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
43630 - atomic_read(&fscache_n_object_alloc),
43631 - atomic_read(&fscache_n_object_no_alloc),
43632 - atomic_read(&fscache_n_object_avail),
43633 - atomic_read(&fscache_n_object_dead));
43634 + atomic_read_unchecked(&fscache_n_object_alloc),
43635 + atomic_read_unchecked(&fscache_n_object_no_alloc),
43636 + atomic_read_unchecked(&fscache_n_object_avail),
43637 + atomic_read_unchecked(&fscache_n_object_dead));
43638 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
43639 - atomic_read(&fscache_n_checkaux_none),
43640 - atomic_read(&fscache_n_checkaux_okay),
43641 - atomic_read(&fscache_n_checkaux_update),
43642 - atomic_read(&fscache_n_checkaux_obsolete));
43643 + atomic_read_unchecked(&fscache_n_checkaux_none),
43644 + atomic_read_unchecked(&fscache_n_checkaux_okay),
43645 + atomic_read_unchecked(&fscache_n_checkaux_update),
43646 + atomic_read_unchecked(&fscache_n_checkaux_obsolete));
43647
43648 seq_printf(m, "Pages : mrk=%u unc=%u\n",
43649 - atomic_read(&fscache_n_marks),
43650 - atomic_read(&fscache_n_uncaches));
43651 + atomic_read_unchecked(&fscache_n_marks),
43652 + atomic_read_unchecked(&fscache_n_uncaches));
43653
43654 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
43655 " oom=%u\n",
43656 - atomic_read(&fscache_n_acquires),
43657 - atomic_read(&fscache_n_acquires_null),
43658 - atomic_read(&fscache_n_acquires_no_cache),
43659 - atomic_read(&fscache_n_acquires_ok),
43660 - atomic_read(&fscache_n_acquires_nobufs),
43661 - atomic_read(&fscache_n_acquires_oom));
43662 + atomic_read_unchecked(&fscache_n_acquires),
43663 + atomic_read_unchecked(&fscache_n_acquires_null),
43664 + atomic_read_unchecked(&fscache_n_acquires_no_cache),
43665 + atomic_read_unchecked(&fscache_n_acquires_ok),
43666 + atomic_read_unchecked(&fscache_n_acquires_nobufs),
43667 + atomic_read_unchecked(&fscache_n_acquires_oom));
43668
43669 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
43670 - atomic_read(&fscache_n_object_lookups),
43671 - atomic_read(&fscache_n_object_lookups_negative),
43672 - atomic_read(&fscache_n_object_lookups_positive),
43673 - atomic_read(&fscache_n_object_created),
43674 - atomic_read(&fscache_n_object_lookups_timed_out));
43675 + atomic_read_unchecked(&fscache_n_object_lookups),
43676 + atomic_read_unchecked(&fscache_n_object_lookups_negative),
43677 + atomic_read_unchecked(&fscache_n_object_lookups_positive),
43678 + atomic_read_unchecked(&fscache_n_object_created),
43679 + atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
43680
43681 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
43682 - atomic_read(&fscache_n_updates),
43683 - atomic_read(&fscache_n_updates_null),
43684 - atomic_read(&fscache_n_updates_run));
43685 + atomic_read_unchecked(&fscache_n_updates),
43686 + atomic_read_unchecked(&fscache_n_updates_null),
43687 + atomic_read_unchecked(&fscache_n_updates_run));
43688
43689 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
43690 - atomic_read(&fscache_n_relinquishes),
43691 - atomic_read(&fscache_n_relinquishes_null),
43692 - atomic_read(&fscache_n_relinquishes_waitcrt),
43693 - atomic_read(&fscache_n_relinquishes_retire));
43694 + atomic_read_unchecked(&fscache_n_relinquishes),
43695 + atomic_read_unchecked(&fscache_n_relinquishes_null),
43696 + atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
43697 + atomic_read_unchecked(&fscache_n_relinquishes_retire));
43698
43699 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
43700 - atomic_read(&fscache_n_attr_changed),
43701 - atomic_read(&fscache_n_attr_changed_ok),
43702 - atomic_read(&fscache_n_attr_changed_nobufs),
43703 - atomic_read(&fscache_n_attr_changed_nomem),
43704 - atomic_read(&fscache_n_attr_changed_calls));
43705 + atomic_read_unchecked(&fscache_n_attr_changed),
43706 + atomic_read_unchecked(&fscache_n_attr_changed_ok),
43707 + atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
43708 + atomic_read_unchecked(&fscache_n_attr_changed_nomem),
43709 + atomic_read_unchecked(&fscache_n_attr_changed_calls));
43710
43711 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
43712 - atomic_read(&fscache_n_allocs),
43713 - atomic_read(&fscache_n_allocs_ok),
43714 - atomic_read(&fscache_n_allocs_wait),
43715 - atomic_read(&fscache_n_allocs_nobufs),
43716 - atomic_read(&fscache_n_allocs_intr));
43717 + atomic_read_unchecked(&fscache_n_allocs),
43718 + atomic_read_unchecked(&fscache_n_allocs_ok),
43719 + atomic_read_unchecked(&fscache_n_allocs_wait),
43720 + atomic_read_unchecked(&fscache_n_allocs_nobufs),
43721 + atomic_read_unchecked(&fscache_n_allocs_intr));
43722 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
43723 - atomic_read(&fscache_n_alloc_ops),
43724 - atomic_read(&fscache_n_alloc_op_waits),
43725 - atomic_read(&fscache_n_allocs_object_dead));
43726 + atomic_read_unchecked(&fscache_n_alloc_ops),
43727 + atomic_read_unchecked(&fscache_n_alloc_op_waits),
43728 + atomic_read_unchecked(&fscache_n_allocs_object_dead));
43729
43730 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
43731 " int=%u oom=%u\n",
43732 - atomic_read(&fscache_n_retrievals),
43733 - atomic_read(&fscache_n_retrievals_ok),
43734 - atomic_read(&fscache_n_retrievals_wait),
43735 - atomic_read(&fscache_n_retrievals_nodata),
43736 - atomic_read(&fscache_n_retrievals_nobufs),
43737 - atomic_read(&fscache_n_retrievals_intr),
43738 - atomic_read(&fscache_n_retrievals_nomem));
43739 + atomic_read_unchecked(&fscache_n_retrievals),
43740 + atomic_read_unchecked(&fscache_n_retrievals_ok),
43741 + atomic_read_unchecked(&fscache_n_retrievals_wait),
43742 + atomic_read_unchecked(&fscache_n_retrievals_nodata),
43743 + atomic_read_unchecked(&fscache_n_retrievals_nobufs),
43744 + atomic_read_unchecked(&fscache_n_retrievals_intr),
43745 + atomic_read_unchecked(&fscache_n_retrievals_nomem));
43746 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
43747 - atomic_read(&fscache_n_retrieval_ops),
43748 - atomic_read(&fscache_n_retrieval_op_waits),
43749 - atomic_read(&fscache_n_retrievals_object_dead));
43750 + atomic_read_unchecked(&fscache_n_retrieval_ops),
43751 + atomic_read_unchecked(&fscache_n_retrieval_op_waits),
43752 + atomic_read_unchecked(&fscache_n_retrievals_object_dead));
43753
43754 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
43755 - atomic_read(&fscache_n_stores),
43756 - atomic_read(&fscache_n_stores_ok),
43757 - atomic_read(&fscache_n_stores_again),
43758 - atomic_read(&fscache_n_stores_nobufs),
43759 - atomic_read(&fscache_n_stores_oom));
43760 + atomic_read_unchecked(&fscache_n_stores),
43761 + atomic_read_unchecked(&fscache_n_stores_ok),
43762 + atomic_read_unchecked(&fscache_n_stores_again),
43763 + atomic_read_unchecked(&fscache_n_stores_nobufs),
43764 + atomic_read_unchecked(&fscache_n_stores_oom));
43765 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
43766 - atomic_read(&fscache_n_store_ops),
43767 - atomic_read(&fscache_n_store_calls),
43768 - atomic_read(&fscache_n_store_pages),
43769 - atomic_read(&fscache_n_store_radix_deletes),
43770 - atomic_read(&fscache_n_store_pages_over_limit));
43771 + atomic_read_unchecked(&fscache_n_store_ops),
43772 + atomic_read_unchecked(&fscache_n_store_calls),
43773 + atomic_read_unchecked(&fscache_n_store_pages),
43774 + atomic_read_unchecked(&fscache_n_store_radix_deletes),
43775 + atomic_read_unchecked(&fscache_n_store_pages_over_limit));
43776
43777 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
43778 - atomic_read(&fscache_n_store_vmscan_not_storing),
43779 - atomic_read(&fscache_n_store_vmscan_gone),
43780 - atomic_read(&fscache_n_store_vmscan_busy),
43781 - atomic_read(&fscache_n_store_vmscan_cancelled));
43782 + atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
43783 + atomic_read_unchecked(&fscache_n_store_vmscan_gone),
43784 + atomic_read_unchecked(&fscache_n_store_vmscan_busy),
43785 + atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
43786
43787 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
43788 - atomic_read(&fscache_n_op_pend),
43789 - atomic_read(&fscache_n_op_run),
43790 - atomic_read(&fscache_n_op_enqueue),
43791 - atomic_read(&fscache_n_op_cancelled),
43792 - atomic_read(&fscache_n_op_rejected));
43793 + atomic_read_unchecked(&fscache_n_op_pend),
43794 + atomic_read_unchecked(&fscache_n_op_run),
43795 + atomic_read_unchecked(&fscache_n_op_enqueue),
43796 + atomic_read_unchecked(&fscache_n_op_cancelled),
43797 + atomic_read_unchecked(&fscache_n_op_rejected));
43798 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
43799 - atomic_read(&fscache_n_op_deferred_release),
43800 - atomic_read(&fscache_n_op_release),
43801 - atomic_read(&fscache_n_op_gc));
43802 + atomic_read_unchecked(&fscache_n_op_deferred_release),
43803 + atomic_read_unchecked(&fscache_n_op_release),
43804 + atomic_read_unchecked(&fscache_n_op_gc));
43805
43806 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
43807 atomic_read(&fscache_n_cop_alloc_object),
43808 diff -urNp linux-3.0.8/fs/fs_struct.c linux-3.0.8/fs/fs_struct.c
43809 --- linux-3.0.8/fs/fs_struct.c 2011-07-21 22:17:23.000000000 -0400
43810 +++ linux-3.0.8/fs/fs_struct.c 2011-08-23 21:48:14.000000000 -0400
43811 @@ -4,6 +4,7 @@
43812 #include <linux/path.h>
43813 #include <linux/slab.h>
43814 #include <linux/fs_struct.h>
43815 +#include <linux/grsecurity.h>
43816 #include "internal.h"
43817
43818 static inline void path_get_longterm(struct path *path)
43819 @@ -31,6 +32,7 @@ void set_fs_root(struct fs_struct *fs, s
43820 old_root = fs->root;
43821 fs->root = *path;
43822 path_get_longterm(path);
43823 + gr_set_chroot_entries(current, path);
43824 write_seqcount_end(&fs->seq);
43825 spin_unlock(&fs->lock);
43826 if (old_root.dentry)
43827 @@ -74,6 +76,7 @@ void chroot_fs_refs(struct path *old_roo
43828 && fs->root.mnt == old_root->mnt) {
43829 path_get_longterm(new_root);
43830 fs->root = *new_root;
43831 + gr_set_chroot_entries(p, new_root);
43832 count++;
43833 }
43834 if (fs->pwd.dentry == old_root->dentry
43835 @@ -109,7 +112,8 @@ void exit_fs(struct task_struct *tsk)
43836 spin_lock(&fs->lock);
43837 write_seqcount_begin(&fs->seq);
43838 tsk->fs = NULL;
43839 - kill = !--fs->users;
43840 + gr_clear_chroot_entries(tsk);
43841 + kill = !atomic_dec_return(&fs->users);
43842 write_seqcount_end(&fs->seq);
43843 spin_unlock(&fs->lock);
43844 task_unlock(tsk);
43845 @@ -123,7 +127,7 @@ struct fs_struct *copy_fs_struct(struct
43846 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
43847 /* We don't need to lock fs - think why ;-) */
43848 if (fs) {
43849 - fs->users = 1;
43850 + atomic_set(&fs->users, 1);
43851 fs->in_exec = 0;
43852 spin_lock_init(&fs->lock);
43853 seqcount_init(&fs->seq);
43854 @@ -132,6 +136,9 @@ struct fs_struct *copy_fs_struct(struct
43855 spin_lock(&old->lock);
43856 fs->root = old->root;
43857 path_get_longterm(&fs->root);
43858 + /* instead of calling gr_set_chroot_entries here,
43859 + we call it from every caller of this function
43860 + */
43861 fs->pwd = old->pwd;
43862 path_get_longterm(&fs->pwd);
43863 spin_unlock(&old->lock);
43864 @@ -150,8 +157,9 @@ int unshare_fs_struct(void)
43865
43866 task_lock(current);
43867 spin_lock(&fs->lock);
43868 - kill = !--fs->users;
43869 + kill = !atomic_dec_return(&fs->users);
43870 current->fs = new_fs;
43871 + gr_set_chroot_entries(current, &new_fs->root);
43872 spin_unlock(&fs->lock);
43873 task_unlock(current);
43874
43875 @@ -170,7 +178,7 @@ EXPORT_SYMBOL(current_umask);
43876
43877 /* to be mentioned only in INIT_TASK */
43878 struct fs_struct init_fs = {
43879 - .users = 1,
43880 + .users = ATOMIC_INIT(1),
43881 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
43882 .seq = SEQCNT_ZERO,
43883 .umask = 0022,
43884 @@ -186,12 +194,13 @@ void daemonize_fs_struct(void)
43885 task_lock(current);
43886
43887 spin_lock(&init_fs.lock);
43888 - init_fs.users++;
43889 + atomic_inc(&init_fs.users);
43890 spin_unlock(&init_fs.lock);
43891
43892 spin_lock(&fs->lock);
43893 current->fs = &init_fs;
43894 - kill = !--fs->users;
43895 + gr_set_chroot_entries(current, &current->fs->root);
43896 + kill = !atomic_dec_return(&fs->users);
43897 spin_unlock(&fs->lock);
43898
43899 task_unlock(current);
43900 diff -urNp linux-3.0.8/fs/fuse/cuse.c linux-3.0.8/fs/fuse/cuse.c
43901 --- linux-3.0.8/fs/fuse/cuse.c 2011-07-21 22:17:23.000000000 -0400
43902 +++ linux-3.0.8/fs/fuse/cuse.c 2011-08-23 21:47:56.000000000 -0400
43903 @@ -586,10 +586,12 @@ static int __init cuse_init(void)
43904 INIT_LIST_HEAD(&cuse_conntbl[i]);
43905
43906 /* inherit and extend fuse_dev_operations */
43907 - cuse_channel_fops = fuse_dev_operations;
43908 - cuse_channel_fops.owner = THIS_MODULE;
43909 - cuse_channel_fops.open = cuse_channel_open;
43910 - cuse_channel_fops.release = cuse_channel_release;
43911 + pax_open_kernel();
43912 + memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
43913 + *(void **)&cuse_channel_fops.owner = THIS_MODULE;
43914 + *(void **)&cuse_channel_fops.open = cuse_channel_open;
43915 + *(void **)&cuse_channel_fops.release = cuse_channel_release;
43916 + pax_close_kernel();
43917
43918 cuse_class = class_create(THIS_MODULE, "cuse");
43919 if (IS_ERR(cuse_class))
43920 diff -urNp linux-3.0.8/fs/fuse/dev.c linux-3.0.8/fs/fuse/dev.c
43921 --- linux-3.0.8/fs/fuse/dev.c 2011-10-25 09:10:33.000000000 -0400
43922 +++ linux-3.0.8/fs/fuse/dev.c 2011-10-25 09:10:41.000000000 -0400
43923 @@ -1242,7 +1242,7 @@ static ssize_t fuse_dev_splice_read(stru
43924 ret = 0;
43925 pipe_lock(pipe);
43926
43927 - if (!pipe->readers) {
43928 + if (!atomic_read(&pipe->readers)) {
43929 send_sig(SIGPIPE, current, 0);
43930 if (!ret)
43931 ret = -EPIPE;
43932 diff -urNp linux-3.0.8/fs/fuse/dir.c linux-3.0.8/fs/fuse/dir.c
43933 --- linux-3.0.8/fs/fuse/dir.c 2011-07-21 22:17:23.000000000 -0400
43934 +++ linux-3.0.8/fs/fuse/dir.c 2011-08-23 21:47:56.000000000 -0400
43935 @@ -1148,7 +1148,7 @@ static char *read_link(struct dentry *de
43936 return link;
43937 }
43938
43939 -static void free_link(char *link)
43940 +static void free_link(const char *link)
43941 {
43942 if (!IS_ERR(link))
43943 free_page((unsigned long) link);
43944 diff -urNp linux-3.0.8/fs/gfs2/inode.c linux-3.0.8/fs/gfs2/inode.c
43945 --- linux-3.0.8/fs/gfs2/inode.c 2011-07-21 22:17:23.000000000 -0400
43946 +++ linux-3.0.8/fs/gfs2/inode.c 2011-08-23 21:47:56.000000000 -0400
43947 @@ -1525,7 +1525,7 @@ out:
43948
43949 static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
43950 {
43951 - char *s = nd_get_link(nd);
43952 + const char *s = nd_get_link(nd);
43953 if (!IS_ERR(s))
43954 kfree(s);
43955 }
43956 diff -urNp linux-3.0.8/fs/hfsplus/catalog.c linux-3.0.8/fs/hfsplus/catalog.c
43957 --- linux-3.0.8/fs/hfsplus/catalog.c 2011-07-21 22:17:23.000000000 -0400
43958 +++ linux-3.0.8/fs/hfsplus/catalog.c 2011-08-23 21:48:14.000000000 -0400
43959 @@ -179,6 +179,8 @@ int hfsplus_find_cat(struct super_block
43960 int err;
43961 u16 type;
43962
43963 + pax_track_stack();
43964 +
43965 hfsplus_cat_build_key(sb, fd->search_key, cnid, NULL);
43966 err = hfs_brec_read(fd, &tmp, sizeof(hfsplus_cat_entry));
43967 if (err)
43968 @@ -210,6 +212,8 @@ int hfsplus_create_cat(u32 cnid, struct
43969 int entry_size;
43970 int err;
43971
43972 + pax_track_stack();
43973 +
43974 dprint(DBG_CAT_MOD, "create_cat: %s,%u(%d)\n",
43975 str->name, cnid, inode->i_nlink);
43976 hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd);
43977 @@ -349,6 +353,8 @@ int hfsplus_rename_cat(u32 cnid,
43978 int entry_size, type;
43979 int err = 0;
43980
43981 + pax_track_stack();
43982 +
43983 dprint(DBG_CAT_MOD, "rename_cat: %u - %lu,%s - %lu,%s\n",
43984 cnid, src_dir->i_ino, src_name->name,
43985 dst_dir->i_ino, dst_name->name);
43986 diff -urNp linux-3.0.8/fs/hfsplus/dir.c linux-3.0.8/fs/hfsplus/dir.c
43987 --- linux-3.0.8/fs/hfsplus/dir.c 2011-07-21 22:17:23.000000000 -0400
43988 +++ linux-3.0.8/fs/hfsplus/dir.c 2011-08-23 21:48:14.000000000 -0400
43989 @@ -129,6 +129,8 @@ static int hfsplus_readdir(struct file *
43990 struct hfsplus_readdir_data *rd;
43991 u16 type;
43992
43993 + pax_track_stack();
43994 +
43995 if (filp->f_pos >= inode->i_size)
43996 return 0;
43997
43998 diff -urNp linux-3.0.8/fs/hfsplus/inode.c linux-3.0.8/fs/hfsplus/inode.c
43999 --- linux-3.0.8/fs/hfsplus/inode.c 2011-07-21 22:17:23.000000000 -0400
44000 +++ linux-3.0.8/fs/hfsplus/inode.c 2011-08-23 21:48:14.000000000 -0400
44001 @@ -489,6 +489,8 @@ int hfsplus_cat_read_inode(struct inode
44002 int res = 0;
44003 u16 type;
44004
44005 + pax_track_stack();
44006 +
44007 type = hfs_bnode_read_u16(fd->bnode, fd->entryoffset);
44008
44009 HFSPLUS_I(inode)->linkid = 0;
44010 @@ -552,6 +554,8 @@ int hfsplus_cat_write_inode(struct inode
44011 struct hfs_find_data fd;
44012 hfsplus_cat_entry entry;
44013
44014 + pax_track_stack();
44015 +
44016 if (HFSPLUS_IS_RSRC(inode))
44017 main_inode = HFSPLUS_I(inode)->rsrc_inode;
44018
44019 diff -urNp linux-3.0.8/fs/hfsplus/ioctl.c linux-3.0.8/fs/hfsplus/ioctl.c
44020 --- linux-3.0.8/fs/hfsplus/ioctl.c 2011-07-21 22:17:23.000000000 -0400
44021 +++ linux-3.0.8/fs/hfsplus/ioctl.c 2011-08-23 21:48:14.000000000 -0400
44022 @@ -122,6 +122,8 @@ int hfsplus_setxattr(struct dentry *dent
44023 struct hfsplus_cat_file *file;
44024 int res;
44025
44026 + pax_track_stack();
44027 +
44028 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
44029 return -EOPNOTSUPP;
44030
44031 @@ -166,6 +168,8 @@ ssize_t hfsplus_getxattr(struct dentry *
44032 struct hfsplus_cat_file *file;
44033 ssize_t res = 0;
44034
44035 + pax_track_stack();
44036 +
44037 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
44038 return -EOPNOTSUPP;
44039
44040 diff -urNp linux-3.0.8/fs/hfsplus/super.c linux-3.0.8/fs/hfsplus/super.c
44041 --- linux-3.0.8/fs/hfsplus/super.c 2011-10-25 09:10:33.000000000 -0400
44042 +++ linux-3.0.8/fs/hfsplus/super.c 2011-10-25 09:10:41.000000000 -0400
44043 @@ -340,6 +340,8 @@ static int hfsplus_fill_super(struct sup
44044 struct nls_table *nls = NULL;
44045 int err;
44046
44047 + pax_track_stack();
44048 +
44049 err = -EINVAL;
44050 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
44051 if (!sbi)
44052 diff -urNp linux-3.0.8/fs/hugetlbfs/inode.c linux-3.0.8/fs/hugetlbfs/inode.c
44053 --- linux-3.0.8/fs/hugetlbfs/inode.c 2011-07-21 22:17:23.000000000 -0400
44054 +++ linux-3.0.8/fs/hugetlbfs/inode.c 2011-08-23 21:48:14.000000000 -0400
44055 @@ -914,7 +914,7 @@ static struct file_system_type hugetlbfs
44056 .kill_sb = kill_litter_super,
44057 };
44058
44059 -static struct vfsmount *hugetlbfs_vfsmount;
44060 +struct vfsmount *hugetlbfs_vfsmount;
44061
44062 static int can_do_hugetlb_shm(void)
44063 {
44064 diff -urNp linux-3.0.8/fs/inode.c linux-3.0.8/fs/inode.c
44065 --- linux-3.0.8/fs/inode.c 2011-07-21 22:17:23.000000000 -0400
44066 +++ linux-3.0.8/fs/inode.c 2011-08-23 21:47:56.000000000 -0400
44067 @@ -829,8 +829,8 @@ unsigned int get_next_ino(void)
44068
44069 #ifdef CONFIG_SMP
44070 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
44071 - static atomic_t shared_last_ino;
44072 - int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
44073 + static atomic_unchecked_t shared_last_ino;
44074 + int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
44075
44076 res = next - LAST_INO_BATCH;
44077 }
44078 diff -urNp linux-3.0.8/fs/jbd/checkpoint.c linux-3.0.8/fs/jbd/checkpoint.c
44079 --- linux-3.0.8/fs/jbd/checkpoint.c 2011-07-21 22:17:23.000000000 -0400
44080 +++ linux-3.0.8/fs/jbd/checkpoint.c 2011-08-23 21:48:14.000000000 -0400
44081 @@ -350,6 +350,8 @@ int log_do_checkpoint(journal_t *journal
44082 tid_t this_tid;
44083 int result;
44084
44085 + pax_track_stack();
44086 +
44087 jbd_debug(1, "Start checkpoint\n");
44088
44089 /*
44090 diff -urNp linux-3.0.8/fs/jffs2/compr_rtime.c linux-3.0.8/fs/jffs2/compr_rtime.c
44091 --- linux-3.0.8/fs/jffs2/compr_rtime.c 2011-07-21 22:17:23.000000000 -0400
44092 +++ linux-3.0.8/fs/jffs2/compr_rtime.c 2011-08-23 21:48:14.000000000 -0400
44093 @@ -37,6 +37,8 @@ static int jffs2_rtime_compress(unsigned
44094 int outpos = 0;
44095 int pos=0;
44096
44097 + pax_track_stack();
44098 +
44099 memset(positions,0,sizeof(positions));
44100
44101 while (pos < (*sourcelen) && outpos <= (*dstlen)-2) {
44102 @@ -78,6 +80,8 @@ static int jffs2_rtime_decompress(unsign
44103 int outpos = 0;
44104 int pos=0;
44105
44106 + pax_track_stack();
44107 +
44108 memset(positions,0,sizeof(positions));
44109
44110 while (outpos<destlen) {
44111 diff -urNp linux-3.0.8/fs/jffs2/compr_rubin.c linux-3.0.8/fs/jffs2/compr_rubin.c
44112 --- linux-3.0.8/fs/jffs2/compr_rubin.c 2011-07-21 22:17:23.000000000 -0400
44113 +++ linux-3.0.8/fs/jffs2/compr_rubin.c 2011-08-23 21:48:14.000000000 -0400
44114 @@ -314,6 +314,8 @@ static int jffs2_dynrubin_compress(unsig
44115 int ret;
44116 uint32_t mysrclen, mydstlen;
44117
44118 + pax_track_stack();
44119 +
44120 mysrclen = *sourcelen;
44121 mydstlen = *dstlen - 8;
44122
44123 diff -urNp linux-3.0.8/fs/jffs2/erase.c linux-3.0.8/fs/jffs2/erase.c
44124 --- linux-3.0.8/fs/jffs2/erase.c 2011-07-21 22:17:23.000000000 -0400
44125 +++ linux-3.0.8/fs/jffs2/erase.c 2011-08-23 21:47:56.000000000 -0400
44126 @@ -439,7 +439,8 @@ static void jffs2_mark_erased_block(stru
44127 struct jffs2_unknown_node marker = {
44128 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
44129 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
44130 - .totlen = cpu_to_je32(c->cleanmarker_size)
44131 + .totlen = cpu_to_je32(c->cleanmarker_size),
44132 + .hdr_crc = cpu_to_je32(0)
44133 };
44134
44135 jffs2_prealloc_raw_node_refs(c, jeb, 1);
44136 diff -urNp linux-3.0.8/fs/jffs2/wbuf.c linux-3.0.8/fs/jffs2/wbuf.c
44137 --- linux-3.0.8/fs/jffs2/wbuf.c 2011-07-21 22:17:23.000000000 -0400
44138 +++ linux-3.0.8/fs/jffs2/wbuf.c 2011-08-23 21:47:56.000000000 -0400
44139 @@ -1012,7 +1012,8 @@ static const struct jffs2_unknown_node o
44140 {
44141 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
44142 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
44143 - .totlen = constant_cpu_to_je32(8)
44144 + .totlen = constant_cpu_to_je32(8),
44145 + .hdr_crc = constant_cpu_to_je32(0)
44146 };
44147
44148 /*
44149 diff -urNp linux-3.0.8/fs/jffs2/xattr.c linux-3.0.8/fs/jffs2/xattr.c
44150 --- linux-3.0.8/fs/jffs2/xattr.c 2011-07-21 22:17:23.000000000 -0400
44151 +++ linux-3.0.8/fs/jffs2/xattr.c 2011-08-23 21:48:14.000000000 -0400
44152 @@ -773,6 +773,8 @@ void jffs2_build_xattr_subsystem(struct
44153
44154 BUG_ON(!(c->flags & JFFS2_SB_FLAG_BUILDING));
44155
44156 + pax_track_stack();
44157 +
44158 /* Phase.1 : Merge same xref */
44159 for (i=0; i < XREF_TMPHASH_SIZE; i++)
44160 xref_tmphash[i] = NULL;
44161 diff -urNp linux-3.0.8/fs/jfs/super.c linux-3.0.8/fs/jfs/super.c
44162 --- linux-3.0.8/fs/jfs/super.c 2011-07-21 22:17:23.000000000 -0400
44163 +++ linux-3.0.8/fs/jfs/super.c 2011-08-23 21:47:56.000000000 -0400
44164 @@ -803,7 +803,7 @@ static int __init init_jfs_fs(void)
44165
44166 jfs_inode_cachep =
44167 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
44168 - SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
44169 + SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
44170 init_once);
44171 if (jfs_inode_cachep == NULL)
44172 return -ENOMEM;
44173 diff -urNp linux-3.0.8/fs/Kconfig.binfmt linux-3.0.8/fs/Kconfig.binfmt
44174 --- linux-3.0.8/fs/Kconfig.binfmt 2011-07-21 22:17:23.000000000 -0400
44175 +++ linux-3.0.8/fs/Kconfig.binfmt 2011-08-23 21:47:56.000000000 -0400
44176 @@ -86,7 +86,7 @@ config HAVE_AOUT
44177
44178 config BINFMT_AOUT
44179 tristate "Kernel support for a.out and ECOFF binaries"
44180 - depends on HAVE_AOUT
44181 + depends on HAVE_AOUT && BROKEN
44182 ---help---
44183 A.out (Assembler.OUTput) is a set of formats for libraries and
44184 executables used in the earliest versions of UNIX. Linux used
44185 diff -urNp linux-3.0.8/fs/libfs.c linux-3.0.8/fs/libfs.c
44186 --- linux-3.0.8/fs/libfs.c 2011-07-21 22:17:23.000000000 -0400
44187 +++ linux-3.0.8/fs/libfs.c 2011-08-23 21:47:56.000000000 -0400
44188 @@ -163,6 +163,9 @@ int dcache_readdir(struct file * filp, v
44189
44190 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
44191 struct dentry *next;
44192 + char d_name[sizeof(next->d_iname)];
44193 + const unsigned char *name;
44194 +
44195 next = list_entry(p, struct dentry, d_u.d_child);
44196 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
44197 if (!simple_positive(next)) {
44198 @@ -172,7 +175,12 @@ int dcache_readdir(struct file * filp, v
44199
44200 spin_unlock(&next->d_lock);
44201 spin_unlock(&dentry->d_lock);
44202 - if (filldir(dirent, next->d_name.name,
44203 + name = next->d_name.name;
44204 + if (name == next->d_iname) {
44205 + memcpy(d_name, name, next->d_name.len);
44206 + name = d_name;
44207 + }
44208 + if (filldir(dirent, name,
44209 next->d_name.len, filp->f_pos,
44210 next->d_inode->i_ino,
44211 dt_type(next->d_inode)) < 0)
44212 diff -urNp linux-3.0.8/fs/lockd/clntproc.c linux-3.0.8/fs/lockd/clntproc.c
44213 --- linux-3.0.8/fs/lockd/clntproc.c 2011-07-21 22:17:23.000000000 -0400
44214 +++ linux-3.0.8/fs/lockd/clntproc.c 2011-08-23 21:48:14.000000000 -0400
44215 @@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt
44216 /*
44217 * Cookie counter for NLM requests
44218 */
44219 -static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
44220 +static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
44221
44222 void nlmclnt_next_cookie(struct nlm_cookie *c)
44223 {
44224 - u32 cookie = atomic_inc_return(&nlm_cookie);
44225 + u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
44226
44227 memcpy(c->data, &cookie, 4);
44228 c->len=4;
44229 @@ -620,6 +620,8 @@ nlmclnt_reclaim(struct nlm_host *host, s
44230 struct nlm_rqst reqst, *req;
44231 int status;
44232
44233 + pax_track_stack();
44234 +
44235 req = &reqst;
44236 memset(req, 0, sizeof(*req));
44237 locks_init_lock(&req->a_args.lock.fl);
44238 diff -urNp linux-3.0.8/fs/locks.c linux-3.0.8/fs/locks.c
44239 --- linux-3.0.8/fs/locks.c 2011-07-21 22:17:23.000000000 -0400
44240 +++ linux-3.0.8/fs/locks.c 2011-08-23 21:47:56.000000000 -0400
44241 @@ -2043,16 +2043,16 @@ void locks_remove_flock(struct file *fil
44242 return;
44243
44244 if (filp->f_op && filp->f_op->flock) {
44245 - struct file_lock fl = {
44246 + struct file_lock flock = {
44247 .fl_pid = current->tgid,
44248 .fl_file = filp,
44249 .fl_flags = FL_FLOCK,
44250 .fl_type = F_UNLCK,
44251 .fl_end = OFFSET_MAX,
44252 };
44253 - filp->f_op->flock(filp, F_SETLKW, &fl);
44254 - if (fl.fl_ops && fl.fl_ops->fl_release_private)
44255 - fl.fl_ops->fl_release_private(&fl);
44256 + filp->f_op->flock(filp, F_SETLKW, &flock);
44257 + if (flock.fl_ops && flock.fl_ops->fl_release_private)
44258 + flock.fl_ops->fl_release_private(&flock);
44259 }
44260
44261 lock_flocks();
44262 diff -urNp linux-3.0.8/fs/logfs/super.c linux-3.0.8/fs/logfs/super.c
44263 --- linux-3.0.8/fs/logfs/super.c 2011-07-21 22:17:23.000000000 -0400
44264 +++ linux-3.0.8/fs/logfs/super.c 2011-08-23 21:48:14.000000000 -0400
44265 @@ -266,6 +266,8 @@ static int logfs_recover_sb(struct super
44266 struct logfs_disk_super _ds1, *ds1 = &_ds1;
44267 int err, valid0, valid1;
44268
44269 + pax_track_stack();
44270 +
44271 /* read first superblock */
44272 err = wbuf_read(sb, super->s_sb_ofs[0], sizeof(*ds0), ds0);
44273 if (err)
44274 diff -urNp linux-3.0.8/fs/namei.c linux-3.0.8/fs/namei.c
44275 --- linux-3.0.8/fs/namei.c 2011-10-24 08:05:30.000000000 -0400
44276 +++ linux-3.0.8/fs/namei.c 2011-10-19 10:09:26.000000000 -0400
44277 @@ -237,21 +237,23 @@ int generic_permission(struct inode *ino
44278 return ret;
44279
44280 /*
44281 - * Read/write DACs are always overridable.
44282 - * Executable DACs are overridable for all directories and
44283 - * for non-directories that have least one exec bit set.
44284 + * Searching includes executable on directories, else just read.
44285 */
44286 - if (!(mask & MAY_EXEC) || execute_ok(inode))
44287 - if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
44288 + mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
44289 + if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE))) {
44290 + if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
44291 return 0;
44292 + }
44293
44294 /*
44295 - * Searching includes executable on directories, else just read.
44296 + * Read/write DACs are always overridable.
44297 + * Executable DACs are overridable for all directories and
44298 + * for non-directories that have least one exec bit set.
44299 */
44300 - mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
44301 - if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE)))
44302 - if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
44303 + if (!(mask & MAY_EXEC) || execute_ok(inode)) {
44304 + if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
44305 return 0;
44306 + }
44307
44308 return -EACCES;
44309 }
44310 @@ -593,9 +595,12 @@ static inline int exec_permission(struct
44311 if (ret == -ECHILD)
44312 return ret;
44313
44314 - if (ns_capable(ns, CAP_DAC_OVERRIDE) ||
44315 - ns_capable(ns, CAP_DAC_READ_SEARCH))
44316 + if (ns_capable_nolog(ns, CAP_DAC_OVERRIDE))
44317 goto ok;
44318 + else {
44319 + if (ns_capable(ns, CAP_DAC_READ_SEARCH) || ns_capable(ns, CAP_DAC_OVERRIDE))
44320 + goto ok;
44321 + }
44322
44323 return ret;
44324 ok:
44325 @@ -703,11 +708,19 @@ follow_link(struct path *link, struct na
44326 return error;
44327 }
44328
44329 + if (gr_handle_follow_link(dentry->d_parent->d_inode,
44330 + dentry->d_inode, dentry, nd->path.mnt)) {
44331 + error = -EACCES;
44332 + *p = ERR_PTR(error); /* no ->put_link(), please */
44333 + path_put(&nd->path);
44334 + return error;
44335 + }
44336 +
44337 nd->last_type = LAST_BIND;
44338 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
44339 error = PTR_ERR(*p);
44340 if (!IS_ERR(*p)) {
44341 - char *s = nd_get_link(nd);
44342 + const char *s = nd_get_link(nd);
44343 error = 0;
44344 if (s)
44345 error = __vfs_follow_link(nd, s);
44346 @@ -1598,6 +1611,12 @@ static int path_lookupat(int dfd, const
44347 if (!err)
44348 err = complete_walk(nd);
44349
44350 + if (!(nd->flags & LOOKUP_PARENT) && !gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
44351 + if (!err)
44352 + path_put(&nd->path);
44353 + err = -ENOENT;
44354 + }
44355 +
44356 if (!err && nd->flags & LOOKUP_DIRECTORY) {
44357 if (!nd->inode->i_op->lookup) {
44358 path_put(&nd->path);
44359 @@ -1625,6 +1644,9 @@ static int do_path_lookup(int dfd, const
44360 retval = path_lookupat(dfd, name, flags | LOOKUP_REVAL, nd);
44361
44362 if (likely(!retval)) {
44363 + if (*name != '/' && nd->path.dentry && nd->inode && !gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
44364 + return -ENOENT;
44365 +
44366 if (unlikely(!audit_dummy_context())) {
44367 if (nd->path.dentry && nd->inode)
44368 audit_inode(name, nd->path.dentry);
44369 @@ -1935,6 +1957,30 @@ int vfs_create(struct inode *dir, struct
44370 return error;
44371 }
44372
44373 +/*
44374 + * Note that while the flag value (low two bits) for sys_open means:
44375 + * 00 - read-only
44376 + * 01 - write-only
44377 + * 10 - read-write
44378 + * 11 - special
44379 + * it is changed into
44380 + * 00 - no permissions needed
44381 + * 01 - read-permission
44382 + * 10 - write-permission
44383 + * 11 - read-write
44384 + * for the internal routines (ie open_namei()/follow_link() etc)
44385 + * This is more logical, and also allows the 00 "no perm needed"
44386 + * to be used for symlinks (where the permissions are checked
44387 + * later).
44388 + *
44389 +*/
44390 +static inline int open_to_namei_flags(int flag)
44391 +{
44392 + if ((flag+1) & O_ACCMODE)
44393 + flag++;
44394 + return flag;
44395 +}
44396 +
44397 static int may_open(struct path *path, int acc_mode, int flag)
44398 {
44399 struct dentry *dentry = path->dentry;
44400 @@ -1987,7 +2033,27 @@ static int may_open(struct path *path, i
44401 /*
44402 * Ensure there are no outstanding leases on the file.
44403 */
44404 - return break_lease(inode, flag);
44405 + error = break_lease(inode, flag);
44406 +
44407 + if (error)
44408 + return error;
44409 +
44410 + if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode)) {
44411 + error = -EPERM;
44412 + goto exit;
44413 + }
44414 +
44415 + if (gr_handle_rawio(inode)) {
44416 + error = -EPERM;
44417 + goto exit;
44418 + }
44419 +
44420 + if (!gr_acl_handle_open(dentry, path->mnt, open_to_namei_flags(flag))) {
44421 + error = -EACCES;
44422 + goto exit;
44423 + }
44424 +exit:
44425 + return error;
44426 }
44427
44428 static int handle_truncate(struct file *filp)
44429 @@ -2013,30 +2079,6 @@ static int handle_truncate(struct file *
44430 }
44431
44432 /*
44433 - * Note that while the flag value (low two bits) for sys_open means:
44434 - * 00 - read-only
44435 - * 01 - write-only
44436 - * 10 - read-write
44437 - * 11 - special
44438 - * it is changed into
44439 - * 00 - no permissions needed
44440 - * 01 - read-permission
44441 - * 10 - write-permission
44442 - * 11 - read-write
44443 - * for the internal routines (ie open_namei()/follow_link() etc)
44444 - * This is more logical, and also allows the 00 "no perm needed"
44445 - * to be used for symlinks (where the permissions are checked
44446 - * later).
44447 - *
44448 -*/
44449 -static inline int open_to_namei_flags(int flag)
44450 -{
44451 - if ((flag+1) & O_ACCMODE)
44452 - flag++;
44453 - return flag;
44454 -}
44455 -
44456 -/*
44457 * Handle the last step of open()
44458 */
44459 static struct file *do_last(struct nameidata *nd, struct path *path,
44460 @@ -2045,6 +2087,7 @@ static struct file *do_last(struct namei
44461 struct dentry *dir = nd->path.dentry;
44462 struct dentry *dentry;
44463 int open_flag = op->open_flag;
44464 + int flag = open_to_namei_flags(open_flag);
44465 int will_truncate = open_flag & O_TRUNC;
44466 int want_write = 0;
44467 int acc_mode = op->acc_mode;
44468 @@ -2065,6 +2108,10 @@ static struct file *do_last(struct namei
44469 error = complete_walk(nd);
44470 if (error)
44471 return ERR_PTR(error);
44472 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
44473 + error = -ENOENT;
44474 + goto exit;
44475 + }
44476 audit_inode(pathname, nd->path.dentry);
44477 if (open_flag & O_CREAT) {
44478 error = -EISDIR;
44479 @@ -2075,6 +2122,10 @@ static struct file *do_last(struct namei
44480 error = complete_walk(nd);
44481 if (error)
44482 return ERR_PTR(error);
44483 + if (!gr_acl_handle_hidden_file(dir, nd->path.mnt)) {
44484 + error = -ENOENT;
44485 + goto exit;
44486 + }
44487 audit_inode(pathname, dir);
44488 goto ok;
44489 }
44490 @@ -2097,6 +2148,11 @@ static struct file *do_last(struct namei
44491 if (error)
44492 return ERR_PTR(-ECHILD);
44493
44494 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
44495 + error = -ENOENT;
44496 + goto exit;
44497 + }
44498 +
44499 error = -ENOTDIR;
44500 if (nd->flags & LOOKUP_DIRECTORY) {
44501 if (!nd->inode->i_op->lookup)
44502 @@ -2132,6 +2188,12 @@ static struct file *do_last(struct namei
44503 /* Negative dentry, just create the file */
44504 if (!dentry->d_inode) {
44505 int mode = op->mode;
44506 +
44507 + if (!gr_acl_handle_creat(path->dentry, nd->path.dentry, path->mnt, flag, mode)) {
44508 + error = -EACCES;
44509 + goto exit_mutex_unlock;
44510 + }
44511 +
44512 if (!IS_POSIXACL(dir->d_inode))
44513 mode &= ~current_umask();
44514 /*
44515 @@ -2155,6 +2217,8 @@ static struct file *do_last(struct namei
44516 error = vfs_create(dir->d_inode, dentry, mode, nd);
44517 if (error)
44518 goto exit_mutex_unlock;
44519 + else
44520 + gr_handle_create(path->dentry, path->mnt);
44521 mutex_unlock(&dir->d_inode->i_mutex);
44522 dput(nd->path.dentry);
44523 nd->path.dentry = dentry;
44524 @@ -2164,6 +2228,19 @@ static struct file *do_last(struct namei
44525 /*
44526 * It already exists.
44527 */
44528 +
44529 + if (!gr_acl_handle_hidden_file(dentry, nd->path.mnt)) {
44530 + error = -ENOENT;
44531 + goto exit_mutex_unlock;
44532 + }
44533 +
44534 + /* only check if O_CREAT is specified, all other checks need to go
44535 + into may_open */
44536 + if (gr_handle_fifo(path->dentry, path->mnt, dir, flag, acc_mode)) {
44537 + error = -EACCES;
44538 + goto exit_mutex_unlock;
44539 + }
44540 +
44541 mutex_unlock(&dir->d_inode->i_mutex);
44542 audit_inode(pathname, path->dentry);
44543
44544 @@ -2373,6 +2450,10 @@ struct dentry *lookup_create(struct name
44545 }
44546 return dentry;
44547 eexist:
44548 + if (!gr_acl_handle_hidden_file(dentry, nd->path.mnt)) {
44549 + dput(dentry);
44550 + return ERR_PTR(-ENOENT);
44551 + }
44552 dput(dentry);
44553 dentry = ERR_PTR(-EEXIST);
44554 fail:
44555 @@ -2450,6 +2531,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
44556 error = may_mknod(mode);
44557 if (error)
44558 goto out_dput;
44559 +
44560 + if (gr_handle_chroot_mknod(dentry, nd.path.mnt, mode)) {
44561 + error = -EPERM;
44562 + goto out_dput;
44563 + }
44564 +
44565 + if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
44566 + error = -EACCES;
44567 + goto out_dput;
44568 + }
44569 +
44570 error = mnt_want_write(nd.path.mnt);
44571 if (error)
44572 goto out_dput;
44573 @@ -2470,6 +2562,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
44574 }
44575 out_drop_write:
44576 mnt_drop_write(nd.path.mnt);
44577 +
44578 + if (!error)
44579 + gr_handle_create(dentry, nd.path.mnt);
44580 out_dput:
44581 dput(dentry);
44582 out_unlock:
44583 @@ -2522,6 +2617,11 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
44584 if (IS_ERR(dentry))
44585 goto out_unlock;
44586
44587 + if (!gr_acl_handle_mkdir(dentry, nd.path.dentry, nd.path.mnt)) {
44588 + error = -EACCES;
44589 + goto out_dput;
44590 + }
44591 +
44592 if (!IS_POSIXACL(nd.path.dentry->d_inode))
44593 mode &= ~current_umask();
44594 error = mnt_want_write(nd.path.mnt);
44595 @@ -2533,6 +2633,10 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
44596 error = vfs_mkdir(nd.path.dentry->d_inode, dentry, mode);
44597 out_drop_write:
44598 mnt_drop_write(nd.path.mnt);
44599 +
44600 + if (!error)
44601 + gr_handle_create(dentry, nd.path.mnt);
44602 +
44603 out_dput:
44604 dput(dentry);
44605 out_unlock:
44606 @@ -2615,6 +2719,8 @@ static long do_rmdir(int dfd, const char
44607 char * name;
44608 struct dentry *dentry;
44609 struct nameidata nd;
44610 + ino_t saved_ino = 0;
44611 + dev_t saved_dev = 0;
44612
44613 error = user_path_parent(dfd, pathname, &nd, &name);
44614 if (error)
44615 @@ -2643,6 +2749,17 @@ static long do_rmdir(int dfd, const char
44616 error = -ENOENT;
44617 goto exit3;
44618 }
44619 +
44620 + if (dentry->d_inode->i_nlink <= 1) {
44621 + saved_ino = dentry->d_inode->i_ino;
44622 + saved_dev = gr_get_dev_from_dentry(dentry);
44623 + }
44624 +
44625 + if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
44626 + error = -EACCES;
44627 + goto exit3;
44628 + }
44629 +
44630 error = mnt_want_write(nd.path.mnt);
44631 if (error)
44632 goto exit3;
44633 @@ -2650,6 +2767,8 @@ static long do_rmdir(int dfd, const char
44634 if (error)
44635 goto exit4;
44636 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
44637 + if (!error && (saved_dev || saved_ino))
44638 + gr_handle_delete(saved_ino, saved_dev);
44639 exit4:
44640 mnt_drop_write(nd.path.mnt);
44641 exit3:
44642 @@ -2712,6 +2831,8 @@ static long do_unlinkat(int dfd, const c
44643 struct dentry *dentry;
44644 struct nameidata nd;
44645 struct inode *inode = NULL;
44646 + ino_t saved_ino = 0;
44647 + dev_t saved_dev = 0;
44648
44649 error = user_path_parent(dfd, pathname, &nd, &name);
44650 if (error)
44651 @@ -2734,6 +2855,16 @@ static long do_unlinkat(int dfd, const c
44652 if (!inode)
44653 goto slashes;
44654 ihold(inode);
44655 +
44656 + if (inode->i_nlink <= 1) {
44657 + saved_ino = inode->i_ino;
44658 + saved_dev = gr_get_dev_from_dentry(dentry);
44659 + }
44660 + if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
44661 + error = -EACCES;
44662 + goto exit2;
44663 + }
44664 +
44665 error = mnt_want_write(nd.path.mnt);
44666 if (error)
44667 goto exit2;
44668 @@ -2741,6 +2872,8 @@ static long do_unlinkat(int dfd, const c
44669 if (error)
44670 goto exit3;
44671 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
44672 + if (!error && (saved_ino || saved_dev))
44673 + gr_handle_delete(saved_ino, saved_dev);
44674 exit3:
44675 mnt_drop_write(nd.path.mnt);
44676 exit2:
44677 @@ -2818,6 +2951,11 @@ SYSCALL_DEFINE3(symlinkat, const char __
44678 if (IS_ERR(dentry))
44679 goto out_unlock;
44680
44681 + if (!gr_acl_handle_symlink(dentry, nd.path.dentry, nd.path.mnt, from)) {
44682 + error = -EACCES;
44683 + goto out_dput;
44684 + }
44685 +
44686 error = mnt_want_write(nd.path.mnt);
44687 if (error)
44688 goto out_dput;
44689 @@ -2825,6 +2963,8 @@ SYSCALL_DEFINE3(symlinkat, const char __
44690 if (error)
44691 goto out_drop_write;
44692 error = vfs_symlink(nd.path.dentry->d_inode, dentry, from);
44693 + if (!error)
44694 + gr_handle_create(dentry, nd.path.mnt);
44695 out_drop_write:
44696 mnt_drop_write(nd.path.mnt);
44697 out_dput:
44698 @@ -2933,6 +3073,20 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
44699 error = PTR_ERR(new_dentry);
44700 if (IS_ERR(new_dentry))
44701 goto out_unlock;
44702 +
44703 + if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
44704 + old_path.dentry->d_inode,
44705 + old_path.dentry->d_inode->i_mode, to)) {
44706 + error = -EACCES;
44707 + goto out_dput;
44708 + }
44709 +
44710 + if (!gr_acl_handle_link(new_dentry, nd.path.dentry, nd.path.mnt,
44711 + old_path.dentry, old_path.mnt, to)) {
44712 + error = -EACCES;
44713 + goto out_dput;
44714 + }
44715 +
44716 error = mnt_want_write(nd.path.mnt);
44717 if (error)
44718 goto out_dput;
44719 @@ -2940,6 +3094,8 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
44720 if (error)
44721 goto out_drop_write;
44722 error = vfs_link(old_path.dentry, nd.path.dentry->d_inode, new_dentry);
44723 + if (!error)
44724 + gr_handle_create(new_dentry, nd.path.mnt);
44725 out_drop_write:
44726 mnt_drop_write(nd.path.mnt);
44727 out_dput:
44728 @@ -3117,6 +3273,8 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
44729 char *to;
44730 int error;
44731
44732 + pax_track_stack();
44733 +
44734 error = user_path_parent(olddfd, oldname, &oldnd, &from);
44735 if (error)
44736 goto exit;
44737 @@ -3173,6 +3331,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
44738 if (new_dentry == trap)
44739 goto exit5;
44740
44741 + error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
44742 + old_dentry, old_dir->d_inode, oldnd.path.mnt,
44743 + to);
44744 + if (error)
44745 + goto exit5;
44746 +
44747 error = mnt_want_write(oldnd.path.mnt);
44748 if (error)
44749 goto exit5;
44750 @@ -3182,6 +3346,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
44751 goto exit6;
44752 error = vfs_rename(old_dir->d_inode, old_dentry,
44753 new_dir->d_inode, new_dentry);
44754 + if (!error)
44755 + gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
44756 + new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
44757 exit6:
44758 mnt_drop_write(oldnd.path.mnt);
44759 exit5:
44760 @@ -3207,6 +3374,8 @@ SYSCALL_DEFINE2(rename, const char __use
44761
44762 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
44763 {
44764 + char tmpbuf[64];
44765 + const char *newlink;
44766 int len;
44767
44768 len = PTR_ERR(link);
44769 @@ -3216,7 +3385,14 @@ int vfs_readlink(struct dentry *dentry,
44770 len = strlen(link);
44771 if (len > (unsigned) buflen)
44772 len = buflen;
44773 - if (copy_to_user(buffer, link, len))
44774 +
44775 + if (len < sizeof(tmpbuf)) {
44776 + memcpy(tmpbuf, link, len);
44777 + newlink = tmpbuf;
44778 + } else
44779 + newlink = link;
44780 +
44781 + if (copy_to_user(buffer, newlink, len))
44782 len = -EFAULT;
44783 out:
44784 return len;
44785 diff -urNp linux-3.0.8/fs/namespace.c linux-3.0.8/fs/namespace.c
44786 --- linux-3.0.8/fs/namespace.c 2011-07-21 22:17:23.000000000 -0400
44787 +++ linux-3.0.8/fs/namespace.c 2011-08-23 21:48:14.000000000 -0400
44788 @@ -1328,6 +1328,9 @@ static int do_umount(struct vfsmount *mn
44789 if (!(sb->s_flags & MS_RDONLY))
44790 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
44791 up_write(&sb->s_umount);
44792 +
44793 + gr_log_remount(mnt->mnt_devname, retval);
44794 +
44795 return retval;
44796 }
44797
44798 @@ -1347,6 +1350,9 @@ static int do_umount(struct vfsmount *mn
44799 br_write_unlock(vfsmount_lock);
44800 up_write(&namespace_sem);
44801 release_mounts(&umount_list);
44802 +
44803 + gr_log_unmount(mnt->mnt_devname, retval);
44804 +
44805 return retval;
44806 }
44807
44808 @@ -2338,6 +2344,16 @@ long do_mount(char *dev_name, char *dir_
44809 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
44810 MS_STRICTATIME);
44811
44812 + if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
44813 + retval = -EPERM;
44814 + goto dput_out;
44815 + }
44816 +
44817 + if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
44818 + retval = -EPERM;
44819 + goto dput_out;
44820 + }
44821 +
44822 if (flags & MS_REMOUNT)
44823 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
44824 data_page);
44825 @@ -2352,6 +2368,9 @@ long do_mount(char *dev_name, char *dir_
44826 dev_name, data_page);
44827 dput_out:
44828 path_put(&path);
44829 +
44830 + gr_log_mount(dev_name, dir_name, retval);
44831 +
44832 return retval;
44833 }
44834
44835 @@ -2575,6 +2594,11 @@ SYSCALL_DEFINE2(pivot_root, const char _
44836 if (error)
44837 goto out2;
44838
44839 + if (gr_handle_chroot_pivot()) {
44840 + error = -EPERM;
44841 + goto out2;
44842 + }
44843 +
44844 get_fs_root(current->fs, &root);
44845 error = lock_mount(&old);
44846 if (error)
44847 diff -urNp linux-3.0.8/fs/ncpfs/dir.c linux-3.0.8/fs/ncpfs/dir.c
44848 --- linux-3.0.8/fs/ncpfs/dir.c 2011-07-21 22:17:23.000000000 -0400
44849 +++ linux-3.0.8/fs/ncpfs/dir.c 2011-08-23 21:48:14.000000000 -0400
44850 @@ -299,6 +299,8 @@ ncp_lookup_validate(struct dentry *dentr
44851 int res, val = 0, len;
44852 __u8 __name[NCP_MAXPATHLEN + 1];
44853
44854 + pax_track_stack();
44855 +
44856 if (dentry == dentry->d_sb->s_root)
44857 return 1;
44858
44859 @@ -844,6 +846,8 @@ static struct dentry *ncp_lookup(struct
44860 int error, res, len;
44861 __u8 __name[NCP_MAXPATHLEN + 1];
44862
44863 + pax_track_stack();
44864 +
44865 error = -EIO;
44866 if (!ncp_conn_valid(server))
44867 goto finished;
44868 @@ -931,6 +935,8 @@ int ncp_create_new(struct inode *dir, st
44869 PPRINTK("ncp_create_new: creating %s/%s, mode=%x\n",
44870 dentry->d_parent->d_name.name, dentry->d_name.name, mode);
44871
44872 + pax_track_stack();
44873 +
44874 ncp_age_dentry(server, dentry);
44875 len = sizeof(__name);
44876 error = ncp_io2vol(server, __name, &len, dentry->d_name.name,
44877 @@ -992,6 +998,8 @@ static int ncp_mkdir(struct inode *dir,
44878 int error, len;
44879 __u8 __name[NCP_MAXPATHLEN + 1];
44880
44881 + pax_track_stack();
44882 +
44883 DPRINTK("ncp_mkdir: making %s/%s\n",
44884 dentry->d_parent->d_name.name, dentry->d_name.name);
44885
44886 @@ -1140,6 +1148,8 @@ static int ncp_rename(struct inode *old_
44887 int old_len, new_len;
44888 __u8 __old_name[NCP_MAXPATHLEN + 1], __new_name[NCP_MAXPATHLEN + 1];
44889
44890 + pax_track_stack();
44891 +
44892 DPRINTK("ncp_rename: %s/%s to %s/%s\n",
44893 old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
44894 new_dentry->d_parent->d_name.name, new_dentry->d_name.name);
44895 diff -urNp linux-3.0.8/fs/ncpfs/inode.c linux-3.0.8/fs/ncpfs/inode.c
44896 --- linux-3.0.8/fs/ncpfs/inode.c 2011-07-21 22:17:23.000000000 -0400
44897 +++ linux-3.0.8/fs/ncpfs/inode.c 2011-08-23 21:48:14.000000000 -0400
44898 @@ -461,6 +461,8 @@ static int ncp_fill_super(struct super_b
44899 #endif
44900 struct ncp_entry_info finfo;
44901
44902 + pax_track_stack();
44903 +
44904 memset(&data, 0, sizeof(data));
44905 server = kzalloc(sizeof(struct ncp_server), GFP_KERNEL);
44906 if (!server)
44907 diff -urNp linux-3.0.8/fs/nfs/inode.c linux-3.0.8/fs/nfs/inode.c
44908 --- linux-3.0.8/fs/nfs/inode.c 2011-07-21 22:17:23.000000000 -0400
44909 +++ linux-3.0.8/fs/nfs/inode.c 2011-08-23 21:47:56.000000000 -0400
44910 @@ -150,7 +150,7 @@ static void nfs_zap_caches_locked(struct
44911 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
44912 nfsi->attrtimeo_timestamp = jiffies;
44913
44914 - memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
44915 + memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf));
44916 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
44917 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
44918 else
44919 @@ -1000,16 +1000,16 @@ static int nfs_size_need_update(const st
44920 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
44921 }
44922
44923 -static atomic_long_t nfs_attr_generation_counter;
44924 +static atomic_long_unchecked_t nfs_attr_generation_counter;
44925
44926 static unsigned long nfs_read_attr_generation_counter(void)
44927 {
44928 - return atomic_long_read(&nfs_attr_generation_counter);
44929 + return atomic_long_read_unchecked(&nfs_attr_generation_counter);
44930 }
44931
44932 unsigned long nfs_inc_attr_generation_counter(void)
44933 {
44934 - return atomic_long_inc_return(&nfs_attr_generation_counter);
44935 + return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
44936 }
44937
44938 void nfs_fattr_init(struct nfs_fattr *fattr)
44939 diff -urNp linux-3.0.8/fs/nfsd/nfs4state.c linux-3.0.8/fs/nfsd/nfs4state.c
44940 --- linux-3.0.8/fs/nfsd/nfs4state.c 2011-10-24 08:05:21.000000000 -0400
44941 +++ linux-3.0.8/fs/nfsd/nfs4state.c 2011-08-23 21:48:14.000000000 -0400
44942 @@ -3794,6 +3794,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struc
44943 unsigned int strhashval;
44944 int err;
44945
44946 + pax_track_stack();
44947 +
44948 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
44949 (long long) lock->lk_offset,
44950 (long long) lock->lk_length);
44951 diff -urNp linux-3.0.8/fs/nfsd/nfs4xdr.c linux-3.0.8/fs/nfsd/nfs4xdr.c
44952 --- linux-3.0.8/fs/nfsd/nfs4xdr.c 2011-07-21 22:17:23.000000000 -0400
44953 +++ linux-3.0.8/fs/nfsd/nfs4xdr.c 2011-08-23 21:48:14.000000000 -0400
44954 @@ -1788,6 +1788,8 @@ nfsd4_encode_fattr(struct svc_fh *fhp, s
44955 .dentry = dentry,
44956 };
44957
44958 + pax_track_stack();
44959 +
44960 BUG_ON(bmval1 & NFSD_WRITEONLY_ATTRS_WORD1);
44961 BUG_ON(bmval0 & ~nfsd_suppattrs0(minorversion));
44962 BUG_ON(bmval1 & ~nfsd_suppattrs1(minorversion));
44963 diff -urNp linux-3.0.8/fs/nfsd/vfs.c linux-3.0.8/fs/nfsd/vfs.c
44964 --- linux-3.0.8/fs/nfsd/vfs.c 2011-07-21 22:17:23.000000000 -0400
44965 +++ linux-3.0.8/fs/nfsd/vfs.c 2011-10-06 04:17:55.000000000 -0400
44966 @@ -896,7 +896,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, st
44967 } else {
44968 oldfs = get_fs();
44969 set_fs(KERNEL_DS);
44970 - host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
44971 + host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
44972 set_fs(oldfs);
44973 }
44974
44975 @@ -1000,7 +1000,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, s
44976
44977 /* Write the data. */
44978 oldfs = get_fs(); set_fs(KERNEL_DS);
44979 - host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
44980 + host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &offset);
44981 set_fs(oldfs);
44982 if (host_err < 0)
44983 goto out_nfserr;
44984 @@ -1535,7 +1535,7 @@ nfsd_readlink(struct svc_rqst *rqstp, st
44985 */
44986
44987 oldfs = get_fs(); set_fs(KERNEL_DS);
44988 - host_err = inode->i_op->readlink(dentry, buf, *lenp);
44989 + host_err = inode->i_op->readlink(dentry, (char __force_user *)buf, *lenp);
44990 set_fs(oldfs);
44991
44992 if (host_err < 0)
44993 diff -urNp linux-3.0.8/fs/notify/fanotify/fanotify_user.c linux-3.0.8/fs/notify/fanotify/fanotify_user.c
44994 --- linux-3.0.8/fs/notify/fanotify/fanotify_user.c 2011-07-21 22:17:23.000000000 -0400
44995 +++ linux-3.0.8/fs/notify/fanotify/fanotify_user.c 2011-08-23 21:48:14.000000000 -0400
44996 @@ -276,7 +276,8 @@ static ssize_t copy_event_to_user(struct
44997 goto out_close_fd;
44998
44999 ret = -EFAULT;
45000 - if (copy_to_user(buf, &fanotify_event_metadata,
45001 + if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
45002 + copy_to_user(buf, &fanotify_event_metadata,
45003 fanotify_event_metadata.event_len))
45004 goto out_kill_access_response;
45005
45006 diff -urNp linux-3.0.8/fs/notify/notification.c linux-3.0.8/fs/notify/notification.c
45007 --- linux-3.0.8/fs/notify/notification.c 2011-07-21 22:17:23.000000000 -0400
45008 +++ linux-3.0.8/fs/notify/notification.c 2011-08-23 21:47:56.000000000 -0400
45009 @@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event
45010 * get set to 0 so it will never get 'freed'
45011 */
45012 static struct fsnotify_event *q_overflow_event;
45013 -static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
45014 +static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
45015
45016 /**
45017 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
45018 @@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = A
45019 */
45020 u32 fsnotify_get_cookie(void)
45021 {
45022 - return atomic_inc_return(&fsnotify_sync_cookie);
45023 + return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
45024 }
45025 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
45026
45027 diff -urNp linux-3.0.8/fs/ntfs/dir.c linux-3.0.8/fs/ntfs/dir.c
45028 --- linux-3.0.8/fs/ntfs/dir.c 2011-07-21 22:17:23.000000000 -0400
45029 +++ linux-3.0.8/fs/ntfs/dir.c 2011-08-23 21:47:56.000000000 -0400
45030 @@ -1329,7 +1329,7 @@ find_next_index_buffer:
45031 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
45032 ~(s64)(ndir->itype.index.block_size - 1)));
45033 /* Bounds checks. */
45034 - if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
45035 + if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
45036 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
45037 "inode 0x%lx or driver bug.", vdir->i_ino);
45038 goto err_out;
45039 diff -urNp linux-3.0.8/fs/ntfs/file.c linux-3.0.8/fs/ntfs/file.c
45040 --- linux-3.0.8/fs/ntfs/file.c 2011-07-21 22:17:23.000000000 -0400
45041 +++ linux-3.0.8/fs/ntfs/file.c 2011-08-23 21:47:56.000000000 -0400
45042 @@ -2222,6 +2222,6 @@ const struct inode_operations ntfs_file_
45043 #endif /* NTFS_RW */
45044 };
45045
45046 -const struct file_operations ntfs_empty_file_ops = {};
45047 +const struct file_operations ntfs_empty_file_ops __read_only;
45048
45049 -const struct inode_operations ntfs_empty_inode_ops = {};
45050 +const struct inode_operations ntfs_empty_inode_ops __read_only;
45051 diff -urNp linux-3.0.8/fs/ocfs2/localalloc.c linux-3.0.8/fs/ocfs2/localalloc.c
45052 --- linux-3.0.8/fs/ocfs2/localalloc.c 2011-07-21 22:17:23.000000000 -0400
45053 +++ linux-3.0.8/fs/ocfs2/localalloc.c 2011-08-23 21:47:56.000000000 -0400
45054 @@ -1283,7 +1283,7 @@ static int ocfs2_local_alloc_slide_windo
45055 goto bail;
45056 }
45057
45058 - atomic_inc(&osb->alloc_stats.moves);
45059 + atomic_inc_unchecked(&osb->alloc_stats.moves);
45060
45061 bail:
45062 if (handle)
45063 diff -urNp linux-3.0.8/fs/ocfs2/namei.c linux-3.0.8/fs/ocfs2/namei.c
45064 --- linux-3.0.8/fs/ocfs2/namei.c 2011-07-21 22:17:23.000000000 -0400
45065 +++ linux-3.0.8/fs/ocfs2/namei.c 2011-08-23 21:48:14.000000000 -0400
45066 @@ -1063,6 +1063,8 @@ static int ocfs2_rename(struct inode *ol
45067 struct ocfs2_dir_lookup_result orphan_insert = { NULL, };
45068 struct ocfs2_dir_lookup_result target_insert = { NULL, };
45069
45070 + pax_track_stack();
45071 +
45072 /* At some point it might be nice to break this function up a
45073 * bit. */
45074
45075 diff -urNp linux-3.0.8/fs/ocfs2/ocfs2.h linux-3.0.8/fs/ocfs2/ocfs2.h
45076 --- linux-3.0.8/fs/ocfs2/ocfs2.h 2011-07-21 22:17:23.000000000 -0400
45077 +++ linux-3.0.8/fs/ocfs2/ocfs2.h 2011-08-23 21:47:56.000000000 -0400
45078 @@ -235,11 +235,11 @@ enum ocfs2_vol_state
45079
45080 struct ocfs2_alloc_stats
45081 {
45082 - atomic_t moves;
45083 - atomic_t local_data;
45084 - atomic_t bitmap_data;
45085 - atomic_t bg_allocs;
45086 - atomic_t bg_extends;
45087 + atomic_unchecked_t moves;
45088 + atomic_unchecked_t local_data;
45089 + atomic_unchecked_t bitmap_data;
45090 + atomic_unchecked_t bg_allocs;
45091 + atomic_unchecked_t bg_extends;
45092 };
45093
45094 enum ocfs2_local_alloc_state
45095 diff -urNp linux-3.0.8/fs/ocfs2/suballoc.c linux-3.0.8/fs/ocfs2/suballoc.c
45096 --- linux-3.0.8/fs/ocfs2/suballoc.c 2011-07-21 22:17:23.000000000 -0400
45097 +++ linux-3.0.8/fs/ocfs2/suballoc.c 2011-08-23 21:47:56.000000000 -0400
45098 @@ -872,7 +872,7 @@ static int ocfs2_reserve_suballoc_bits(s
45099 mlog_errno(status);
45100 goto bail;
45101 }
45102 - atomic_inc(&osb->alloc_stats.bg_extends);
45103 + atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
45104
45105 /* You should never ask for this much metadata */
45106 BUG_ON(bits_wanted >
45107 @@ -2008,7 +2008,7 @@ int ocfs2_claim_metadata(handle_t *handl
45108 mlog_errno(status);
45109 goto bail;
45110 }
45111 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
45112 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
45113
45114 *suballoc_loc = res.sr_bg_blkno;
45115 *suballoc_bit_start = res.sr_bit_offset;
45116 @@ -2172,7 +2172,7 @@ int ocfs2_claim_new_inode_at_loc(handle_
45117 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
45118 res->sr_bits);
45119
45120 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
45121 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
45122
45123 BUG_ON(res->sr_bits != 1);
45124
45125 @@ -2214,7 +2214,7 @@ int ocfs2_claim_new_inode(handle_t *hand
45126 mlog_errno(status);
45127 goto bail;
45128 }
45129 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
45130 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
45131
45132 BUG_ON(res.sr_bits != 1);
45133
45134 @@ -2318,7 +2318,7 @@ int __ocfs2_claim_clusters(handle_t *han
45135 cluster_start,
45136 num_clusters);
45137 if (!status)
45138 - atomic_inc(&osb->alloc_stats.local_data);
45139 + atomic_inc_unchecked(&osb->alloc_stats.local_data);
45140 } else {
45141 if (min_clusters > (osb->bitmap_cpg - 1)) {
45142 /* The only paths asking for contiguousness
45143 @@ -2344,7 +2344,7 @@ int __ocfs2_claim_clusters(handle_t *han
45144 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
45145 res.sr_bg_blkno,
45146 res.sr_bit_offset);
45147 - atomic_inc(&osb->alloc_stats.bitmap_data);
45148 + atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
45149 *num_clusters = res.sr_bits;
45150 }
45151 }
45152 diff -urNp linux-3.0.8/fs/ocfs2/super.c linux-3.0.8/fs/ocfs2/super.c
45153 --- linux-3.0.8/fs/ocfs2/super.c 2011-07-21 22:17:23.000000000 -0400
45154 +++ linux-3.0.8/fs/ocfs2/super.c 2011-08-23 21:47:56.000000000 -0400
45155 @@ -300,11 +300,11 @@ static int ocfs2_osb_dump(struct ocfs2_s
45156 "%10s => GlobalAllocs: %d LocalAllocs: %d "
45157 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
45158 "Stats",
45159 - atomic_read(&osb->alloc_stats.bitmap_data),
45160 - atomic_read(&osb->alloc_stats.local_data),
45161 - atomic_read(&osb->alloc_stats.bg_allocs),
45162 - atomic_read(&osb->alloc_stats.moves),
45163 - atomic_read(&osb->alloc_stats.bg_extends));
45164 + atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
45165 + atomic_read_unchecked(&osb->alloc_stats.local_data),
45166 + atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
45167 + atomic_read_unchecked(&osb->alloc_stats.moves),
45168 + atomic_read_unchecked(&osb->alloc_stats.bg_extends));
45169
45170 out += snprintf(buf + out, len - out,
45171 "%10s => State: %u Descriptor: %llu Size: %u bits "
45172 @@ -2112,11 +2112,11 @@ static int ocfs2_initialize_super(struct
45173 spin_lock_init(&osb->osb_xattr_lock);
45174 ocfs2_init_steal_slots(osb);
45175
45176 - atomic_set(&osb->alloc_stats.moves, 0);
45177 - atomic_set(&osb->alloc_stats.local_data, 0);
45178 - atomic_set(&osb->alloc_stats.bitmap_data, 0);
45179 - atomic_set(&osb->alloc_stats.bg_allocs, 0);
45180 - atomic_set(&osb->alloc_stats.bg_extends, 0);
45181 + atomic_set_unchecked(&osb->alloc_stats.moves, 0);
45182 + atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
45183 + atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
45184 + atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
45185 + atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
45186
45187 /* Copy the blockcheck stats from the superblock probe */
45188 osb->osb_ecc_stats = *stats;
45189 diff -urNp linux-3.0.8/fs/ocfs2/symlink.c linux-3.0.8/fs/ocfs2/symlink.c
45190 --- linux-3.0.8/fs/ocfs2/symlink.c 2011-07-21 22:17:23.000000000 -0400
45191 +++ linux-3.0.8/fs/ocfs2/symlink.c 2011-08-23 21:47:56.000000000 -0400
45192 @@ -142,7 +142,7 @@ bail:
45193
45194 static void ocfs2_fast_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
45195 {
45196 - char *link = nd_get_link(nd);
45197 + const char *link = nd_get_link(nd);
45198 if (!IS_ERR(link))
45199 kfree(link);
45200 }
45201 diff -urNp linux-3.0.8/fs/open.c linux-3.0.8/fs/open.c
45202 --- linux-3.0.8/fs/open.c 2011-07-21 22:17:23.000000000 -0400
45203 +++ linux-3.0.8/fs/open.c 2011-09-14 09:16:46.000000000 -0400
45204 @@ -112,6 +112,10 @@ static long do_sys_truncate(const char _
45205 error = locks_verify_truncate(inode, NULL, length);
45206 if (!error)
45207 error = security_path_truncate(&path);
45208 +
45209 + if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
45210 + error = -EACCES;
45211 +
45212 if (!error)
45213 error = do_truncate(path.dentry, length, 0, NULL);
45214
45215 @@ -358,6 +362,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, con
45216 if (__mnt_is_readonly(path.mnt))
45217 res = -EROFS;
45218
45219 + if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
45220 + res = -EACCES;
45221 +
45222 out_path_release:
45223 path_put(&path);
45224 out:
45225 @@ -384,6 +391,8 @@ SYSCALL_DEFINE1(chdir, const char __user
45226 if (error)
45227 goto dput_and_out;
45228
45229 + gr_log_chdir(path.dentry, path.mnt);
45230 +
45231 set_fs_pwd(current->fs, &path);
45232
45233 dput_and_out:
45234 @@ -410,6 +419,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd
45235 goto out_putf;
45236
45237 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
45238 +
45239 + if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
45240 + error = -EPERM;
45241 +
45242 + if (!error)
45243 + gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
45244 +
45245 if (!error)
45246 set_fs_pwd(current->fs, &file->f_path);
45247 out_putf:
45248 @@ -438,7 +454,13 @@ SYSCALL_DEFINE1(chroot, const char __use
45249 if (error)
45250 goto dput_and_out;
45251
45252 + if (gr_handle_chroot_chroot(path.dentry, path.mnt))
45253 + goto dput_and_out;
45254 +
45255 set_fs_root(current->fs, &path);
45256 +
45257 + gr_handle_chroot_chdir(&path);
45258 +
45259 error = 0;
45260 dput_and_out:
45261 path_put(&path);
45262 @@ -466,12 +488,25 @@ SYSCALL_DEFINE2(fchmod, unsigned int, fd
45263 err = mnt_want_write_file(file);
45264 if (err)
45265 goto out_putf;
45266 +
45267 mutex_lock(&inode->i_mutex);
45268 +
45269 + if (!gr_acl_handle_fchmod(dentry, file->f_vfsmnt, mode)) {
45270 + err = -EACCES;
45271 + goto out_unlock;
45272 + }
45273 +
45274 err = security_path_chmod(dentry, file->f_vfsmnt, mode);
45275 if (err)
45276 goto out_unlock;
45277 if (mode == (mode_t) -1)
45278 mode = inode->i_mode;
45279 +
45280 + if (gr_handle_chroot_chmod(dentry, file->f_vfsmnt, mode)) {
45281 + err = -EACCES;
45282 + goto out_unlock;
45283 + }
45284 +
45285 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
45286 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
45287 err = notify_change(dentry, &newattrs);
45288 @@ -499,12 +534,25 @@ SYSCALL_DEFINE3(fchmodat, int, dfd, cons
45289 error = mnt_want_write(path.mnt);
45290 if (error)
45291 goto dput_and_out;
45292 +
45293 mutex_lock(&inode->i_mutex);
45294 +
45295 + if (!gr_acl_handle_chmod(path.dentry, path.mnt, mode)) {
45296 + error = -EACCES;
45297 + goto out_unlock;
45298 + }
45299 +
45300 error = security_path_chmod(path.dentry, path.mnt, mode);
45301 if (error)
45302 goto out_unlock;
45303 if (mode == (mode_t) -1)
45304 mode = inode->i_mode;
45305 +
45306 + if (gr_handle_chroot_chmod(path.dentry, path.mnt, mode)) {
45307 + error = -EACCES;
45308 + goto out_unlock;
45309 + }
45310 +
45311 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
45312 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
45313 error = notify_change(path.dentry, &newattrs);
45314 @@ -528,6 +576,9 @@ static int chown_common(struct path *pat
45315 int error;
45316 struct iattr newattrs;
45317
45318 + if (!gr_acl_handle_chown(path->dentry, path->mnt))
45319 + return -EACCES;
45320 +
45321 newattrs.ia_valid = ATTR_CTIME;
45322 if (user != (uid_t) -1) {
45323 newattrs.ia_valid |= ATTR_UID;
45324 @@ -998,7 +1049,10 @@ long do_sys_open(int dfd, const char __u
45325 if (!IS_ERR(tmp)) {
45326 fd = get_unused_fd_flags(flags);
45327 if (fd >= 0) {
45328 - struct file *f = do_filp_open(dfd, tmp, &op, lookup);
45329 + struct file *f;
45330 + /* don't allow to be set by userland */
45331 + flags &= ~FMODE_GREXEC;
45332 + f = do_filp_open(dfd, tmp, &op, lookup);
45333 if (IS_ERR(f)) {
45334 put_unused_fd(fd);
45335 fd = PTR_ERR(f);
45336 diff -urNp linux-3.0.8/fs/partitions/ldm.c linux-3.0.8/fs/partitions/ldm.c
45337 --- linux-3.0.8/fs/partitions/ldm.c 2011-07-21 22:17:23.000000000 -0400
45338 +++ linux-3.0.8/fs/partitions/ldm.c 2011-08-23 21:48:14.000000000 -0400
45339 @@ -1311,6 +1311,7 @@ static bool ldm_frag_add (const u8 *data
45340 ldm_error ("A VBLK claims to have %d parts.", num);
45341 return false;
45342 }
45343 +
45344 if (rec >= num) {
45345 ldm_error("REC value (%d) exceeds NUM value (%d)", rec, num);
45346 return false;
45347 @@ -1322,7 +1323,7 @@ static bool ldm_frag_add (const u8 *data
45348 goto found;
45349 }
45350
45351 - f = kmalloc (sizeof (*f) + size*num, GFP_KERNEL);
45352 + f = kmalloc (size*num + sizeof (*f), GFP_KERNEL);
45353 if (!f) {
45354 ldm_crit ("Out of memory.");
45355 return false;
45356 diff -urNp linux-3.0.8/fs/pipe.c linux-3.0.8/fs/pipe.c
45357 --- linux-3.0.8/fs/pipe.c 2011-07-21 22:17:23.000000000 -0400
45358 +++ linux-3.0.8/fs/pipe.c 2011-08-23 21:48:14.000000000 -0400
45359 @@ -420,9 +420,9 @@ redo:
45360 }
45361 if (bufs) /* More to do? */
45362 continue;
45363 - if (!pipe->writers)
45364 + if (!atomic_read(&pipe->writers))
45365 break;
45366 - if (!pipe->waiting_writers) {
45367 + if (!atomic_read(&pipe->waiting_writers)) {
45368 /* syscall merging: Usually we must not sleep
45369 * if O_NONBLOCK is set, or if we got some data.
45370 * But if a writer sleeps in kernel space, then
45371 @@ -481,7 +481,7 @@ pipe_write(struct kiocb *iocb, const str
45372 mutex_lock(&inode->i_mutex);
45373 pipe = inode->i_pipe;
45374
45375 - if (!pipe->readers) {
45376 + if (!atomic_read(&pipe->readers)) {
45377 send_sig(SIGPIPE, current, 0);
45378 ret = -EPIPE;
45379 goto out;
45380 @@ -530,7 +530,7 @@ redo1:
45381 for (;;) {
45382 int bufs;
45383
45384 - if (!pipe->readers) {
45385 + if (!atomic_read(&pipe->readers)) {
45386 send_sig(SIGPIPE, current, 0);
45387 if (!ret)
45388 ret = -EPIPE;
45389 @@ -616,9 +616,9 @@ redo2:
45390 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
45391 do_wakeup = 0;
45392 }
45393 - pipe->waiting_writers++;
45394 + atomic_inc(&pipe->waiting_writers);
45395 pipe_wait(pipe);
45396 - pipe->waiting_writers--;
45397 + atomic_dec(&pipe->waiting_writers);
45398 }
45399 out:
45400 mutex_unlock(&inode->i_mutex);
45401 @@ -685,7 +685,7 @@ pipe_poll(struct file *filp, poll_table
45402 mask = 0;
45403 if (filp->f_mode & FMODE_READ) {
45404 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
45405 - if (!pipe->writers && filp->f_version != pipe->w_counter)
45406 + if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
45407 mask |= POLLHUP;
45408 }
45409
45410 @@ -695,7 +695,7 @@ pipe_poll(struct file *filp, poll_table
45411 * Most Unices do not set POLLERR for FIFOs but on Linux they
45412 * behave exactly like pipes for poll().
45413 */
45414 - if (!pipe->readers)
45415 + if (!atomic_read(&pipe->readers))
45416 mask |= POLLERR;
45417 }
45418
45419 @@ -709,10 +709,10 @@ pipe_release(struct inode *inode, int de
45420
45421 mutex_lock(&inode->i_mutex);
45422 pipe = inode->i_pipe;
45423 - pipe->readers -= decr;
45424 - pipe->writers -= decw;
45425 + atomic_sub(decr, &pipe->readers);
45426 + atomic_sub(decw, &pipe->writers);
45427
45428 - if (!pipe->readers && !pipe->writers) {
45429 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
45430 free_pipe_info(inode);
45431 } else {
45432 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
45433 @@ -802,7 +802,7 @@ pipe_read_open(struct inode *inode, stru
45434
45435 if (inode->i_pipe) {
45436 ret = 0;
45437 - inode->i_pipe->readers++;
45438 + atomic_inc(&inode->i_pipe->readers);
45439 }
45440
45441 mutex_unlock(&inode->i_mutex);
45442 @@ -819,7 +819,7 @@ pipe_write_open(struct inode *inode, str
45443
45444 if (inode->i_pipe) {
45445 ret = 0;
45446 - inode->i_pipe->writers++;
45447 + atomic_inc(&inode->i_pipe->writers);
45448 }
45449
45450 mutex_unlock(&inode->i_mutex);
45451 @@ -837,9 +837,9 @@ pipe_rdwr_open(struct inode *inode, stru
45452 if (inode->i_pipe) {
45453 ret = 0;
45454 if (filp->f_mode & FMODE_READ)
45455 - inode->i_pipe->readers++;
45456 + atomic_inc(&inode->i_pipe->readers);
45457 if (filp->f_mode & FMODE_WRITE)
45458 - inode->i_pipe->writers++;
45459 + atomic_inc(&inode->i_pipe->writers);
45460 }
45461
45462 mutex_unlock(&inode->i_mutex);
45463 @@ -931,7 +931,7 @@ void free_pipe_info(struct inode *inode)
45464 inode->i_pipe = NULL;
45465 }
45466
45467 -static struct vfsmount *pipe_mnt __read_mostly;
45468 +struct vfsmount *pipe_mnt __read_mostly;
45469
45470 /*
45471 * pipefs_dname() is called from d_path().
45472 @@ -961,7 +961,8 @@ static struct inode * get_pipe_inode(voi
45473 goto fail_iput;
45474 inode->i_pipe = pipe;
45475
45476 - pipe->readers = pipe->writers = 1;
45477 + atomic_set(&pipe->readers, 1);
45478 + atomic_set(&pipe->writers, 1);
45479 inode->i_fop = &rdwr_pipefifo_fops;
45480
45481 /*
45482 diff -urNp linux-3.0.8/fs/proc/array.c linux-3.0.8/fs/proc/array.c
45483 --- linux-3.0.8/fs/proc/array.c 2011-07-21 22:17:23.000000000 -0400
45484 +++ linux-3.0.8/fs/proc/array.c 2011-08-23 21:48:14.000000000 -0400
45485 @@ -60,6 +60,7 @@
45486 #include <linux/tty.h>
45487 #include <linux/string.h>
45488 #include <linux/mman.h>
45489 +#include <linux/grsecurity.h>
45490 #include <linux/proc_fs.h>
45491 #include <linux/ioport.h>
45492 #include <linux/uaccess.h>
45493 @@ -337,6 +338,21 @@ static void task_cpus_allowed(struct seq
45494 seq_putc(m, '\n');
45495 }
45496
45497 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
45498 +static inline void task_pax(struct seq_file *m, struct task_struct *p)
45499 +{
45500 + if (p->mm)
45501 + seq_printf(m, "PaX:\t%c%c%c%c%c\n",
45502 + p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
45503 + p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
45504 + p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
45505 + p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
45506 + p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
45507 + else
45508 + seq_printf(m, "PaX:\t-----\n");
45509 +}
45510 +#endif
45511 +
45512 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
45513 struct pid *pid, struct task_struct *task)
45514 {
45515 @@ -354,9 +370,24 @@ int proc_pid_status(struct seq_file *m,
45516 task_cpus_allowed(m, task);
45517 cpuset_task_status_allowed(m, task);
45518 task_context_switch_counts(m, task);
45519 +
45520 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
45521 + task_pax(m, task);
45522 +#endif
45523 +
45524 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
45525 + task_grsec_rbac(m, task);
45526 +#endif
45527 +
45528 return 0;
45529 }
45530
45531 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45532 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
45533 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
45534 + _mm->pax_flags & MF_PAX_SEGMEXEC))
45535 +#endif
45536 +
45537 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
45538 struct pid *pid, struct task_struct *task, int whole)
45539 {
45540 @@ -375,9 +406,11 @@ static int do_task_stat(struct seq_file
45541 cputime_t cutime, cstime, utime, stime;
45542 cputime_t cgtime, gtime;
45543 unsigned long rsslim = 0;
45544 - char tcomm[sizeof(task->comm)];
45545 + char tcomm[sizeof(task->comm)] = { 0 };
45546 unsigned long flags;
45547
45548 + pax_track_stack();
45549 +
45550 state = *get_task_state(task);
45551 vsize = eip = esp = 0;
45552 permitted = ptrace_may_access(task, PTRACE_MODE_READ);
45553 @@ -449,6 +482,19 @@ static int do_task_stat(struct seq_file
45554 gtime = task->gtime;
45555 }
45556
45557 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45558 + if (PAX_RAND_FLAGS(mm)) {
45559 + eip = 0;
45560 + esp = 0;
45561 + wchan = 0;
45562 + }
45563 +#endif
45564 +#ifdef CONFIG_GRKERNSEC_HIDESYM
45565 + wchan = 0;
45566 + eip =0;
45567 + esp =0;
45568 +#endif
45569 +
45570 /* scale priority and nice values from timeslices to -20..20 */
45571 /* to make it look like a "normal" Unix priority/nice value */
45572 priority = task_prio(task);
45573 @@ -489,9 +535,15 @@ static int do_task_stat(struct seq_file
45574 vsize,
45575 mm ? get_mm_rss(mm) : 0,
45576 rsslim,
45577 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45578 + PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0),
45579 + PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0),
45580 + PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0),
45581 +#else
45582 mm ? (permitted ? mm->start_code : 1) : 0,
45583 mm ? (permitted ? mm->end_code : 1) : 0,
45584 (permitted && mm) ? mm->start_stack : 0,
45585 +#endif
45586 esp,
45587 eip,
45588 /* The signal information here is obsolete.
45589 @@ -544,3 +596,18 @@ int proc_pid_statm(struct seq_file *m, s
45590
45591 return 0;
45592 }
45593 +
45594 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
45595 +int proc_pid_ipaddr(struct task_struct *task, char *buffer)
45596 +{
45597 + u32 curr_ip = 0;
45598 + unsigned long flags;
45599 +
45600 + if (lock_task_sighand(task, &flags)) {
45601 + curr_ip = task->signal->curr_ip;
45602 + unlock_task_sighand(task, &flags);
45603 + }
45604 +
45605 + return sprintf(buffer, "%pI4\n", &curr_ip);
45606 +}
45607 +#endif
45608 diff -urNp linux-3.0.8/fs/proc/base.c linux-3.0.8/fs/proc/base.c
45609 --- linux-3.0.8/fs/proc/base.c 2011-10-24 08:05:21.000000000 -0400
45610 +++ linux-3.0.8/fs/proc/base.c 2011-10-19 03:59:32.000000000 -0400
45611 @@ -107,6 +107,22 @@ struct pid_entry {
45612 union proc_op op;
45613 };
45614
45615 +struct getdents_callback {
45616 + struct linux_dirent __user * current_dir;
45617 + struct linux_dirent __user * previous;
45618 + struct file * file;
45619 + int count;
45620 + int error;
45621 +};
45622 +
45623 +static int gr_fake_filldir(void * __buf, const char *name, int namlen,
45624 + loff_t offset, u64 ino, unsigned int d_type)
45625 +{
45626 + struct getdents_callback * buf = (struct getdents_callback *) __buf;
45627 + buf->error = -EINVAL;
45628 + return 0;
45629 +}
45630 +
45631 #define NOD(NAME, MODE, IOP, FOP, OP) { \
45632 .name = (NAME), \
45633 .len = sizeof(NAME) - 1, \
45634 @@ -209,6 +225,9 @@ static struct mm_struct *__check_mem_per
45635 if (task == current)
45636 return mm;
45637
45638 + if (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))
45639 + return ERR_PTR(-EPERM);
45640 +
45641 /*
45642 * If current is actively ptrace'ing, and would also be
45643 * permitted to freshly attach with ptrace now, permit it.
45644 @@ -282,6 +301,9 @@ static int proc_pid_cmdline(struct task_
45645 if (!mm->arg_end)
45646 goto out_mm; /* Shh! No looking before we're done */
45647
45648 + if (gr_acl_handle_procpidmem(task))
45649 + goto out_mm;
45650 +
45651 len = mm->arg_end - mm->arg_start;
45652
45653 if (len > PAGE_SIZE)
45654 @@ -309,12 +331,28 @@ out:
45655 return res;
45656 }
45657
45658 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45659 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
45660 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
45661 + _mm->pax_flags & MF_PAX_SEGMEXEC))
45662 +#endif
45663 +
45664 static int proc_pid_auxv(struct task_struct *task, char *buffer)
45665 {
45666 struct mm_struct *mm = mm_for_maps(task);
45667 int res = PTR_ERR(mm);
45668 if (mm && !IS_ERR(mm)) {
45669 unsigned int nwords = 0;
45670 +
45671 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45672 + /* allow if we're currently ptracing this task */
45673 + if (PAX_RAND_FLAGS(mm) &&
45674 + (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
45675 + mmput(mm);
45676 + return 0;
45677 + }
45678 +#endif
45679 +
45680 do {
45681 nwords += 2;
45682 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
45683 @@ -328,7 +366,7 @@ static int proc_pid_auxv(struct task_str
45684 }
45685
45686
45687 -#ifdef CONFIG_KALLSYMS
45688 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45689 /*
45690 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
45691 * Returns the resolved symbol. If that fails, simply return the address.
45692 @@ -367,7 +405,7 @@ static void unlock_trace(struct task_str
45693 mutex_unlock(&task->signal->cred_guard_mutex);
45694 }
45695
45696 -#ifdef CONFIG_STACKTRACE
45697 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45698
45699 #define MAX_STACK_TRACE_DEPTH 64
45700
45701 @@ -558,7 +596,7 @@ static int proc_pid_limits(struct task_s
45702 return count;
45703 }
45704
45705 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
45706 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
45707 static int proc_pid_syscall(struct task_struct *task, char *buffer)
45708 {
45709 long nr;
45710 @@ -587,7 +625,7 @@ static int proc_pid_syscall(struct task_
45711 /************************************************************************/
45712
45713 /* permission checks */
45714 -static int proc_fd_access_allowed(struct inode *inode)
45715 +static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
45716 {
45717 struct task_struct *task;
45718 int allowed = 0;
45719 @@ -597,7 +635,10 @@ static int proc_fd_access_allowed(struct
45720 */
45721 task = get_proc_task(inode);
45722 if (task) {
45723 - allowed = ptrace_may_access(task, PTRACE_MODE_READ);
45724 + if (log)
45725 + allowed = ptrace_may_access_log(task, PTRACE_MODE_READ);
45726 + else
45727 + allowed = ptrace_may_access(task, PTRACE_MODE_READ);
45728 put_task_struct(task);
45729 }
45730 return allowed;
45731 @@ -978,6 +1019,9 @@ static ssize_t environ_read(struct file
45732 if (!task)
45733 goto out_no_task;
45734
45735 + if (gr_acl_handle_procpidmem(task))
45736 + goto out;
45737 +
45738 ret = -ENOMEM;
45739 page = (char *)__get_free_page(GFP_TEMPORARY);
45740 if (!page)
45741 @@ -1614,7 +1658,7 @@ static void *proc_pid_follow_link(struct
45742 path_put(&nd->path);
45743
45744 /* Are we allowed to snoop on the tasks file descriptors? */
45745 - if (!proc_fd_access_allowed(inode))
45746 + if (!proc_fd_access_allowed(inode,0))
45747 goto out;
45748
45749 error = PROC_I(inode)->op.proc_get_link(inode, &nd->path);
45750 @@ -1653,8 +1697,18 @@ static int proc_pid_readlink(struct dent
45751 struct path path;
45752
45753 /* Are we allowed to snoop on the tasks file descriptors? */
45754 - if (!proc_fd_access_allowed(inode))
45755 - goto out;
45756 + /* logging this is needed for learning on chromium to work properly,
45757 + but we don't want to flood the logs from 'ps' which does a readlink
45758 + on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
45759 + CAP_SYS_PTRACE as it's not necessary for its basic functionality
45760 + */
45761 + if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
45762 + if (!proc_fd_access_allowed(inode,0))
45763 + goto out;
45764 + } else {
45765 + if (!proc_fd_access_allowed(inode,1))
45766 + goto out;
45767 + }
45768
45769 error = PROC_I(inode)->op.proc_get_link(inode, &path);
45770 if (error)
45771 @@ -1719,7 +1773,11 @@ struct inode *proc_pid_make_inode(struct
45772 rcu_read_lock();
45773 cred = __task_cred(task);
45774 inode->i_uid = cred->euid;
45775 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45776 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
45777 +#else
45778 inode->i_gid = cred->egid;
45779 +#endif
45780 rcu_read_unlock();
45781 }
45782 security_task_to_inode(task, inode);
45783 @@ -1737,6 +1795,9 @@ int pid_getattr(struct vfsmount *mnt, st
45784 struct inode *inode = dentry->d_inode;
45785 struct task_struct *task;
45786 const struct cred *cred;
45787 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45788 + const struct cred *tmpcred = current_cred();
45789 +#endif
45790
45791 generic_fillattr(inode, stat);
45792
45793 @@ -1744,13 +1805,41 @@ int pid_getattr(struct vfsmount *mnt, st
45794 stat->uid = 0;
45795 stat->gid = 0;
45796 task = pid_task(proc_pid(inode), PIDTYPE_PID);
45797 +
45798 + if (task && (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))) {
45799 + rcu_read_unlock();
45800 + return -ENOENT;
45801 + }
45802 +
45803 if (task) {
45804 + cred = __task_cred(task);
45805 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45806 + if (!tmpcred->uid || (tmpcred->uid == cred->uid)
45807 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45808 + || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
45809 +#endif
45810 + ) {
45811 +#endif
45812 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
45813 +#ifdef CONFIG_GRKERNSEC_PROC_USER
45814 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
45815 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45816 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
45817 +#endif
45818 task_dumpable(task)) {
45819 - cred = __task_cred(task);
45820 stat->uid = cred->euid;
45821 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45822 + stat->gid = CONFIG_GRKERNSEC_PROC_GID;
45823 +#else
45824 stat->gid = cred->egid;
45825 +#endif
45826 + }
45827 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45828 + } else {
45829 + rcu_read_unlock();
45830 + return -ENOENT;
45831 }
45832 +#endif
45833 }
45834 rcu_read_unlock();
45835 return 0;
45836 @@ -1787,11 +1876,20 @@ int pid_revalidate(struct dentry *dentry
45837
45838 if (task) {
45839 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
45840 +#ifdef CONFIG_GRKERNSEC_PROC_USER
45841 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
45842 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45843 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
45844 +#endif
45845 task_dumpable(task)) {
45846 rcu_read_lock();
45847 cred = __task_cred(task);
45848 inode->i_uid = cred->euid;
45849 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45850 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
45851 +#else
45852 inode->i_gid = cred->egid;
45853 +#endif
45854 rcu_read_unlock();
45855 } else {
45856 inode->i_uid = 0;
45857 @@ -1909,7 +2007,8 @@ static int proc_fd_info(struct inode *in
45858 int fd = proc_fd(inode);
45859
45860 if (task) {
45861 - files = get_files_struct(task);
45862 + if (!gr_acl_handle_procpidmem(task))
45863 + files = get_files_struct(task);
45864 put_task_struct(task);
45865 }
45866 if (files) {
45867 @@ -2169,11 +2268,21 @@ static const struct file_operations proc
45868 */
45869 static int proc_fd_permission(struct inode *inode, int mask, unsigned int flags)
45870 {
45871 + struct task_struct *task;
45872 int rv = generic_permission(inode, mask, flags, NULL);
45873 - if (rv == 0)
45874 - return 0;
45875 +
45876 if (task_pid(current) == proc_pid(inode))
45877 rv = 0;
45878 +
45879 + task = get_proc_task(inode);
45880 + if (task == NULL)
45881 + return rv;
45882 +
45883 + if (gr_acl_handle_procpidmem(task))
45884 + rv = -EACCES;
45885 +
45886 + put_task_struct(task);
45887 +
45888 return rv;
45889 }
45890
45891 @@ -2283,6 +2392,9 @@ static struct dentry *proc_pident_lookup
45892 if (!task)
45893 goto out_no_task;
45894
45895 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
45896 + goto out;
45897 +
45898 /*
45899 * Yes, it does not scale. And it should not. Don't add
45900 * new entries into /proc/<tgid>/ without very good reasons.
45901 @@ -2327,6 +2439,9 @@ static int proc_pident_readdir(struct fi
45902 if (!task)
45903 goto out_no_task;
45904
45905 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
45906 + goto out;
45907 +
45908 ret = 0;
45909 i = filp->f_pos;
45910 switch (i) {
45911 @@ -2597,7 +2712,7 @@ static void *proc_self_follow_link(struc
45912 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
45913 void *cookie)
45914 {
45915 - char *s = nd_get_link(nd);
45916 + const char *s = nd_get_link(nd);
45917 if (!IS_ERR(s))
45918 __putname(s);
45919 }
45920 @@ -2656,6 +2771,7 @@ static struct dentry *proc_base_instanti
45921 if (p->fop)
45922 inode->i_fop = p->fop;
45923 ei->op = p->op;
45924 +
45925 d_add(dentry, inode);
45926 error = NULL;
45927 out:
45928 @@ -2795,7 +2911,7 @@ static const struct pid_entry tgid_base_
45929 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
45930 #endif
45931 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
45932 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
45933 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
45934 INF("syscall", S_IRUGO, proc_pid_syscall),
45935 #endif
45936 INF("cmdline", S_IRUGO, proc_pid_cmdline),
45937 @@ -2820,10 +2936,10 @@ static const struct pid_entry tgid_base_
45938 #ifdef CONFIG_SECURITY
45939 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
45940 #endif
45941 -#ifdef CONFIG_KALLSYMS
45942 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45943 INF("wchan", S_IRUGO, proc_pid_wchan),
45944 #endif
45945 -#ifdef CONFIG_STACKTRACE
45946 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45947 ONE("stack", S_IRUGO, proc_pid_stack),
45948 #endif
45949 #ifdef CONFIG_SCHEDSTATS
45950 @@ -2857,6 +2973,9 @@ static const struct pid_entry tgid_base_
45951 #ifdef CONFIG_HARDWALL
45952 INF("hardwall", S_IRUGO, proc_pid_hardwall),
45953 #endif
45954 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
45955 + INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
45956 +#endif
45957 };
45958
45959 static int proc_tgid_base_readdir(struct file * filp,
45960 @@ -2982,7 +3101,14 @@ static struct dentry *proc_pid_instantia
45961 if (!inode)
45962 goto out;
45963
45964 +#ifdef CONFIG_GRKERNSEC_PROC_USER
45965 + inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
45966 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45967 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
45968 + inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
45969 +#else
45970 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
45971 +#endif
45972 inode->i_op = &proc_tgid_base_inode_operations;
45973 inode->i_fop = &proc_tgid_base_operations;
45974 inode->i_flags|=S_IMMUTABLE;
45975 @@ -3024,7 +3150,14 @@ struct dentry *proc_pid_lookup(struct in
45976 if (!task)
45977 goto out;
45978
45979 + if (!has_group_leader_pid(task))
45980 + goto out_put_task;
45981 +
45982 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
45983 + goto out_put_task;
45984 +
45985 result = proc_pid_instantiate(dir, dentry, task, NULL);
45986 +out_put_task:
45987 put_task_struct(task);
45988 out:
45989 return result;
45990 @@ -3089,6 +3222,11 @@ int proc_pid_readdir(struct file * filp,
45991 {
45992 unsigned int nr;
45993 struct task_struct *reaper;
45994 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45995 + const struct cred *tmpcred = current_cred();
45996 + const struct cred *itercred;
45997 +#endif
45998 + filldir_t __filldir = filldir;
45999 struct tgid_iter iter;
46000 struct pid_namespace *ns;
46001
46002 @@ -3112,8 +3250,27 @@ int proc_pid_readdir(struct file * filp,
46003 for (iter = next_tgid(ns, iter);
46004 iter.task;
46005 iter.tgid += 1, iter = next_tgid(ns, iter)) {
46006 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46007 + rcu_read_lock();
46008 + itercred = __task_cred(iter.task);
46009 +#endif
46010 + if (gr_pid_is_chrooted(iter.task) || gr_check_hidden_task(iter.task)
46011 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46012 + || (tmpcred->uid && (itercred->uid != tmpcred->uid)
46013 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
46014 + && !in_group_p(CONFIG_GRKERNSEC_PROC_GID)
46015 +#endif
46016 + )
46017 +#endif
46018 + )
46019 + __filldir = &gr_fake_filldir;
46020 + else
46021 + __filldir = filldir;
46022 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46023 + rcu_read_unlock();
46024 +#endif
46025 filp->f_pos = iter.tgid + TGID_OFFSET;
46026 - if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) {
46027 + if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) {
46028 put_task_struct(iter.task);
46029 goto out;
46030 }
46031 @@ -3141,7 +3298,7 @@ static const struct pid_entry tid_base_s
46032 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
46033 #endif
46034 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
46035 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
46036 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
46037 INF("syscall", S_IRUGO, proc_pid_syscall),
46038 #endif
46039 INF("cmdline", S_IRUGO, proc_pid_cmdline),
46040 @@ -3165,10 +3322,10 @@ static const struct pid_entry tid_base_s
46041 #ifdef CONFIG_SECURITY
46042 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
46043 #endif
46044 -#ifdef CONFIG_KALLSYMS
46045 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
46046 INF("wchan", S_IRUGO, proc_pid_wchan),
46047 #endif
46048 -#ifdef CONFIG_STACKTRACE
46049 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
46050 ONE("stack", S_IRUGO, proc_pid_stack),
46051 #endif
46052 #ifdef CONFIG_SCHEDSTATS
46053 diff -urNp linux-3.0.8/fs/proc/cmdline.c linux-3.0.8/fs/proc/cmdline.c
46054 --- linux-3.0.8/fs/proc/cmdline.c 2011-07-21 22:17:23.000000000 -0400
46055 +++ linux-3.0.8/fs/proc/cmdline.c 2011-08-23 21:48:14.000000000 -0400
46056 @@ -23,7 +23,11 @@ static const struct file_operations cmdl
46057
46058 static int __init proc_cmdline_init(void)
46059 {
46060 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
46061 + proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
46062 +#else
46063 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
46064 +#endif
46065 return 0;
46066 }
46067 module_init(proc_cmdline_init);
46068 diff -urNp linux-3.0.8/fs/proc/devices.c linux-3.0.8/fs/proc/devices.c
46069 --- linux-3.0.8/fs/proc/devices.c 2011-07-21 22:17:23.000000000 -0400
46070 +++ linux-3.0.8/fs/proc/devices.c 2011-08-23 21:48:14.000000000 -0400
46071 @@ -64,7 +64,11 @@ static const struct file_operations proc
46072
46073 static int __init proc_devices_init(void)
46074 {
46075 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
46076 + proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
46077 +#else
46078 proc_create("devices", 0, NULL, &proc_devinfo_operations);
46079 +#endif
46080 return 0;
46081 }
46082 module_init(proc_devices_init);
46083 diff -urNp linux-3.0.8/fs/proc/inode.c linux-3.0.8/fs/proc/inode.c
46084 --- linux-3.0.8/fs/proc/inode.c 2011-07-21 22:17:23.000000000 -0400
46085 +++ linux-3.0.8/fs/proc/inode.c 2011-10-19 03:59:32.000000000 -0400
46086 @@ -18,12 +18,18 @@
46087 #include <linux/module.h>
46088 #include <linux/sysctl.h>
46089 #include <linux/slab.h>
46090 +#include <linux/grsecurity.h>
46091
46092 #include <asm/system.h>
46093 #include <asm/uaccess.h>
46094
46095 #include "internal.h"
46096
46097 +#ifdef CONFIG_PROC_SYSCTL
46098 +extern const struct inode_operations proc_sys_inode_operations;
46099 +extern const struct inode_operations proc_sys_dir_operations;
46100 +#endif
46101 +
46102 static void proc_evict_inode(struct inode *inode)
46103 {
46104 struct proc_dir_entry *de;
46105 @@ -49,6 +55,13 @@ static void proc_evict_inode(struct inod
46106 ns_ops = PROC_I(inode)->ns_ops;
46107 if (ns_ops && ns_ops->put)
46108 ns_ops->put(PROC_I(inode)->ns);
46109 +
46110 +#ifdef CONFIG_PROC_SYSCTL
46111 + if (inode->i_op == &proc_sys_inode_operations ||
46112 + inode->i_op == &proc_sys_dir_operations)
46113 + gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
46114 +#endif
46115 +
46116 }
46117
46118 static struct kmem_cache * proc_inode_cachep;
46119 @@ -440,7 +453,11 @@ struct inode *proc_get_inode(struct supe
46120 if (de->mode) {
46121 inode->i_mode = de->mode;
46122 inode->i_uid = de->uid;
46123 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
46124 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
46125 +#else
46126 inode->i_gid = de->gid;
46127 +#endif
46128 }
46129 if (de->size)
46130 inode->i_size = de->size;
46131 diff -urNp linux-3.0.8/fs/proc/internal.h linux-3.0.8/fs/proc/internal.h
46132 --- linux-3.0.8/fs/proc/internal.h 2011-07-21 22:17:23.000000000 -0400
46133 +++ linux-3.0.8/fs/proc/internal.h 2011-08-23 21:48:14.000000000 -0400
46134 @@ -51,6 +51,9 @@ extern int proc_pid_status(struct seq_fi
46135 struct pid *pid, struct task_struct *task);
46136 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
46137 struct pid *pid, struct task_struct *task);
46138 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
46139 +extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
46140 +#endif
46141 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
46142
46143 extern const struct file_operations proc_maps_operations;
46144 diff -urNp linux-3.0.8/fs/proc/Kconfig linux-3.0.8/fs/proc/Kconfig
46145 --- linux-3.0.8/fs/proc/Kconfig 2011-07-21 22:17:23.000000000 -0400
46146 +++ linux-3.0.8/fs/proc/Kconfig 2011-08-23 21:48:14.000000000 -0400
46147 @@ -30,12 +30,12 @@ config PROC_FS
46148
46149 config PROC_KCORE
46150 bool "/proc/kcore support" if !ARM
46151 - depends on PROC_FS && MMU
46152 + depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
46153
46154 config PROC_VMCORE
46155 bool "/proc/vmcore support"
46156 - depends on PROC_FS && CRASH_DUMP
46157 - default y
46158 + depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
46159 + default n
46160 help
46161 Exports the dump image of crashed kernel in ELF format.
46162
46163 @@ -59,8 +59,8 @@ config PROC_SYSCTL
46164 limited in memory.
46165
46166 config PROC_PAGE_MONITOR
46167 - default y
46168 - depends on PROC_FS && MMU
46169 + default n
46170 + depends on PROC_FS && MMU && !GRKERNSEC
46171 bool "Enable /proc page monitoring" if EXPERT
46172 help
46173 Various /proc files exist to monitor process memory utilization:
46174 diff -urNp linux-3.0.8/fs/proc/kcore.c linux-3.0.8/fs/proc/kcore.c
46175 --- linux-3.0.8/fs/proc/kcore.c 2011-07-21 22:17:23.000000000 -0400
46176 +++ linux-3.0.8/fs/proc/kcore.c 2011-08-23 21:48:14.000000000 -0400
46177 @@ -321,6 +321,8 @@ static void elf_kcore_store_hdr(char *bu
46178 off_t offset = 0;
46179 struct kcore_list *m;
46180
46181 + pax_track_stack();
46182 +
46183 /* setup ELF header */
46184 elf = (struct elfhdr *) bufp;
46185 bufp += sizeof(struct elfhdr);
46186 @@ -478,9 +480,10 @@ read_kcore(struct file *file, char __use
46187 * the addresses in the elf_phdr on our list.
46188 */
46189 start = kc_offset_to_vaddr(*fpos - elf_buflen);
46190 - if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
46191 + tsz = PAGE_SIZE - (start & ~PAGE_MASK);
46192 + if (tsz > buflen)
46193 tsz = buflen;
46194 -
46195 +
46196 while (buflen) {
46197 struct kcore_list *m;
46198
46199 @@ -509,20 +512,23 @@ read_kcore(struct file *file, char __use
46200 kfree(elf_buf);
46201 } else {
46202 if (kern_addr_valid(start)) {
46203 - unsigned long n;
46204 + char *elf_buf;
46205 + mm_segment_t oldfs;
46206
46207 - n = copy_to_user(buffer, (char *)start, tsz);
46208 - /*
46209 - * We cannot distingush between fault on source
46210 - * and fault on destination. When this happens
46211 - * we clear too and hope it will trigger the
46212 - * EFAULT again.
46213 - */
46214 - if (n) {
46215 - if (clear_user(buffer + tsz - n,
46216 - n))
46217 + elf_buf = kmalloc(tsz, GFP_KERNEL);
46218 + if (!elf_buf)
46219 + return -ENOMEM;
46220 + oldfs = get_fs();
46221 + set_fs(KERNEL_DS);
46222 + if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
46223 + set_fs(oldfs);
46224 + if (copy_to_user(buffer, elf_buf, tsz)) {
46225 + kfree(elf_buf);
46226 return -EFAULT;
46227 + }
46228 }
46229 + set_fs(oldfs);
46230 + kfree(elf_buf);
46231 } else {
46232 if (clear_user(buffer, tsz))
46233 return -EFAULT;
46234 @@ -542,6 +548,9 @@ read_kcore(struct file *file, char __use
46235
46236 static int open_kcore(struct inode *inode, struct file *filp)
46237 {
46238 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
46239 + return -EPERM;
46240 +#endif
46241 if (!capable(CAP_SYS_RAWIO))
46242 return -EPERM;
46243 if (kcore_need_update)
46244 diff -urNp linux-3.0.8/fs/proc/meminfo.c linux-3.0.8/fs/proc/meminfo.c
46245 --- linux-3.0.8/fs/proc/meminfo.c 2011-07-21 22:17:23.000000000 -0400
46246 +++ linux-3.0.8/fs/proc/meminfo.c 2011-08-23 21:48:14.000000000 -0400
46247 @@ -29,6 +29,8 @@ static int meminfo_proc_show(struct seq_
46248 unsigned long pages[NR_LRU_LISTS];
46249 int lru;
46250
46251 + pax_track_stack();
46252 +
46253 /*
46254 * display in kilobytes.
46255 */
46256 @@ -157,7 +159,7 @@ static int meminfo_proc_show(struct seq_
46257 vmi.used >> 10,
46258 vmi.largest_chunk >> 10
46259 #ifdef CONFIG_MEMORY_FAILURE
46260 - ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
46261 + ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
46262 #endif
46263 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
46264 ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
46265 diff -urNp linux-3.0.8/fs/proc/nommu.c linux-3.0.8/fs/proc/nommu.c
46266 --- linux-3.0.8/fs/proc/nommu.c 2011-07-21 22:17:23.000000000 -0400
46267 +++ linux-3.0.8/fs/proc/nommu.c 2011-08-23 21:47:56.000000000 -0400
46268 @@ -66,7 +66,7 @@ static int nommu_region_show(struct seq_
46269 if (len < 1)
46270 len = 1;
46271 seq_printf(m, "%*c", len, ' ');
46272 - seq_path(m, &file->f_path, "");
46273 + seq_path(m, &file->f_path, "\n\\");
46274 }
46275
46276 seq_putc(m, '\n');
46277 diff -urNp linux-3.0.8/fs/proc/proc_net.c linux-3.0.8/fs/proc/proc_net.c
46278 --- linux-3.0.8/fs/proc/proc_net.c 2011-07-21 22:17:23.000000000 -0400
46279 +++ linux-3.0.8/fs/proc/proc_net.c 2011-08-23 21:48:14.000000000 -0400
46280 @@ -105,6 +105,17 @@ static struct net *get_proc_task_net(str
46281 struct task_struct *task;
46282 struct nsproxy *ns;
46283 struct net *net = NULL;
46284 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46285 + const struct cred *cred = current_cred();
46286 +#endif
46287 +
46288 +#ifdef CONFIG_GRKERNSEC_PROC_USER
46289 + if (cred->fsuid)
46290 + return net;
46291 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46292 + if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
46293 + return net;
46294 +#endif
46295
46296 rcu_read_lock();
46297 task = pid_task(proc_pid(dir), PIDTYPE_PID);
46298 diff -urNp linux-3.0.8/fs/proc/proc_sysctl.c linux-3.0.8/fs/proc/proc_sysctl.c
46299 --- linux-3.0.8/fs/proc/proc_sysctl.c 2011-07-21 22:17:23.000000000 -0400
46300 +++ linux-3.0.8/fs/proc/proc_sysctl.c 2011-10-19 03:59:32.000000000 -0400
46301 @@ -8,11 +8,13 @@
46302 #include <linux/namei.h>
46303 #include "internal.h"
46304
46305 +extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op);
46306 +
46307 static const struct dentry_operations proc_sys_dentry_operations;
46308 static const struct file_operations proc_sys_file_operations;
46309 -static const struct inode_operations proc_sys_inode_operations;
46310 +const struct inode_operations proc_sys_inode_operations;
46311 static const struct file_operations proc_sys_dir_file_operations;
46312 -static const struct inode_operations proc_sys_dir_operations;
46313 +const struct inode_operations proc_sys_dir_operations;
46314
46315 static struct inode *proc_sys_make_inode(struct super_block *sb,
46316 struct ctl_table_header *head, struct ctl_table *table)
46317 @@ -121,8 +123,14 @@ static struct dentry *proc_sys_lookup(st
46318
46319 err = NULL;
46320 d_set_d_op(dentry, &proc_sys_dentry_operations);
46321 +
46322 + gr_handle_proc_create(dentry, inode);
46323 +
46324 d_add(dentry, inode);
46325
46326 + if (gr_handle_sysctl(p, MAY_EXEC))
46327 + err = ERR_PTR(-ENOENT);
46328 +
46329 out:
46330 sysctl_head_finish(head);
46331 return err;
46332 @@ -202,6 +210,9 @@ static int proc_sys_fill_cache(struct fi
46333 return -ENOMEM;
46334 } else {
46335 d_set_d_op(child, &proc_sys_dentry_operations);
46336 +
46337 + gr_handle_proc_create(child, inode);
46338 +
46339 d_add(child, inode);
46340 }
46341 } else {
46342 @@ -230,6 +241,9 @@ static int scan(struct ctl_table_header
46343 if (*pos < file->f_pos)
46344 continue;
46345
46346 + if (gr_handle_sysctl(table, 0))
46347 + continue;
46348 +
46349 res = proc_sys_fill_cache(file, dirent, filldir, head, table);
46350 if (res)
46351 return res;
46352 @@ -355,6 +369,9 @@ static int proc_sys_getattr(struct vfsmo
46353 if (IS_ERR(head))
46354 return PTR_ERR(head);
46355
46356 + if (table && gr_handle_sysctl(table, MAY_EXEC))
46357 + return -ENOENT;
46358 +
46359 generic_fillattr(inode, stat);
46360 if (table)
46361 stat->mode = (stat->mode & S_IFMT) | table->mode;
46362 @@ -374,13 +391,13 @@ static const struct file_operations proc
46363 .llseek = generic_file_llseek,
46364 };
46365
46366 -static const struct inode_operations proc_sys_inode_operations = {
46367 +const struct inode_operations proc_sys_inode_operations = {
46368 .permission = proc_sys_permission,
46369 .setattr = proc_sys_setattr,
46370 .getattr = proc_sys_getattr,
46371 };
46372
46373 -static const struct inode_operations proc_sys_dir_operations = {
46374 +const struct inode_operations proc_sys_dir_operations = {
46375 .lookup = proc_sys_lookup,
46376 .permission = proc_sys_permission,
46377 .setattr = proc_sys_setattr,
46378 diff -urNp linux-3.0.8/fs/proc/root.c linux-3.0.8/fs/proc/root.c
46379 --- linux-3.0.8/fs/proc/root.c 2011-07-21 22:17:23.000000000 -0400
46380 +++ linux-3.0.8/fs/proc/root.c 2011-08-23 21:48:14.000000000 -0400
46381 @@ -123,7 +123,15 @@ void __init proc_root_init(void)
46382 #ifdef CONFIG_PROC_DEVICETREE
46383 proc_device_tree_init();
46384 #endif
46385 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
46386 +#ifdef CONFIG_GRKERNSEC_PROC_USER
46387 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
46388 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46389 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
46390 +#endif
46391 +#else
46392 proc_mkdir("bus", NULL);
46393 +#endif
46394 proc_sys_init();
46395 }
46396
46397 diff -urNp linux-3.0.8/fs/proc/task_mmu.c linux-3.0.8/fs/proc/task_mmu.c
46398 --- linux-3.0.8/fs/proc/task_mmu.c 2011-10-24 08:05:30.000000000 -0400
46399 +++ linux-3.0.8/fs/proc/task_mmu.c 2011-10-16 21:55:28.000000000 -0400
46400 @@ -51,8 +51,13 @@ void task_mem(struct seq_file *m, struct
46401 "VmExe:\t%8lu kB\n"
46402 "VmLib:\t%8lu kB\n"
46403 "VmPTE:\t%8lu kB\n"
46404 - "VmSwap:\t%8lu kB\n",
46405 - hiwater_vm << (PAGE_SHIFT-10),
46406 + "VmSwap:\t%8lu kB\n"
46407 +
46408 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
46409 + "CsBase:\t%8lx\nCsLim:\t%8lx\n"
46410 +#endif
46411 +
46412 + ,hiwater_vm << (PAGE_SHIFT-10),
46413 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
46414 mm->locked_vm << (PAGE_SHIFT-10),
46415 hiwater_rss << (PAGE_SHIFT-10),
46416 @@ -60,7 +65,13 @@ void task_mem(struct seq_file *m, struct
46417 data << (PAGE_SHIFT-10),
46418 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
46419 (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
46420 - swap << (PAGE_SHIFT-10));
46421 + swap << (PAGE_SHIFT-10)
46422 +
46423 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
46424 + , mm->context.user_cs_base, mm->context.user_cs_limit
46425 +#endif
46426 +
46427 + );
46428 }
46429
46430 unsigned long task_vsize(struct mm_struct *mm)
46431 @@ -207,6 +218,12 @@ static int do_maps_open(struct inode *in
46432 return ret;
46433 }
46434
46435 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46436 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
46437 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
46438 + _mm->pax_flags & MF_PAX_SEGMEXEC))
46439 +#endif
46440 +
46441 static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
46442 {
46443 struct mm_struct *mm = vma->vm_mm;
46444 @@ -225,13 +242,13 @@ static void show_map_vma(struct seq_file
46445 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
46446 }
46447
46448 - /* We don't show the stack guard page in /proc/maps */
46449 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46450 + start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
46451 + end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
46452 +#else
46453 start = vma->vm_start;
46454 - if (stack_guard_page_start(vma, start))
46455 - start += PAGE_SIZE;
46456 end = vma->vm_end;
46457 - if (stack_guard_page_end(vma, end))
46458 - end -= PAGE_SIZE;
46459 +#endif
46460
46461 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
46462 start,
46463 @@ -240,7 +257,11 @@ static void show_map_vma(struct seq_file
46464 flags & VM_WRITE ? 'w' : '-',
46465 flags & VM_EXEC ? 'x' : '-',
46466 flags & VM_MAYSHARE ? 's' : 'p',
46467 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46468 + PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
46469 +#else
46470 pgoff,
46471 +#endif
46472 MAJOR(dev), MINOR(dev), ino, &len);
46473
46474 /*
46475 @@ -249,7 +270,7 @@ static void show_map_vma(struct seq_file
46476 */
46477 if (file) {
46478 pad_len_spaces(m, len);
46479 - seq_path(m, &file->f_path, "\n");
46480 + seq_path(m, &file->f_path, "\n\\");
46481 } else {
46482 const char *name = arch_vma_name(vma);
46483 if (!name) {
46484 @@ -257,8 +278,9 @@ static void show_map_vma(struct seq_file
46485 if (vma->vm_start <= mm->brk &&
46486 vma->vm_end >= mm->start_brk) {
46487 name = "[heap]";
46488 - } else if (vma->vm_start <= mm->start_stack &&
46489 - vma->vm_end >= mm->start_stack) {
46490 + } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
46491 + (vma->vm_start <= mm->start_stack &&
46492 + vma->vm_end >= mm->start_stack)) {
46493 name = "[stack]";
46494 }
46495 } else {
46496 @@ -433,11 +455,16 @@ static int show_smap(struct seq_file *m,
46497 };
46498
46499 memset(&mss, 0, sizeof mss);
46500 - mss.vma = vma;
46501 - /* mmap_sem is held in m_start */
46502 - if (vma->vm_mm && !is_vm_hugetlb_page(vma))
46503 - walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
46504 -
46505 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46506 + if (!PAX_RAND_FLAGS(vma->vm_mm)) {
46507 +#endif
46508 + mss.vma = vma;
46509 + /* mmap_sem is held in m_start */
46510 + if (vma->vm_mm && !is_vm_hugetlb_page(vma))
46511 + walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
46512 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46513 + }
46514 +#endif
46515 show_map_vma(m, vma);
46516
46517 seq_printf(m,
46518 @@ -455,7 +482,11 @@ static int show_smap(struct seq_file *m,
46519 "KernelPageSize: %8lu kB\n"
46520 "MMUPageSize: %8lu kB\n"
46521 "Locked: %8lu kB\n",
46522 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46523 + PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
46524 +#else
46525 (vma->vm_end - vma->vm_start) >> 10,
46526 +#endif
46527 mss.resident >> 10,
46528 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
46529 mss.shared_clean >> 10,
46530 @@ -1031,7 +1062,7 @@ static int show_numa_map(struct seq_file
46531
46532 if (file) {
46533 seq_printf(m, " file=");
46534 - seq_path(m, &file->f_path, "\n\t= ");
46535 + seq_path(m, &file->f_path, "\n\t\\= ");
46536 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
46537 seq_printf(m, " heap");
46538 } else if (vma->vm_start <= mm->start_stack &&
46539 diff -urNp linux-3.0.8/fs/proc/task_nommu.c linux-3.0.8/fs/proc/task_nommu.c
46540 --- linux-3.0.8/fs/proc/task_nommu.c 2011-07-21 22:17:23.000000000 -0400
46541 +++ linux-3.0.8/fs/proc/task_nommu.c 2011-08-23 21:47:56.000000000 -0400
46542 @@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct
46543 else
46544 bytes += kobjsize(mm);
46545
46546 - if (current->fs && current->fs->users > 1)
46547 + if (current->fs && atomic_read(&current->fs->users) > 1)
46548 sbytes += kobjsize(current->fs);
46549 else
46550 bytes += kobjsize(current->fs);
46551 @@ -166,7 +166,7 @@ static int nommu_vma_show(struct seq_fil
46552
46553 if (file) {
46554 pad_len_spaces(m, len);
46555 - seq_path(m, &file->f_path, "");
46556 + seq_path(m, &file->f_path, "\n\\");
46557 } else if (mm) {
46558 if (vma->vm_start <= mm->start_stack &&
46559 vma->vm_end >= mm->start_stack) {
46560 diff -urNp linux-3.0.8/fs/quota/netlink.c linux-3.0.8/fs/quota/netlink.c
46561 --- linux-3.0.8/fs/quota/netlink.c 2011-07-21 22:17:23.000000000 -0400
46562 +++ linux-3.0.8/fs/quota/netlink.c 2011-08-23 21:47:56.000000000 -0400
46563 @@ -33,7 +33,7 @@ static struct genl_family quota_genl_fam
46564 void quota_send_warning(short type, unsigned int id, dev_t dev,
46565 const char warntype)
46566 {
46567 - static atomic_t seq;
46568 + static atomic_unchecked_t seq;
46569 struct sk_buff *skb;
46570 void *msg_head;
46571 int ret;
46572 @@ -49,7 +49,7 @@ void quota_send_warning(short type, unsi
46573 "VFS: Not enough memory to send quota warning.\n");
46574 return;
46575 }
46576 - msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
46577 + msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
46578 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
46579 if (!msg_head) {
46580 printk(KERN_ERR
46581 diff -urNp linux-3.0.8/fs/readdir.c linux-3.0.8/fs/readdir.c
46582 --- linux-3.0.8/fs/readdir.c 2011-07-21 22:17:23.000000000 -0400
46583 +++ linux-3.0.8/fs/readdir.c 2011-10-06 04:17:55.000000000 -0400
46584 @@ -17,6 +17,7 @@
46585 #include <linux/security.h>
46586 #include <linux/syscalls.h>
46587 #include <linux/unistd.h>
46588 +#include <linux/namei.h>
46589
46590 #include <asm/uaccess.h>
46591
46592 @@ -67,6 +68,7 @@ struct old_linux_dirent {
46593
46594 struct readdir_callback {
46595 struct old_linux_dirent __user * dirent;
46596 + struct file * file;
46597 int result;
46598 };
46599
46600 @@ -84,6 +86,10 @@ static int fillonedir(void * __buf, cons
46601 buf->result = -EOVERFLOW;
46602 return -EOVERFLOW;
46603 }
46604 +
46605 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
46606 + return 0;
46607 +
46608 buf->result++;
46609 dirent = buf->dirent;
46610 if (!access_ok(VERIFY_WRITE, dirent,
46611 @@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned in
46612
46613 buf.result = 0;
46614 buf.dirent = dirent;
46615 + buf.file = file;
46616
46617 error = vfs_readdir(file, fillonedir, &buf);
46618 if (buf.result)
46619 @@ -142,6 +149,7 @@ struct linux_dirent {
46620 struct getdents_callback {
46621 struct linux_dirent __user * current_dir;
46622 struct linux_dirent __user * previous;
46623 + struct file * file;
46624 int count;
46625 int error;
46626 };
46627 @@ -163,6 +171,10 @@ static int filldir(void * __buf, const c
46628 buf->error = -EOVERFLOW;
46629 return -EOVERFLOW;
46630 }
46631 +
46632 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
46633 + return 0;
46634 +
46635 dirent = buf->previous;
46636 if (dirent) {
46637 if (__put_user(offset, &dirent->d_off))
46638 @@ -210,6 +222,7 @@ SYSCALL_DEFINE3(getdents, unsigned int,
46639 buf.previous = NULL;
46640 buf.count = count;
46641 buf.error = 0;
46642 + buf.file = file;
46643
46644 error = vfs_readdir(file, filldir, &buf);
46645 if (error >= 0)
46646 @@ -229,6 +242,7 @@ out:
46647 struct getdents_callback64 {
46648 struct linux_dirent64 __user * current_dir;
46649 struct linux_dirent64 __user * previous;
46650 + struct file *file;
46651 int count;
46652 int error;
46653 };
46654 @@ -244,6 +258,10 @@ static int filldir64(void * __buf, const
46655 buf->error = -EINVAL; /* only used if we fail.. */
46656 if (reclen > buf->count)
46657 return -EINVAL;
46658 +
46659 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
46660 + return 0;
46661 +
46662 dirent = buf->previous;
46663 if (dirent) {
46664 if (__put_user(offset, &dirent->d_off))
46665 @@ -291,6 +309,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int
46666
46667 buf.current_dir = dirent;
46668 buf.previous = NULL;
46669 + buf.file = file;
46670 buf.count = count;
46671 buf.error = 0;
46672
46673 @@ -299,7 +318,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int
46674 error = buf.error;
46675 lastdirent = buf.previous;
46676 if (lastdirent) {
46677 - typeof(lastdirent->d_off) d_off = file->f_pos;
46678 + typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
46679 if (__put_user(d_off, &lastdirent->d_off))
46680 error = -EFAULT;
46681 else
46682 diff -urNp linux-3.0.8/fs/reiserfs/dir.c linux-3.0.8/fs/reiserfs/dir.c
46683 --- linux-3.0.8/fs/reiserfs/dir.c 2011-07-21 22:17:23.000000000 -0400
46684 +++ linux-3.0.8/fs/reiserfs/dir.c 2011-08-23 21:48:14.000000000 -0400
46685 @@ -66,6 +66,8 @@ int reiserfs_readdir_dentry(struct dentr
46686 struct reiserfs_dir_entry de;
46687 int ret = 0;
46688
46689 + pax_track_stack();
46690 +
46691 reiserfs_write_lock(inode->i_sb);
46692
46693 reiserfs_check_lock_depth(inode->i_sb, "readdir");
46694 diff -urNp linux-3.0.8/fs/reiserfs/do_balan.c linux-3.0.8/fs/reiserfs/do_balan.c
46695 --- linux-3.0.8/fs/reiserfs/do_balan.c 2011-07-21 22:17:23.000000000 -0400
46696 +++ linux-3.0.8/fs/reiserfs/do_balan.c 2011-08-23 21:47:56.000000000 -0400
46697 @@ -2051,7 +2051,7 @@ void do_balance(struct tree_balance *tb,
46698 return;
46699 }
46700
46701 - atomic_inc(&(fs_generation(tb->tb_sb)));
46702 + atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
46703 do_balance_starts(tb);
46704
46705 /* balance leaf returns 0 except if combining L R and S into
46706 diff -urNp linux-3.0.8/fs/reiserfs/journal.c linux-3.0.8/fs/reiserfs/journal.c
46707 --- linux-3.0.8/fs/reiserfs/journal.c 2011-07-21 22:17:23.000000000 -0400
46708 +++ linux-3.0.8/fs/reiserfs/journal.c 2011-08-23 21:48:14.000000000 -0400
46709 @@ -2299,6 +2299,8 @@ static struct buffer_head *reiserfs_brea
46710 struct buffer_head *bh;
46711 int i, j;
46712
46713 + pax_track_stack();
46714 +
46715 bh = __getblk(dev, block, bufsize);
46716 if (buffer_uptodate(bh))
46717 return (bh);
46718 diff -urNp linux-3.0.8/fs/reiserfs/namei.c linux-3.0.8/fs/reiserfs/namei.c
46719 --- linux-3.0.8/fs/reiserfs/namei.c 2011-07-21 22:17:23.000000000 -0400
46720 +++ linux-3.0.8/fs/reiserfs/namei.c 2011-08-23 21:48:14.000000000 -0400
46721 @@ -1225,6 +1225,8 @@ static int reiserfs_rename(struct inode
46722 unsigned long savelink = 1;
46723 struct timespec ctime;
46724
46725 + pax_track_stack();
46726 +
46727 /* three balancings: (1) old name removal, (2) new name insertion
46728 and (3) maybe "save" link insertion
46729 stat data updates: (1) old directory,
46730 diff -urNp linux-3.0.8/fs/reiserfs/procfs.c linux-3.0.8/fs/reiserfs/procfs.c
46731 --- linux-3.0.8/fs/reiserfs/procfs.c 2011-07-21 22:17:23.000000000 -0400
46732 +++ linux-3.0.8/fs/reiserfs/procfs.c 2011-08-23 21:48:14.000000000 -0400
46733 @@ -113,7 +113,7 @@ static int show_super(struct seq_file *m
46734 "SMALL_TAILS " : "NO_TAILS ",
46735 replay_only(sb) ? "REPLAY_ONLY " : "",
46736 convert_reiserfs(sb) ? "CONV " : "",
46737 - atomic_read(&r->s_generation_counter),
46738 + atomic_read_unchecked(&r->s_generation_counter),
46739 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
46740 SF(s_do_balance), SF(s_unneeded_left_neighbor),
46741 SF(s_good_search_by_key_reada), SF(s_bmaps),
46742 @@ -299,6 +299,8 @@ static int show_journal(struct seq_file
46743 struct journal_params *jp = &rs->s_v1.s_journal;
46744 char b[BDEVNAME_SIZE];
46745
46746 + pax_track_stack();
46747 +
46748 seq_printf(m, /* on-disk fields */
46749 "jp_journal_1st_block: \t%i\n"
46750 "jp_journal_dev: \t%s[%x]\n"
46751 diff -urNp linux-3.0.8/fs/reiserfs/stree.c linux-3.0.8/fs/reiserfs/stree.c
46752 --- linux-3.0.8/fs/reiserfs/stree.c 2011-07-21 22:17:23.000000000 -0400
46753 +++ linux-3.0.8/fs/reiserfs/stree.c 2011-08-23 21:48:14.000000000 -0400
46754 @@ -1196,6 +1196,8 @@ int reiserfs_delete_item(struct reiserfs
46755 int iter = 0;
46756 #endif
46757
46758 + pax_track_stack();
46759 +
46760 BUG_ON(!th->t_trans_id);
46761
46762 init_tb_struct(th, &s_del_balance, sb, path,
46763 @@ -1333,6 +1335,8 @@ void reiserfs_delete_solid_item(struct r
46764 int retval;
46765 int quota_cut_bytes = 0;
46766
46767 + pax_track_stack();
46768 +
46769 BUG_ON(!th->t_trans_id);
46770
46771 le_key2cpu_key(&cpu_key, key);
46772 @@ -1562,6 +1566,8 @@ int reiserfs_cut_from_item(struct reiser
46773 int quota_cut_bytes;
46774 loff_t tail_pos = 0;
46775
46776 + pax_track_stack();
46777 +
46778 BUG_ON(!th->t_trans_id);
46779
46780 init_tb_struct(th, &s_cut_balance, inode->i_sb, path,
46781 @@ -1957,6 +1963,8 @@ int reiserfs_paste_into_item(struct reis
46782 int retval;
46783 int fs_gen;
46784
46785 + pax_track_stack();
46786 +
46787 BUG_ON(!th->t_trans_id);
46788
46789 fs_gen = get_generation(inode->i_sb);
46790 @@ -2045,6 +2053,8 @@ int reiserfs_insert_item(struct reiserfs
46791 int fs_gen = 0;
46792 int quota_bytes = 0;
46793
46794 + pax_track_stack();
46795 +
46796 BUG_ON(!th->t_trans_id);
46797
46798 if (inode) { /* Do we count quotas for item? */
46799 diff -urNp linux-3.0.8/fs/reiserfs/super.c linux-3.0.8/fs/reiserfs/super.c
46800 --- linux-3.0.8/fs/reiserfs/super.c 2011-07-21 22:17:23.000000000 -0400
46801 +++ linux-3.0.8/fs/reiserfs/super.c 2011-08-23 21:48:14.000000000 -0400
46802 @@ -927,6 +927,8 @@ static int reiserfs_parse_options(struct
46803 {.option_name = NULL}
46804 };
46805
46806 + pax_track_stack();
46807 +
46808 *blocks = 0;
46809 if (!options || !*options)
46810 /* use default configuration: create tails, journaling on, no
46811 diff -urNp linux-3.0.8/fs/select.c linux-3.0.8/fs/select.c
46812 --- linux-3.0.8/fs/select.c 2011-07-21 22:17:23.000000000 -0400
46813 +++ linux-3.0.8/fs/select.c 2011-08-23 21:48:14.000000000 -0400
46814 @@ -20,6 +20,7 @@
46815 #include <linux/module.h>
46816 #include <linux/slab.h>
46817 #include <linux/poll.h>
46818 +#include <linux/security.h>
46819 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
46820 #include <linux/file.h>
46821 #include <linux/fdtable.h>
46822 @@ -403,6 +404,8 @@ int do_select(int n, fd_set_bits *fds, s
46823 int retval, i, timed_out = 0;
46824 unsigned long slack = 0;
46825
46826 + pax_track_stack();
46827 +
46828 rcu_read_lock();
46829 retval = max_select_fd(n, fds);
46830 rcu_read_unlock();
46831 @@ -528,6 +531,8 @@ int core_sys_select(int n, fd_set __user
46832 /* Allocate small arguments on the stack to save memory and be faster */
46833 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
46834
46835 + pax_track_stack();
46836 +
46837 ret = -EINVAL;
46838 if (n < 0)
46839 goto out_nofds;
46840 @@ -837,6 +842,9 @@ int do_sys_poll(struct pollfd __user *uf
46841 struct poll_list *walk = head;
46842 unsigned long todo = nfds;
46843
46844 + pax_track_stack();
46845 +
46846 + gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
46847 if (nfds > rlimit(RLIMIT_NOFILE))
46848 return -EINVAL;
46849
46850 diff -urNp linux-3.0.8/fs/seq_file.c linux-3.0.8/fs/seq_file.c
46851 --- linux-3.0.8/fs/seq_file.c 2011-07-21 22:17:23.000000000 -0400
46852 +++ linux-3.0.8/fs/seq_file.c 2011-08-23 21:47:56.000000000 -0400
46853 @@ -76,7 +76,8 @@ static int traverse(struct seq_file *m,
46854 return 0;
46855 }
46856 if (!m->buf) {
46857 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
46858 + m->size = PAGE_SIZE;
46859 + m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
46860 if (!m->buf)
46861 return -ENOMEM;
46862 }
46863 @@ -116,7 +117,8 @@ static int traverse(struct seq_file *m,
46864 Eoverflow:
46865 m->op->stop(m, p);
46866 kfree(m->buf);
46867 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
46868 + m->size <<= 1;
46869 + m->buf = kmalloc(m->size, GFP_KERNEL);
46870 return !m->buf ? -ENOMEM : -EAGAIN;
46871 }
46872
46873 @@ -169,7 +171,8 @@ ssize_t seq_read(struct file *file, char
46874 m->version = file->f_version;
46875 /* grab buffer if we didn't have one */
46876 if (!m->buf) {
46877 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
46878 + m->size = PAGE_SIZE;
46879 + m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
46880 if (!m->buf)
46881 goto Enomem;
46882 }
46883 @@ -210,7 +213,8 @@ ssize_t seq_read(struct file *file, char
46884 goto Fill;
46885 m->op->stop(m, p);
46886 kfree(m->buf);
46887 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
46888 + m->size <<= 1;
46889 + m->buf = kmalloc(m->size, GFP_KERNEL);
46890 if (!m->buf)
46891 goto Enomem;
46892 m->count = 0;
46893 @@ -549,7 +553,7 @@ static void single_stop(struct seq_file
46894 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
46895 void *data)
46896 {
46897 - struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
46898 + seq_operations_no_const *op = kmalloc(sizeof(*op), GFP_KERNEL);
46899 int res = -ENOMEM;
46900
46901 if (op) {
46902 diff -urNp linux-3.0.8/fs/splice.c linux-3.0.8/fs/splice.c
46903 --- linux-3.0.8/fs/splice.c 2011-07-21 22:17:23.000000000 -0400
46904 +++ linux-3.0.8/fs/splice.c 2011-10-06 04:17:55.000000000 -0400
46905 @@ -194,7 +194,7 @@ ssize_t splice_to_pipe(struct pipe_inode
46906 pipe_lock(pipe);
46907
46908 for (;;) {
46909 - if (!pipe->readers) {
46910 + if (!atomic_read(&pipe->readers)) {
46911 send_sig(SIGPIPE, current, 0);
46912 if (!ret)
46913 ret = -EPIPE;
46914 @@ -248,9 +248,9 @@ ssize_t splice_to_pipe(struct pipe_inode
46915 do_wakeup = 0;
46916 }
46917
46918 - pipe->waiting_writers++;
46919 + atomic_inc(&pipe->waiting_writers);
46920 pipe_wait(pipe);
46921 - pipe->waiting_writers--;
46922 + atomic_dec(&pipe->waiting_writers);
46923 }
46924
46925 pipe_unlock(pipe);
46926 @@ -320,6 +320,8 @@ __generic_file_splice_read(struct file *
46927 .spd_release = spd_release_page,
46928 };
46929
46930 + pax_track_stack();
46931 +
46932 if (splice_grow_spd(pipe, &spd))
46933 return -ENOMEM;
46934
46935 @@ -560,7 +562,7 @@ static ssize_t kernel_readv(struct file
46936 old_fs = get_fs();
46937 set_fs(get_ds());
46938 /* The cast to a user pointer is valid due to the set_fs() */
46939 - res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
46940 + res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
46941 set_fs(old_fs);
46942
46943 return res;
46944 @@ -575,7 +577,7 @@ static ssize_t kernel_write(struct file
46945 old_fs = get_fs();
46946 set_fs(get_ds());
46947 /* The cast to a user pointer is valid due to the set_fs() */
46948 - res = vfs_write(file, (const char __user *)buf, count, &pos);
46949 + res = vfs_write(file, (const char __force_user *)buf, count, &pos);
46950 set_fs(old_fs);
46951
46952 return res;
46953 @@ -603,6 +605,8 @@ ssize_t default_file_splice_read(struct
46954 .spd_release = spd_release_page,
46955 };
46956
46957 + pax_track_stack();
46958 +
46959 if (splice_grow_spd(pipe, &spd))
46960 return -ENOMEM;
46961
46962 @@ -626,7 +630,7 @@ ssize_t default_file_splice_read(struct
46963 goto err;
46964
46965 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
46966 - vec[i].iov_base = (void __user *) page_address(page);
46967 + vec[i].iov_base = (void __force_user *) page_address(page);
46968 vec[i].iov_len = this_len;
46969 spd.pages[i] = page;
46970 spd.nr_pages++;
46971 @@ -846,10 +850,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
46972 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
46973 {
46974 while (!pipe->nrbufs) {
46975 - if (!pipe->writers)
46976 + if (!atomic_read(&pipe->writers))
46977 return 0;
46978
46979 - if (!pipe->waiting_writers && sd->num_spliced)
46980 + if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
46981 return 0;
46982
46983 if (sd->flags & SPLICE_F_NONBLOCK)
46984 @@ -1182,7 +1186,7 @@ ssize_t splice_direct_to_actor(struct fi
46985 * out of the pipe right after the splice_to_pipe(). So set
46986 * PIPE_READERS appropriately.
46987 */
46988 - pipe->readers = 1;
46989 + atomic_set(&pipe->readers, 1);
46990
46991 current->splice_pipe = pipe;
46992 }
46993 @@ -1619,6 +1623,8 @@ static long vmsplice_to_pipe(struct file
46994 };
46995 long ret;
46996
46997 + pax_track_stack();
46998 +
46999 pipe = get_pipe_info(file);
47000 if (!pipe)
47001 return -EBADF;
47002 @@ -1734,9 +1740,9 @@ static int ipipe_prep(struct pipe_inode_
47003 ret = -ERESTARTSYS;
47004 break;
47005 }
47006 - if (!pipe->writers)
47007 + if (!atomic_read(&pipe->writers))
47008 break;
47009 - if (!pipe->waiting_writers) {
47010 + if (!atomic_read(&pipe->waiting_writers)) {
47011 if (flags & SPLICE_F_NONBLOCK) {
47012 ret = -EAGAIN;
47013 break;
47014 @@ -1768,7 +1774,7 @@ static int opipe_prep(struct pipe_inode_
47015 pipe_lock(pipe);
47016
47017 while (pipe->nrbufs >= pipe->buffers) {
47018 - if (!pipe->readers) {
47019 + if (!atomic_read(&pipe->readers)) {
47020 send_sig(SIGPIPE, current, 0);
47021 ret = -EPIPE;
47022 break;
47023 @@ -1781,9 +1787,9 @@ static int opipe_prep(struct pipe_inode_
47024 ret = -ERESTARTSYS;
47025 break;
47026 }
47027 - pipe->waiting_writers++;
47028 + atomic_inc(&pipe->waiting_writers);
47029 pipe_wait(pipe);
47030 - pipe->waiting_writers--;
47031 + atomic_dec(&pipe->waiting_writers);
47032 }
47033
47034 pipe_unlock(pipe);
47035 @@ -1819,14 +1825,14 @@ retry:
47036 pipe_double_lock(ipipe, opipe);
47037
47038 do {
47039 - if (!opipe->readers) {
47040 + if (!atomic_read(&opipe->readers)) {
47041 send_sig(SIGPIPE, current, 0);
47042 if (!ret)
47043 ret = -EPIPE;
47044 break;
47045 }
47046
47047 - if (!ipipe->nrbufs && !ipipe->writers)
47048 + if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
47049 break;
47050
47051 /*
47052 @@ -1923,7 +1929,7 @@ static int link_pipe(struct pipe_inode_i
47053 pipe_double_lock(ipipe, opipe);
47054
47055 do {
47056 - if (!opipe->readers) {
47057 + if (!atomic_read(&opipe->readers)) {
47058 send_sig(SIGPIPE, current, 0);
47059 if (!ret)
47060 ret = -EPIPE;
47061 @@ -1968,7 +1974,7 @@ static int link_pipe(struct pipe_inode_i
47062 * return EAGAIN if we have the potential of some data in the
47063 * future, otherwise just return 0
47064 */
47065 - if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
47066 + if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
47067 ret = -EAGAIN;
47068
47069 pipe_unlock(ipipe);
47070 diff -urNp linux-3.0.8/fs/sysfs/file.c linux-3.0.8/fs/sysfs/file.c
47071 --- linux-3.0.8/fs/sysfs/file.c 2011-07-21 22:17:23.000000000 -0400
47072 +++ linux-3.0.8/fs/sysfs/file.c 2011-08-23 21:47:56.000000000 -0400
47073 @@ -37,7 +37,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent
47074
47075 struct sysfs_open_dirent {
47076 atomic_t refcnt;
47077 - atomic_t event;
47078 + atomic_unchecked_t event;
47079 wait_queue_head_t poll;
47080 struct list_head buffers; /* goes through sysfs_buffer.list */
47081 };
47082 @@ -81,7 +81,7 @@ static int fill_read_buffer(struct dentr
47083 if (!sysfs_get_active(attr_sd))
47084 return -ENODEV;
47085
47086 - buffer->event = atomic_read(&attr_sd->s_attr.open->event);
47087 + buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
47088 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
47089
47090 sysfs_put_active(attr_sd);
47091 @@ -287,7 +287,7 @@ static int sysfs_get_open_dirent(struct
47092 return -ENOMEM;
47093
47094 atomic_set(&new_od->refcnt, 0);
47095 - atomic_set(&new_od->event, 1);
47096 + atomic_set_unchecked(&new_od->event, 1);
47097 init_waitqueue_head(&new_od->poll);
47098 INIT_LIST_HEAD(&new_od->buffers);
47099 goto retry;
47100 @@ -432,7 +432,7 @@ static unsigned int sysfs_poll(struct fi
47101
47102 sysfs_put_active(attr_sd);
47103
47104 - if (buffer->event != atomic_read(&od->event))
47105 + if (buffer->event != atomic_read_unchecked(&od->event))
47106 goto trigger;
47107
47108 return DEFAULT_POLLMASK;
47109 @@ -451,7 +451,7 @@ void sysfs_notify_dirent(struct sysfs_di
47110
47111 od = sd->s_attr.open;
47112 if (od) {
47113 - atomic_inc(&od->event);
47114 + atomic_inc_unchecked(&od->event);
47115 wake_up_interruptible(&od->poll);
47116 }
47117
47118 diff -urNp linux-3.0.8/fs/sysfs/mount.c linux-3.0.8/fs/sysfs/mount.c
47119 --- linux-3.0.8/fs/sysfs/mount.c 2011-07-21 22:17:23.000000000 -0400
47120 +++ linux-3.0.8/fs/sysfs/mount.c 2011-08-23 21:48:14.000000000 -0400
47121 @@ -36,7 +36,11 @@ struct sysfs_dirent sysfs_root = {
47122 .s_name = "",
47123 .s_count = ATOMIC_INIT(1),
47124 .s_flags = SYSFS_DIR | (KOBJ_NS_TYPE_NONE << SYSFS_NS_TYPE_SHIFT),
47125 +#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
47126 + .s_mode = S_IFDIR | S_IRWXU,
47127 +#else
47128 .s_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
47129 +#endif
47130 .s_ino = 1,
47131 };
47132
47133 diff -urNp linux-3.0.8/fs/sysfs/symlink.c linux-3.0.8/fs/sysfs/symlink.c
47134 --- linux-3.0.8/fs/sysfs/symlink.c 2011-07-21 22:17:23.000000000 -0400
47135 +++ linux-3.0.8/fs/sysfs/symlink.c 2011-08-23 21:47:56.000000000 -0400
47136 @@ -286,7 +286,7 @@ static void *sysfs_follow_link(struct de
47137
47138 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
47139 {
47140 - char *page = nd_get_link(nd);
47141 + const char *page = nd_get_link(nd);
47142 if (!IS_ERR(page))
47143 free_page((unsigned long)page);
47144 }
47145 diff -urNp linux-3.0.8/fs/udf/inode.c linux-3.0.8/fs/udf/inode.c
47146 --- linux-3.0.8/fs/udf/inode.c 2011-07-21 22:17:23.000000000 -0400
47147 +++ linux-3.0.8/fs/udf/inode.c 2011-08-23 21:48:14.000000000 -0400
47148 @@ -560,6 +560,8 @@ static struct buffer_head *inode_getblk(
47149 int goal = 0, pgoal = iinfo->i_location.logicalBlockNum;
47150 int lastblock = 0;
47151
47152 + pax_track_stack();
47153 +
47154 prev_epos.offset = udf_file_entry_alloc_offset(inode);
47155 prev_epos.block = iinfo->i_location;
47156 prev_epos.bh = NULL;
47157 diff -urNp linux-3.0.8/fs/udf/misc.c linux-3.0.8/fs/udf/misc.c
47158 --- linux-3.0.8/fs/udf/misc.c 2011-07-21 22:17:23.000000000 -0400
47159 +++ linux-3.0.8/fs/udf/misc.c 2011-08-23 21:47:56.000000000 -0400
47160 @@ -286,7 +286,7 @@ void udf_new_tag(char *data, uint16_t id
47161
47162 u8 udf_tag_checksum(const struct tag *t)
47163 {
47164 - u8 *data = (u8 *)t;
47165 + const u8 *data = (const u8 *)t;
47166 u8 checksum = 0;
47167 int i;
47168 for (i = 0; i < sizeof(struct tag); ++i)
47169 diff -urNp linux-3.0.8/fs/utimes.c linux-3.0.8/fs/utimes.c
47170 --- linux-3.0.8/fs/utimes.c 2011-07-21 22:17:23.000000000 -0400
47171 +++ linux-3.0.8/fs/utimes.c 2011-08-23 21:48:14.000000000 -0400
47172 @@ -1,6 +1,7 @@
47173 #include <linux/compiler.h>
47174 #include <linux/file.h>
47175 #include <linux/fs.h>
47176 +#include <linux/security.h>
47177 #include <linux/linkage.h>
47178 #include <linux/mount.h>
47179 #include <linux/namei.h>
47180 @@ -101,6 +102,12 @@ static int utimes_common(struct path *pa
47181 goto mnt_drop_write_and_out;
47182 }
47183 }
47184 +
47185 + if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
47186 + error = -EACCES;
47187 + goto mnt_drop_write_and_out;
47188 + }
47189 +
47190 mutex_lock(&inode->i_mutex);
47191 error = notify_change(path->dentry, &newattrs);
47192 mutex_unlock(&inode->i_mutex);
47193 diff -urNp linux-3.0.8/fs/xattr_acl.c linux-3.0.8/fs/xattr_acl.c
47194 --- linux-3.0.8/fs/xattr_acl.c 2011-07-21 22:17:23.000000000 -0400
47195 +++ linux-3.0.8/fs/xattr_acl.c 2011-08-23 21:47:56.000000000 -0400
47196 @@ -17,8 +17,8 @@
47197 struct posix_acl *
47198 posix_acl_from_xattr(const void *value, size_t size)
47199 {
47200 - posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
47201 - posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
47202 + const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
47203 + const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
47204 int count;
47205 struct posix_acl *acl;
47206 struct posix_acl_entry *acl_e;
47207 diff -urNp linux-3.0.8/fs/xattr.c linux-3.0.8/fs/xattr.c
47208 --- linux-3.0.8/fs/xattr.c 2011-07-21 22:17:23.000000000 -0400
47209 +++ linux-3.0.8/fs/xattr.c 2011-08-23 21:48:14.000000000 -0400
47210 @@ -254,7 +254,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
47211 * Extended attribute SET operations
47212 */
47213 static long
47214 -setxattr(struct dentry *d, const char __user *name, const void __user *value,
47215 +setxattr(struct path *path, const char __user *name, const void __user *value,
47216 size_t size, int flags)
47217 {
47218 int error;
47219 @@ -278,7 +278,13 @@ setxattr(struct dentry *d, const char __
47220 return PTR_ERR(kvalue);
47221 }
47222
47223 - error = vfs_setxattr(d, kname, kvalue, size, flags);
47224 + if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
47225 + error = -EACCES;
47226 + goto out;
47227 + }
47228 +
47229 + error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
47230 +out:
47231 kfree(kvalue);
47232 return error;
47233 }
47234 @@ -295,7 +301,7 @@ SYSCALL_DEFINE5(setxattr, const char __u
47235 return error;
47236 error = mnt_want_write(path.mnt);
47237 if (!error) {
47238 - error = setxattr(path.dentry, name, value, size, flags);
47239 + error = setxattr(&path, name, value, size, flags);
47240 mnt_drop_write(path.mnt);
47241 }
47242 path_put(&path);
47243 @@ -314,7 +320,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __
47244 return error;
47245 error = mnt_want_write(path.mnt);
47246 if (!error) {
47247 - error = setxattr(path.dentry, name, value, size, flags);
47248 + error = setxattr(&path, name, value, size, flags);
47249 mnt_drop_write(path.mnt);
47250 }
47251 path_put(&path);
47252 @@ -325,17 +331,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, cons
47253 const void __user *,value, size_t, size, int, flags)
47254 {
47255 struct file *f;
47256 - struct dentry *dentry;
47257 int error = -EBADF;
47258
47259 f = fget(fd);
47260 if (!f)
47261 return error;
47262 - dentry = f->f_path.dentry;
47263 - audit_inode(NULL, dentry);
47264 + audit_inode(NULL, f->f_path.dentry);
47265 error = mnt_want_write_file(f);
47266 if (!error) {
47267 - error = setxattr(dentry, name, value, size, flags);
47268 + error = setxattr(&f->f_path, name, value, size, flags);
47269 mnt_drop_write(f->f_path.mnt);
47270 }
47271 fput(f);
47272 diff -urNp linux-3.0.8/fs/xfs/linux-2.6/xfs_ioctl32.c linux-3.0.8/fs/xfs/linux-2.6/xfs_ioctl32.c
47273 --- linux-3.0.8/fs/xfs/linux-2.6/xfs_ioctl32.c 2011-07-21 22:17:23.000000000 -0400
47274 +++ linux-3.0.8/fs/xfs/linux-2.6/xfs_ioctl32.c 2011-08-23 21:48:14.000000000 -0400
47275 @@ -73,6 +73,7 @@ xfs_compat_ioc_fsgeometry_v1(
47276 xfs_fsop_geom_t fsgeo;
47277 int error;
47278
47279 + memset(&fsgeo, 0, sizeof(fsgeo));
47280 error = xfs_fs_geometry(mp, &fsgeo, 3);
47281 if (error)
47282 return -error;
47283 diff -urNp linux-3.0.8/fs/xfs/linux-2.6/xfs_ioctl.c linux-3.0.8/fs/xfs/linux-2.6/xfs_ioctl.c
47284 --- linux-3.0.8/fs/xfs/linux-2.6/xfs_ioctl.c 2011-07-21 22:17:23.000000000 -0400
47285 +++ linux-3.0.8/fs/xfs/linux-2.6/xfs_ioctl.c 2011-08-23 21:47:56.000000000 -0400
47286 @@ -128,7 +128,7 @@ xfs_find_handle(
47287 }
47288
47289 error = -EFAULT;
47290 - if (copy_to_user(hreq->ohandle, &handle, hsize) ||
47291 + if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
47292 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
47293 goto out_put;
47294
47295 diff -urNp linux-3.0.8/fs/xfs/linux-2.6/xfs_iops.c linux-3.0.8/fs/xfs/linux-2.6/xfs_iops.c
47296 --- linux-3.0.8/fs/xfs/linux-2.6/xfs_iops.c 2011-07-21 22:17:23.000000000 -0400
47297 +++ linux-3.0.8/fs/xfs/linux-2.6/xfs_iops.c 2011-08-23 21:47:56.000000000 -0400
47298 @@ -437,7 +437,7 @@ xfs_vn_put_link(
47299 struct nameidata *nd,
47300 void *p)
47301 {
47302 - char *s = nd_get_link(nd);
47303 + const char *s = nd_get_link(nd);
47304
47305 if (!IS_ERR(s))
47306 kfree(s);
47307 diff -urNp linux-3.0.8/fs/xfs/xfs_bmap.c linux-3.0.8/fs/xfs/xfs_bmap.c
47308 --- linux-3.0.8/fs/xfs/xfs_bmap.c 2011-07-21 22:17:23.000000000 -0400
47309 +++ linux-3.0.8/fs/xfs/xfs_bmap.c 2011-08-23 21:47:56.000000000 -0400
47310 @@ -253,7 +253,7 @@ xfs_bmap_validate_ret(
47311 int nmap,
47312 int ret_nmap);
47313 #else
47314 -#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
47315 +#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
47316 #endif /* DEBUG */
47317
47318 STATIC int
47319 diff -urNp linux-3.0.8/fs/xfs/xfs_dir2_sf.c linux-3.0.8/fs/xfs/xfs_dir2_sf.c
47320 --- linux-3.0.8/fs/xfs/xfs_dir2_sf.c 2011-07-21 22:17:23.000000000 -0400
47321 +++ linux-3.0.8/fs/xfs/xfs_dir2_sf.c 2011-08-23 21:47:56.000000000 -0400
47322 @@ -780,7 +780,15 @@ xfs_dir2_sf_getdents(
47323 }
47324
47325 ino = xfs_dir2_sf_get_inumber(sfp, xfs_dir2_sf_inumberp(sfep));
47326 - if (filldir(dirent, (char *)sfep->name, sfep->namelen,
47327 + if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
47328 + char name[sfep->namelen];
47329 + memcpy(name, sfep->name, sfep->namelen);
47330 + if (filldir(dirent, name, sfep->namelen,
47331 + off & 0x7fffffff, ino, DT_UNKNOWN)) {
47332 + *offset = off & 0x7fffffff;
47333 + return 0;
47334 + }
47335 + } else if (filldir(dirent, (char *)sfep->name, sfep->namelen,
47336 off & 0x7fffffff, ino, DT_UNKNOWN)) {
47337 *offset = off & 0x7fffffff;
47338 return 0;
47339 diff -urNp linux-3.0.8/grsecurity/gracl_alloc.c linux-3.0.8/grsecurity/gracl_alloc.c
47340 --- linux-3.0.8/grsecurity/gracl_alloc.c 1969-12-31 19:00:00.000000000 -0500
47341 +++ linux-3.0.8/grsecurity/gracl_alloc.c 2011-08-23 21:48:14.000000000 -0400
47342 @@ -0,0 +1,105 @@
47343 +#include <linux/kernel.h>
47344 +#include <linux/mm.h>
47345 +#include <linux/slab.h>
47346 +#include <linux/vmalloc.h>
47347 +#include <linux/gracl.h>
47348 +#include <linux/grsecurity.h>
47349 +
47350 +static unsigned long alloc_stack_next = 1;
47351 +static unsigned long alloc_stack_size = 1;
47352 +static void **alloc_stack;
47353 +
47354 +static __inline__ int
47355 +alloc_pop(void)
47356 +{
47357 + if (alloc_stack_next == 1)
47358 + return 0;
47359 +
47360 + kfree(alloc_stack[alloc_stack_next - 2]);
47361 +
47362 + alloc_stack_next--;
47363 +
47364 + return 1;
47365 +}
47366 +
47367 +static __inline__ int
47368 +alloc_push(void *buf)
47369 +{
47370 + if (alloc_stack_next >= alloc_stack_size)
47371 + return 1;
47372 +
47373 + alloc_stack[alloc_stack_next - 1] = buf;
47374 +
47375 + alloc_stack_next++;
47376 +
47377 + return 0;
47378 +}
47379 +
47380 +void *
47381 +acl_alloc(unsigned long len)
47382 +{
47383 + void *ret = NULL;
47384 +
47385 + if (!len || len > PAGE_SIZE)
47386 + goto out;
47387 +
47388 + ret = kmalloc(len, GFP_KERNEL);
47389 +
47390 + if (ret) {
47391 + if (alloc_push(ret)) {
47392 + kfree(ret);
47393 + ret = NULL;
47394 + }
47395 + }
47396 +
47397 +out:
47398 + return ret;
47399 +}
47400 +
47401 +void *
47402 +acl_alloc_num(unsigned long num, unsigned long len)
47403 +{
47404 + if (!len || (num > (PAGE_SIZE / len)))
47405 + return NULL;
47406 +
47407 + return acl_alloc(num * len);
47408 +}
47409 +
47410 +void
47411 +acl_free_all(void)
47412 +{
47413 + if (gr_acl_is_enabled() || !alloc_stack)
47414 + return;
47415 +
47416 + while (alloc_pop()) ;
47417 +
47418 + if (alloc_stack) {
47419 + if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
47420 + kfree(alloc_stack);
47421 + else
47422 + vfree(alloc_stack);
47423 + }
47424 +
47425 + alloc_stack = NULL;
47426 + alloc_stack_size = 1;
47427 + alloc_stack_next = 1;
47428 +
47429 + return;
47430 +}
47431 +
47432 +int
47433 +acl_alloc_stack_init(unsigned long size)
47434 +{
47435 + if ((size * sizeof (void *)) <= PAGE_SIZE)
47436 + alloc_stack =
47437 + (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
47438 + else
47439 + alloc_stack = (void **) vmalloc(size * sizeof (void *));
47440 +
47441 + alloc_stack_size = size;
47442 +
47443 + if (!alloc_stack)
47444 + return 0;
47445 + else
47446 + return 1;
47447 +}
47448 diff -urNp linux-3.0.8/grsecurity/gracl.c linux-3.0.8/grsecurity/gracl.c
47449 --- linux-3.0.8/grsecurity/gracl.c 1969-12-31 19:00:00.000000000 -0500
47450 +++ linux-3.0.8/grsecurity/gracl.c 2011-10-17 06:42:59.000000000 -0400
47451 @@ -0,0 +1,4154 @@
47452 +#include <linux/kernel.h>
47453 +#include <linux/module.h>
47454 +#include <linux/sched.h>
47455 +#include <linux/mm.h>
47456 +#include <linux/file.h>
47457 +#include <linux/fs.h>
47458 +#include <linux/namei.h>
47459 +#include <linux/mount.h>
47460 +#include <linux/tty.h>
47461 +#include <linux/proc_fs.h>
47462 +#include <linux/lglock.h>
47463 +#include <linux/slab.h>
47464 +#include <linux/vmalloc.h>
47465 +#include <linux/types.h>
47466 +#include <linux/sysctl.h>
47467 +#include <linux/netdevice.h>
47468 +#include <linux/ptrace.h>
47469 +#include <linux/gracl.h>
47470 +#include <linux/gralloc.h>
47471 +#include <linux/grsecurity.h>
47472 +#include <linux/grinternal.h>
47473 +#include <linux/pid_namespace.h>
47474 +#include <linux/fdtable.h>
47475 +#include <linux/percpu.h>
47476 +
47477 +#include <asm/uaccess.h>
47478 +#include <asm/errno.h>
47479 +#include <asm/mman.h>
47480 +
47481 +static struct acl_role_db acl_role_set;
47482 +static struct name_db name_set;
47483 +static struct inodev_db inodev_set;
47484 +
47485 +/* for keeping track of userspace pointers used for subjects, so we
47486 + can share references in the kernel as well
47487 +*/
47488 +
47489 +static struct path real_root;
47490 +
47491 +static struct acl_subj_map_db subj_map_set;
47492 +
47493 +static struct acl_role_label *default_role;
47494 +
47495 +static struct acl_role_label *role_list;
47496 +
47497 +static u16 acl_sp_role_value;
47498 +
47499 +extern char *gr_shared_page[4];
47500 +static DEFINE_MUTEX(gr_dev_mutex);
47501 +DEFINE_RWLOCK(gr_inode_lock);
47502 +
47503 +struct gr_arg *gr_usermode;
47504 +
47505 +static unsigned int gr_status __read_only = GR_STATUS_INIT;
47506 +
47507 +extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
47508 +extern void gr_clear_learn_entries(void);
47509 +
47510 +#ifdef CONFIG_GRKERNSEC_RESLOG
47511 +extern void gr_log_resource(const struct task_struct *task,
47512 + const int res, const unsigned long wanted, const int gt);
47513 +#endif
47514 +
47515 +unsigned char *gr_system_salt;
47516 +unsigned char *gr_system_sum;
47517 +
47518 +static struct sprole_pw **acl_special_roles = NULL;
47519 +static __u16 num_sprole_pws = 0;
47520 +
47521 +static struct acl_role_label *kernel_role = NULL;
47522 +
47523 +static unsigned int gr_auth_attempts = 0;
47524 +static unsigned long gr_auth_expires = 0UL;
47525 +
47526 +#ifdef CONFIG_NET
47527 +extern struct vfsmount *sock_mnt;
47528 +#endif
47529 +
47530 +extern struct vfsmount *pipe_mnt;
47531 +extern struct vfsmount *shm_mnt;
47532 +#ifdef CONFIG_HUGETLBFS
47533 +extern struct vfsmount *hugetlbfs_vfsmount;
47534 +#endif
47535 +
47536 +static struct acl_object_label *fakefs_obj_rw;
47537 +static struct acl_object_label *fakefs_obj_rwx;
47538 +
47539 +extern int gr_init_uidset(void);
47540 +extern void gr_free_uidset(void);
47541 +extern void gr_remove_uid(uid_t uid);
47542 +extern int gr_find_uid(uid_t uid);
47543 +
47544 +DECLARE_BRLOCK(vfsmount_lock);
47545 +
47546 +__inline__ int
47547 +gr_acl_is_enabled(void)
47548 +{
47549 + return (gr_status & GR_READY);
47550 +}
47551 +
47552 +#ifdef CONFIG_BTRFS_FS
47553 +extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
47554 +extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
47555 +#endif
47556 +
47557 +static inline dev_t __get_dev(const struct dentry *dentry)
47558 +{
47559 +#ifdef CONFIG_BTRFS_FS
47560 + if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
47561 + return get_btrfs_dev_from_inode(dentry->d_inode);
47562 + else
47563 +#endif
47564 + return dentry->d_inode->i_sb->s_dev;
47565 +}
47566 +
47567 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
47568 +{
47569 + return __get_dev(dentry);
47570 +}
47571 +
47572 +static char gr_task_roletype_to_char(struct task_struct *task)
47573 +{
47574 + switch (task->role->roletype &
47575 + (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
47576 + GR_ROLE_SPECIAL)) {
47577 + case GR_ROLE_DEFAULT:
47578 + return 'D';
47579 + case GR_ROLE_USER:
47580 + return 'U';
47581 + case GR_ROLE_GROUP:
47582 + return 'G';
47583 + case GR_ROLE_SPECIAL:
47584 + return 'S';
47585 + }
47586 +
47587 + return 'X';
47588 +}
47589 +
47590 +char gr_roletype_to_char(void)
47591 +{
47592 + return gr_task_roletype_to_char(current);
47593 +}
47594 +
47595 +__inline__ int
47596 +gr_acl_tpe_check(void)
47597 +{
47598 + if (unlikely(!(gr_status & GR_READY)))
47599 + return 0;
47600 + if (current->role->roletype & GR_ROLE_TPE)
47601 + return 1;
47602 + else
47603 + return 0;
47604 +}
47605 +
47606 +int
47607 +gr_handle_rawio(const struct inode *inode)
47608 +{
47609 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
47610 + if (inode && S_ISBLK(inode->i_mode) &&
47611 + grsec_enable_chroot_caps && proc_is_chrooted(current) &&
47612 + !capable(CAP_SYS_RAWIO))
47613 + return 1;
47614 +#endif
47615 + return 0;
47616 +}
47617 +
47618 +static int
47619 +gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
47620 +{
47621 + if (likely(lena != lenb))
47622 + return 0;
47623 +
47624 + return !memcmp(a, b, lena);
47625 +}
47626 +
47627 +static int prepend(char **buffer, int *buflen, const char *str, int namelen)
47628 +{
47629 + *buflen -= namelen;
47630 + if (*buflen < 0)
47631 + return -ENAMETOOLONG;
47632 + *buffer -= namelen;
47633 + memcpy(*buffer, str, namelen);
47634 + return 0;
47635 +}
47636 +
47637 +static int prepend_name(char **buffer, int *buflen, struct qstr *name)
47638 +{
47639 + return prepend(buffer, buflen, name->name, name->len);
47640 +}
47641 +
47642 +static int prepend_path(const struct path *path, struct path *root,
47643 + char **buffer, int *buflen)
47644 +{
47645 + struct dentry *dentry = path->dentry;
47646 + struct vfsmount *vfsmnt = path->mnt;
47647 + bool slash = false;
47648 + int error = 0;
47649 +
47650 + while (dentry != root->dentry || vfsmnt != root->mnt) {
47651 + struct dentry * parent;
47652 +
47653 + if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
47654 + /* Global root? */
47655 + if (vfsmnt->mnt_parent == vfsmnt) {
47656 + goto out;
47657 + }
47658 + dentry = vfsmnt->mnt_mountpoint;
47659 + vfsmnt = vfsmnt->mnt_parent;
47660 + continue;
47661 + }
47662 + parent = dentry->d_parent;
47663 + prefetch(parent);
47664 + spin_lock(&dentry->d_lock);
47665 + error = prepend_name(buffer, buflen, &dentry->d_name);
47666 + spin_unlock(&dentry->d_lock);
47667 + if (!error)
47668 + error = prepend(buffer, buflen, "/", 1);
47669 + if (error)
47670 + break;
47671 +
47672 + slash = true;
47673 + dentry = parent;
47674 + }
47675 +
47676 +out:
47677 + if (!error && !slash)
47678 + error = prepend(buffer, buflen, "/", 1);
47679 +
47680 + return error;
47681 +}
47682 +
47683 +/* this must be called with vfsmount_lock and rename_lock held */
47684 +
47685 +static char *__our_d_path(const struct path *path, struct path *root,
47686 + char *buf, int buflen)
47687 +{
47688 + char *res = buf + buflen;
47689 + int error;
47690 +
47691 + prepend(&res, &buflen, "\0", 1);
47692 + error = prepend_path(path, root, &res, &buflen);
47693 + if (error)
47694 + return ERR_PTR(error);
47695 +
47696 + return res;
47697 +}
47698 +
47699 +static char *
47700 +gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
47701 +{
47702 + char *retval;
47703 +
47704 + retval = __our_d_path(path, root, buf, buflen);
47705 + if (unlikely(IS_ERR(retval)))
47706 + retval = strcpy(buf, "<path too long>");
47707 + else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
47708 + retval[1] = '\0';
47709 +
47710 + return retval;
47711 +}
47712 +
47713 +static char *
47714 +__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
47715 + char *buf, int buflen)
47716 +{
47717 + struct path path;
47718 + char *res;
47719 +
47720 + path.dentry = (struct dentry *)dentry;
47721 + path.mnt = (struct vfsmount *)vfsmnt;
47722 +
47723 + /* we can use real_root.dentry, real_root.mnt, because this is only called
47724 + by the RBAC system */
47725 + res = gen_full_path(&path, &real_root, buf, buflen);
47726 +
47727 + return res;
47728 +}
47729 +
47730 +static char *
47731 +d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
47732 + char *buf, int buflen)
47733 +{
47734 + char *res;
47735 + struct path path;
47736 + struct path root;
47737 + struct task_struct *reaper = &init_task;
47738 +
47739 + path.dentry = (struct dentry *)dentry;
47740 + path.mnt = (struct vfsmount *)vfsmnt;
47741 +
47742 + /* we can't use real_root.dentry, real_root.mnt, because they belong only to the RBAC system */
47743 + get_fs_root(reaper->fs, &root);
47744 +
47745 + write_seqlock(&rename_lock);
47746 + br_read_lock(vfsmount_lock);
47747 + res = gen_full_path(&path, &root, buf, buflen);
47748 + br_read_unlock(vfsmount_lock);
47749 + write_sequnlock(&rename_lock);
47750 +
47751 + path_put(&root);
47752 + return res;
47753 +}
47754 +
47755 +static char *
47756 +gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
47757 +{
47758 + char *ret;
47759 + write_seqlock(&rename_lock);
47760 + br_read_lock(vfsmount_lock);
47761 + ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
47762 + PAGE_SIZE);
47763 + br_read_unlock(vfsmount_lock);
47764 + write_sequnlock(&rename_lock);
47765 + return ret;
47766 +}
47767 +
47768 +static char *
47769 +gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
47770 +{
47771 + char *ret;
47772 + char *buf;
47773 + int buflen;
47774 +
47775 + write_seqlock(&rename_lock);
47776 + br_read_lock(vfsmount_lock);
47777 + buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
47778 + ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
47779 + buflen = (int)(ret - buf);
47780 + if (buflen >= 5)
47781 + prepend(&ret, &buflen, "/proc", 5);
47782 + else
47783 + ret = strcpy(buf, "<path too long>");
47784 + br_read_unlock(vfsmount_lock);
47785 + write_sequnlock(&rename_lock);
47786 + return ret;
47787 +}
47788 +
47789 +char *
47790 +gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
47791 +{
47792 + return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
47793 + PAGE_SIZE);
47794 +}
47795 +
47796 +char *
47797 +gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
47798 +{
47799 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
47800 + PAGE_SIZE);
47801 +}
47802 +
47803 +char *
47804 +gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
47805 +{
47806 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
47807 + PAGE_SIZE);
47808 +}
47809 +
47810 +char *
47811 +gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
47812 +{
47813 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
47814 + PAGE_SIZE);
47815 +}
47816 +
47817 +char *
47818 +gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
47819 +{
47820 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
47821 + PAGE_SIZE);
47822 +}
47823 +
47824 +__inline__ __u32
47825 +to_gr_audit(const __u32 reqmode)
47826 +{
47827 + /* masks off auditable permission flags, then shifts them to create
47828 + auditing flags, and adds the special case of append auditing if
47829 + we're requesting write */
47830 + return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
47831 +}
47832 +
47833 +struct acl_subject_label *
47834 +lookup_subject_map(const struct acl_subject_label *userp)
47835 +{
47836 + unsigned int index = shash(userp, subj_map_set.s_size);
47837 + struct subject_map *match;
47838 +
47839 + match = subj_map_set.s_hash[index];
47840 +
47841 + while (match && match->user != userp)
47842 + match = match->next;
47843 +
47844 + if (match != NULL)
47845 + return match->kernel;
47846 + else
47847 + return NULL;
47848 +}
47849 +
47850 +static void
47851 +insert_subj_map_entry(struct subject_map *subjmap)
47852 +{
47853 + unsigned int index = shash(subjmap->user, subj_map_set.s_size);
47854 + struct subject_map **curr;
47855 +
47856 + subjmap->prev = NULL;
47857 +
47858 + curr = &subj_map_set.s_hash[index];
47859 + if (*curr != NULL)
47860 + (*curr)->prev = subjmap;
47861 +
47862 + subjmap->next = *curr;
47863 + *curr = subjmap;
47864 +
47865 + return;
47866 +}
47867 +
47868 +static struct acl_role_label *
47869 +lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
47870 + const gid_t gid)
47871 +{
47872 + unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
47873 + struct acl_role_label *match;
47874 + struct role_allowed_ip *ipp;
47875 + unsigned int x;
47876 + u32 curr_ip = task->signal->curr_ip;
47877 +
47878 + task->signal->saved_ip = curr_ip;
47879 +
47880 + match = acl_role_set.r_hash[index];
47881 +
47882 + while (match) {
47883 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
47884 + for (x = 0; x < match->domain_child_num; x++) {
47885 + if (match->domain_children[x] == uid)
47886 + goto found;
47887 + }
47888 + } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
47889 + break;
47890 + match = match->next;
47891 + }
47892 +found:
47893 + if (match == NULL) {
47894 + try_group:
47895 + index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
47896 + match = acl_role_set.r_hash[index];
47897 +
47898 + while (match) {
47899 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
47900 + for (x = 0; x < match->domain_child_num; x++) {
47901 + if (match->domain_children[x] == gid)
47902 + goto found2;
47903 + }
47904 + } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
47905 + break;
47906 + match = match->next;
47907 + }
47908 +found2:
47909 + if (match == NULL)
47910 + match = default_role;
47911 + if (match->allowed_ips == NULL)
47912 + return match;
47913 + else {
47914 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
47915 + if (likely
47916 + ((ntohl(curr_ip) & ipp->netmask) ==
47917 + (ntohl(ipp->addr) & ipp->netmask)))
47918 + return match;
47919 + }
47920 + match = default_role;
47921 + }
47922 + } else if (match->allowed_ips == NULL) {
47923 + return match;
47924 + } else {
47925 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
47926 + if (likely
47927 + ((ntohl(curr_ip) & ipp->netmask) ==
47928 + (ntohl(ipp->addr) & ipp->netmask)))
47929 + return match;
47930 + }
47931 + goto try_group;
47932 + }
47933 +
47934 + return match;
47935 +}
47936 +
47937 +struct acl_subject_label *
47938 +lookup_acl_subj_label(const ino_t ino, const dev_t dev,
47939 + const struct acl_role_label *role)
47940 +{
47941 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
47942 + struct acl_subject_label *match;
47943 +
47944 + match = role->subj_hash[index];
47945 +
47946 + while (match && (match->inode != ino || match->device != dev ||
47947 + (match->mode & GR_DELETED))) {
47948 + match = match->next;
47949 + }
47950 +
47951 + if (match && !(match->mode & GR_DELETED))
47952 + return match;
47953 + else
47954 + return NULL;
47955 +}
47956 +
47957 +struct acl_subject_label *
47958 +lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
47959 + const struct acl_role_label *role)
47960 +{
47961 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
47962 + struct acl_subject_label *match;
47963 +
47964 + match = role->subj_hash[index];
47965 +
47966 + while (match && (match->inode != ino || match->device != dev ||
47967 + !(match->mode & GR_DELETED))) {
47968 + match = match->next;
47969 + }
47970 +
47971 + if (match && (match->mode & GR_DELETED))
47972 + return match;
47973 + else
47974 + return NULL;
47975 +}
47976 +
47977 +static struct acl_object_label *
47978 +lookup_acl_obj_label(const ino_t ino, const dev_t dev,
47979 + const struct acl_subject_label *subj)
47980 +{
47981 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
47982 + struct acl_object_label *match;
47983 +
47984 + match = subj->obj_hash[index];
47985 +
47986 + while (match && (match->inode != ino || match->device != dev ||
47987 + (match->mode & GR_DELETED))) {
47988 + match = match->next;
47989 + }
47990 +
47991 + if (match && !(match->mode & GR_DELETED))
47992 + return match;
47993 + else
47994 + return NULL;
47995 +}
47996 +
47997 +static struct acl_object_label *
47998 +lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
47999 + const struct acl_subject_label *subj)
48000 +{
48001 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
48002 + struct acl_object_label *match;
48003 +
48004 + match = subj->obj_hash[index];
48005 +
48006 + while (match && (match->inode != ino || match->device != dev ||
48007 + !(match->mode & GR_DELETED))) {
48008 + match = match->next;
48009 + }
48010 +
48011 + if (match && (match->mode & GR_DELETED))
48012 + return match;
48013 +
48014 + match = subj->obj_hash[index];
48015 +
48016 + while (match && (match->inode != ino || match->device != dev ||
48017 + (match->mode & GR_DELETED))) {
48018 + match = match->next;
48019 + }
48020 +
48021 + if (match && !(match->mode & GR_DELETED))
48022 + return match;
48023 + else
48024 + return NULL;
48025 +}
48026 +
48027 +static struct name_entry *
48028 +lookup_name_entry(const char *name)
48029 +{
48030 + unsigned int len = strlen(name);
48031 + unsigned int key = full_name_hash(name, len);
48032 + unsigned int index = key % name_set.n_size;
48033 + struct name_entry *match;
48034 +
48035 + match = name_set.n_hash[index];
48036 +
48037 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
48038 + match = match->next;
48039 +
48040 + return match;
48041 +}
48042 +
48043 +static struct name_entry *
48044 +lookup_name_entry_create(const char *name)
48045 +{
48046 + unsigned int len = strlen(name);
48047 + unsigned int key = full_name_hash(name, len);
48048 + unsigned int index = key % name_set.n_size;
48049 + struct name_entry *match;
48050 +
48051 + match = name_set.n_hash[index];
48052 +
48053 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
48054 + !match->deleted))
48055 + match = match->next;
48056 +
48057 + if (match && match->deleted)
48058 + return match;
48059 +
48060 + match = name_set.n_hash[index];
48061 +
48062 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
48063 + match->deleted))
48064 + match = match->next;
48065 +
48066 + if (match && !match->deleted)
48067 + return match;
48068 + else
48069 + return NULL;
48070 +}
48071 +
48072 +static struct inodev_entry *
48073 +lookup_inodev_entry(const ino_t ino, const dev_t dev)
48074 +{
48075 + unsigned int index = fhash(ino, dev, inodev_set.i_size);
48076 + struct inodev_entry *match;
48077 +
48078 + match = inodev_set.i_hash[index];
48079 +
48080 + while (match && (match->nentry->inode != ino || match->nentry->device != dev))
48081 + match = match->next;
48082 +
48083 + return match;
48084 +}
48085 +
48086 +static void
48087 +insert_inodev_entry(struct inodev_entry *entry)
48088 +{
48089 + unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
48090 + inodev_set.i_size);
48091 + struct inodev_entry **curr;
48092 +
48093 + entry->prev = NULL;
48094 +
48095 + curr = &inodev_set.i_hash[index];
48096 + if (*curr != NULL)
48097 + (*curr)->prev = entry;
48098 +
48099 + entry->next = *curr;
48100 + *curr = entry;
48101 +
48102 + return;
48103 +}
48104 +
48105 +static void
48106 +__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
48107 +{
48108 + unsigned int index =
48109 + rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
48110 + struct acl_role_label **curr;
48111 + struct acl_role_label *tmp;
48112 +
48113 + curr = &acl_role_set.r_hash[index];
48114 +
48115 +	/* if the role was already inserted due to domains and already has
48116 +	   another role attached in the same hash bucket, then we need to
48117 +	   combine the two bucket chains
48118 +	 */
48119 + if (role->next) {
48120 + tmp = role->next;
48121 + while (tmp->next)
48122 + tmp = tmp->next;
48123 + tmp->next = *curr;
48124 + } else
48125 + role->next = *curr;
48126 + *curr = role;
48127 +
48128 + return;
48129 +}
48130 +
48131 +static void
48132 +insert_acl_role_label(struct acl_role_label *role)
48133 +{
48134 + int i;
48135 +
48136 + if (role_list == NULL) {
48137 + role_list = role;
48138 + role->prev = NULL;
48139 + } else {
48140 + role->prev = role_list;
48141 + role_list = role;
48142 + }
48143 +
48144 + /* used for hash chains */
48145 + role->next = NULL;
48146 +
48147 + if (role->roletype & GR_ROLE_DOMAIN) {
48148 + for (i = 0; i < role->domain_child_num; i++)
48149 + __insert_acl_role_label(role, role->domain_children[i]);
48150 + } else
48151 + __insert_acl_role_label(role, role->uidgid);
48152 +}
48153 +
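+/* add a name to the name hash (if not already present) together with a
+   matching entry in the inode/device hash */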
48154 +static int
48155 +insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
48156 +{
48157 + struct name_entry **curr, *nentry;
48158 + struct inodev_entry *ientry;
48159 + unsigned int len = strlen(name);
48160 + unsigned int key = full_name_hash(name, len);
48161 + unsigned int index = key % name_set.n_size;
48162 +
48163 + curr = &name_set.n_hash[index];
48164 +
48165 + while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
48166 + curr = &((*curr)->next);
48167 +
48168 + if (*curr != NULL)
48169 + return 1;
48170 +
48171 + nentry = acl_alloc(sizeof (struct name_entry));
48172 + if (nentry == NULL)
48173 + return 0;
48174 + ientry = acl_alloc(sizeof (struct inodev_entry));
48175 + if (ientry == NULL)
48176 + return 0;
48177 + ientry->nentry = nentry;
48178 +
48179 + nentry->key = key;
48180 + nentry->name = name;
48181 + nentry->inode = inode;
48182 + nentry->device = device;
48183 + nentry->len = len;
48184 + nentry->deleted = deleted;
48185 +
48186 + nentry->prev = NULL;
48187 + curr = &name_set.n_hash[index];
48188 + if (*curr != NULL)
48189 + (*curr)->prev = nentry;
48190 + nentry->next = *curr;
48191 + *curr = nentry;
48192 +
48193 + /* insert us into the table searchable by inode/dev */
48194 + insert_inodev_entry(ientry);
48195 +
48196 + return 1;
48197 +}
48198 +
48199 +static void
48200 +insert_acl_obj_label(struct acl_object_label *obj,
48201 + struct acl_subject_label *subj)
48202 +{
48203 + unsigned int index =
48204 + fhash(obj->inode, obj->device, subj->obj_hash_size);
48205 + struct acl_object_label **curr;
48206 +
48207 +
48208 + obj->prev = NULL;
48209 +
48210 + curr = &subj->obj_hash[index];
48211 + if (*curr != NULL)
48212 + (*curr)->prev = obj;
48213 +
48214 + obj->next = *curr;
48215 + *curr = obj;
48216 +
48217 + return;
48218 +}
48219 +
48220 +static void
48221 +insert_acl_subj_label(struct acl_subject_label *obj,
48222 + struct acl_role_label *role)
48223 +{
48224 + unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
48225 + struct acl_subject_label **curr;
48226 +
48227 + obj->prev = NULL;
48228 +
48229 + curr = &role->subj_hash[index];
48230 + if (*curr != NULL)
48231 + (*curr)->prev = obj;
48232 +
48233 + obj->next = *curr;
48234 + *curr = obj;
48235 +
48236 + return;
48237 +}
48238 +
48239 +/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
48240 +
48241 +static void *
48242 +create_table(__u32 * len, int elementsize)
48243 +{
48244 + unsigned int table_sizes[] = {
48245 + 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
48246 + 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
48247 + 4194301, 8388593, 16777213, 33554393, 67108859
48248 + };
48249 + void *newtable = NULL;
48250 + unsigned int pwr = 0;
48251 +
48252 + while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
48253 + table_sizes[pwr] <= *len)
48254 + pwr++;
48255 +
48256 + if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
48257 + return newtable;
48258 +
48259 + if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
48260 + newtable =
48261 + kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
48262 + else
48263 + newtable = vmalloc(table_sizes[pwr] * elementsize);
48264 +
48265 + *len = table_sizes[pwr];
48266 +
48267 + return newtable;
48268 +}
48269 +
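+/* size and allocate the global role, subject-map, name and inode/device
+   hash tables from the counts supplied by userspace */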
48270 +static int
48271 +init_variables(const struct gr_arg *arg)
48272 +{
48273 + struct task_struct *reaper = &init_task;
48274 + unsigned int stacksize;
48275 +
48276 + subj_map_set.s_size = arg->role_db.num_subjects;
48277 + acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
48278 + name_set.n_size = arg->role_db.num_objects;
48279 + inodev_set.i_size = arg->role_db.num_objects;
48280 +
48281 + if (!subj_map_set.s_size || !acl_role_set.r_size ||
48282 + !name_set.n_size || !inodev_set.i_size)
48283 + return 1;
48284 +
48285 + if (!gr_init_uidset())
48286 + return 1;
48287 +
48288 + /* set up the stack that holds allocation info */
48289 +
48290 + stacksize = arg->role_db.num_pointers + 5;
48291 +
48292 + if (!acl_alloc_stack_init(stacksize))
48293 + return 1;
48294 +
48295 + /* grab reference for the real root dentry and vfsmount */
48296 + get_fs_root(reaper->fs, &real_root);
48297 +
48298 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
48299 + printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root.dentry), real_root.dentry->d_inode->i_ino);
48300 +#endif
48301 +
48302 + fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
48303 + if (fakefs_obj_rw == NULL)
48304 + return 1;
48305 + fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
48306 +
48307 + fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
48308 + if (fakefs_obj_rwx == NULL)
48309 + return 1;
48310 + fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
48311 +
48312 + subj_map_set.s_hash =
48313 + (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
48314 + acl_role_set.r_hash =
48315 + (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
48316 + name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
48317 + inodev_set.i_hash =
48318 + (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
48319 +
48320 + if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
48321 + !name_set.n_hash || !inodev_set.i_hash)
48322 + return 1;
48323 +
48324 + memset(subj_map_set.s_hash, 0,
48325 + sizeof(struct subject_map *) * subj_map_set.s_size);
48326 + memset(acl_role_set.r_hash, 0,
48327 + sizeof (struct acl_role_label *) * acl_role_set.r_size);
48328 + memset(name_set.n_hash, 0,
48329 + sizeof (struct name_entry *) * name_set.n_size);
48330 + memset(inodev_set.i_hash, 0,
48331 + sizeof (struct inodev_entry *) * inodev_set.i_size);
48332 +
48333 + return 0;
48334 +}
48335 +
48336 +/* free information not needed after startup
48337 + currently contains user->kernel pointer mappings for subjects
48338 +*/
48339 +
48340 +static void
48341 +free_init_variables(void)
48342 +{
48343 + __u32 i;
48344 +
48345 + if (subj_map_set.s_hash) {
48346 + for (i = 0; i < subj_map_set.s_size; i++) {
48347 + if (subj_map_set.s_hash[i]) {
48348 + kfree(subj_map_set.s_hash[i]);
48349 + subj_map_set.s_hash[i] = NULL;
48350 + }
48351 + }
48352 +
48353 + if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
48354 + PAGE_SIZE)
48355 + kfree(subj_map_set.s_hash);
48356 + else
48357 + vfree(subj_map_set.s_hash);
48358 + }
48359 +
48360 + return;
48361 +}
48362 +
48363 +static void
48364 +free_variables(void)
48365 +{
48366 + struct acl_subject_label *s;
48367 + struct acl_role_label *r;
48368 + struct task_struct *task, *task2;
48369 + unsigned int x;
48370 +
48371 + gr_clear_learn_entries();
48372 +
48373 + read_lock(&tasklist_lock);
48374 + do_each_thread(task2, task) {
48375 + task->acl_sp_role = 0;
48376 + task->acl_role_id = 0;
48377 + task->acl = NULL;
48378 + task->role = NULL;
48379 + } while_each_thread(task2, task);
48380 + read_unlock(&tasklist_lock);
48381 +
48382 + /* release the reference to the real root dentry and vfsmount */
48383 + path_put(&real_root);
48384 +
48385 + /* free all object hash tables */
48386 +
48387 + FOR_EACH_ROLE_START(r)
48388 + if (r->subj_hash == NULL)
48389 + goto next_role;
48390 + FOR_EACH_SUBJECT_START(r, s, x)
48391 + if (s->obj_hash == NULL)
48392 + break;
48393 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
48394 + kfree(s->obj_hash);
48395 + else
48396 + vfree(s->obj_hash);
48397 + FOR_EACH_SUBJECT_END(s, x)
48398 + FOR_EACH_NESTED_SUBJECT_START(r, s)
48399 + if (s->obj_hash == NULL)
48400 + break;
48401 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
48402 + kfree(s->obj_hash);
48403 + else
48404 + vfree(s->obj_hash);
48405 + FOR_EACH_NESTED_SUBJECT_END(s)
48406 + if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
48407 + kfree(r->subj_hash);
48408 + else
48409 + vfree(r->subj_hash);
48410 + r->subj_hash = NULL;
48411 +next_role:
48412 + FOR_EACH_ROLE_END(r)
48413 +
48414 + acl_free_all();
48415 +
48416 + if (acl_role_set.r_hash) {
48417 + if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
48418 + PAGE_SIZE)
48419 + kfree(acl_role_set.r_hash);
48420 + else
48421 + vfree(acl_role_set.r_hash);
48422 + }
48423 + if (name_set.n_hash) {
48424 + if ((name_set.n_size * sizeof (struct name_entry *)) <=
48425 + PAGE_SIZE)
48426 + kfree(name_set.n_hash);
48427 + else
48428 + vfree(name_set.n_hash);
48429 + }
48430 +
48431 + if (inodev_set.i_hash) {
48432 + if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
48433 + PAGE_SIZE)
48434 + kfree(inodev_set.i_hash);
48435 + else
48436 + vfree(inodev_set.i_hash);
48437 + }
48438 +
48439 + gr_free_uidset();
48440 +
48441 + memset(&name_set, 0, sizeof (struct name_db));
48442 + memset(&inodev_set, 0, sizeof (struct inodev_db));
48443 + memset(&acl_role_set, 0, sizeof (struct acl_role_db));
48444 + memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
48445 +
48446 + default_role = NULL;
48447 + role_list = NULL;
48448 +
48449 + return;
48450 +}
48451 +
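+/* walk the userspace-chained object list and count its entries */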
48452 +static __u32
48453 +count_user_objs(struct acl_object_label *userp)
48454 +{
48455 + struct acl_object_label o_tmp;
48456 + __u32 num = 0;
48457 +
48458 + while (userp) {
48459 + if (copy_from_user(&o_tmp, userp,
48460 + sizeof (struct acl_object_label)))
48461 + break;
48462 +
48463 + userp = o_tmp.prev;
48464 + num++;
48465 + }
48466 +
48467 + return num;
48468 +}
48469 +
48470 +static struct acl_subject_label *
48471 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
48472 +
48473 +static int
48474 +copy_user_glob(struct acl_object_label *obj)
48475 +{
48476 + struct acl_object_label *g_tmp, **guser;
48477 + unsigned int len;
48478 + char *tmp;
48479 +
48480 + if (obj->globbed == NULL)
48481 + return 0;
48482 +
48483 + guser = &obj->globbed;
48484 + while (*guser) {
48485 + g_tmp = (struct acl_object_label *)
48486 + acl_alloc(sizeof (struct acl_object_label));
48487 + if (g_tmp == NULL)
48488 + return -ENOMEM;
48489 +
48490 + if (copy_from_user(g_tmp, *guser,
48491 + sizeof (struct acl_object_label)))
48492 + return -EFAULT;
48493 +
48494 + len = strnlen_user(g_tmp->filename, PATH_MAX);
48495 +
48496 + if (!len || len >= PATH_MAX)
48497 + return -EINVAL;
48498 +
48499 + if ((tmp = (char *) acl_alloc(len)) == NULL)
48500 + return -ENOMEM;
48501 +
48502 + if (copy_from_user(tmp, g_tmp->filename, len))
48503 + return -EFAULT;
48504 + tmp[len-1] = '\0';
48505 + g_tmp->filename = tmp;
48506 +
48507 + *guser = g_tmp;
48508 + guser = &(g_tmp->next);
48509 + }
48510 +
48511 + return 0;
48512 +}
48513 +
48514 +static int
48515 +copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
48516 + struct acl_role_label *role)
48517 +{
48518 + struct acl_object_label *o_tmp;
48519 + unsigned int len;
48520 + int ret;
48521 + char *tmp;
48522 +
48523 + while (userp) {
48524 + if ((o_tmp = (struct acl_object_label *)
48525 + acl_alloc(sizeof (struct acl_object_label))) == NULL)
48526 + return -ENOMEM;
48527 +
48528 + if (copy_from_user(o_tmp, userp,
48529 + sizeof (struct acl_object_label)))
48530 + return -EFAULT;
48531 +
48532 + userp = o_tmp->prev;
48533 +
48534 + len = strnlen_user(o_tmp->filename, PATH_MAX);
48535 +
48536 + if (!len || len >= PATH_MAX)
48537 + return -EINVAL;
48538 +
48539 + if ((tmp = (char *) acl_alloc(len)) == NULL)
48540 + return -ENOMEM;
48541 +
48542 + if (copy_from_user(tmp, o_tmp->filename, len))
48543 + return -EFAULT;
48544 + tmp[len-1] = '\0';
48545 + o_tmp->filename = tmp;
48546 +
48547 + insert_acl_obj_label(o_tmp, subj);
48548 + if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
48549 + o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
48550 + return -ENOMEM;
48551 +
48552 + ret = copy_user_glob(o_tmp);
48553 + if (ret)
48554 + return ret;
48555 +
48556 + if (o_tmp->nested) {
48557 + o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
48558 + if (IS_ERR(o_tmp->nested))
48559 + return PTR_ERR(o_tmp->nested);
48560 +
48561 + /* insert into nested subject list */
48562 + o_tmp->nested->next = role->hash->first;
48563 + role->hash->first = o_tmp->nested;
48564 + }
48565 + }
48566 +
48567 + return 0;
48568 +}
48569 +
48570 +static __u32
48571 +count_user_subjs(struct acl_subject_label *userp)
48572 +{
48573 + struct acl_subject_label s_tmp;
48574 + __u32 num = 0;
48575 +
48576 + while (userp) {
48577 + if (copy_from_user(&s_tmp, userp,
48578 + sizeof (struct acl_subject_label)))
48579 + break;
48580 +
48581 + userp = s_tmp.prev;
48582 + /* do not count nested subjects against this count, since
48583 + they are not included in the hash table, but are
48584 + attached to objects. We have already counted
48585 + the subjects in userspace for the allocation
48586 + stack
48587 + */
48588 + if (!(s_tmp.mode & GR_NESTED))
48589 + num++;
48590 + }
48591 +
48592 + return num;
48593 +}
48594 +
48595 +static int
48596 +copy_user_allowedips(struct acl_role_label *rolep)
48597 +{
48598 + struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
48599 +
48600 + ruserip = rolep->allowed_ips;
48601 +
48602 + while (ruserip) {
48603 + rlast = rtmp;
48604 +
48605 + if ((rtmp = (struct role_allowed_ip *)
48606 + acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
48607 + return -ENOMEM;
48608 +
48609 + if (copy_from_user(rtmp, ruserip,
48610 + sizeof (struct role_allowed_ip)))
48611 + return -EFAULT;
48612 +
48613 + ruserip = rtmp->prev;
48614 +
48615 + if (!rlast) {
48616 + rtmp->prev = NULL;
48617 + rolep->allowed_ips = rtmp;
48618 + } else {
48619 + rlast->next = rtmp;
48620 + rtmp->prev = rlast;
48621 + }
48622 +
48623 + if (!ruserip)
48624 + rtmp->next = NULL;
48625 + }
48626 +
48627 + return 0;
48628 +}
48629 +
48630 +static int
48631 +copy_user_transitions(struct acl_role_label *rolep)
48632 +{
48633 + struct role_transition *rusertp, *rtmp = NULL, *rlast;
48634 +
48635 + unsigned int len;
48636 + char *tmp;
48637 +
48638 + rusertp = rolep->transitions;
48639 +
48640 + while (rusertp) {
48641 + rlast = rtmp;
48642 +
48643 + if ((rtmp = (struct role_transition *)
48644 + acl_alloc(sizeof (struct role_transition))) == NULL)
48645 + return -ENOMEM;
48646 +
48647 + if (copy_from_user(rtmp, rusertp,
48648 + sizeof (struct role_transition)))
48649 + return -EFAULT;
48650 +
48651 + rusertp = rtmp->prev;
48652 +
48653 + len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
48654 +
48655 + if (!len || len >= GR_SPROLE_LEN)
48656 + return -EINVAL;
48657 +
48658 + if ((tmp = (char *) acl_alloc(len)) == NULL)
48659 + return -ENOMEM;
48660 +
48661 + if (copy_from_user(tmp, rtmp->rolename, len))
48662 + return -EFAULT;
48663 + tmp[len-1] = '\0';
48664 + rtmp->rolename = tmp;
48665 +
48666 + if (!rlast) {
48667 + rtmp->prev = NULL;
48668 + rolep->transitions = rtmp;
48669 + } else {
48670 + rlast->next = rtmp;
48671 + rtmp->prev = rlast;
48672 + }
48673 +
48674 + if (!rusertp)
48675 + rtmp->next = NULL;
48676 + }
48677 +
48678 + return 0;
48679 +}
48680 +
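+/* copy a single subject label in from userspace: its filename, user/group
+   transition tables, object hash and IP ACLs; the parent subject is copied
+   recursively and nested subjects are handled while copying objects */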
48681 +static struct acl_subject_label *
48682 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
48683 +{
48684 + struct acl_subject_label *s_tmp = NULL, *s_tmp2;
48685 + unsigned int len;
48686 + char *tmp;
48687 + __u32 num_objs;
48688 + struct acl_ip_label **i_tmp, *i_utmp2;
48689 + struct gr_hash_struct ghash;
48690 + struct subject_map *subjmap;
48691 + unsigned int i_num;
48692 + int err;
48693 +
48694 + s_tmp = lookup_subject_map(userp);
48695 +
48696 + /* we've already copied this subject into the kernel, just return
48697 + the reference to it, and don't copy it over again
48698 + */
48699 + if (s_tmp)
48700 + return(s_tmp);
48701 +
48702 + if ((s_tmp = (struct acl_subject_label *)
48703 + acl_alloc(sizeof (struct acl_subject_label))) == NULL)
48704 + return ERR_PTR(-ENOMEM);
48705 +
48706 + subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
48707 + if (subjmap == NULL)
48708 + return ERR_PTR(-ENOMEM);
48709 +
48710 + subjmap->user = userp;
48711 + subjmap->kernel = s_tmp;
48712 + insert_subj_map_entry(subjmap);
48713 +
48714 + if (copy_from_user(s_tmp, userp,
48715 + sizeof (struct acl_subject_label)))
48716 + return ERR_PTR(-EFAULT);
48717 +
48718 + len = strnlen_user(s_tmp->filename, PATH_MAX);
48719 +
48720 + if (!len || len >= PATH_MAX)
48721 + return ERR_PTR(-EINVAL);
48722 +
48723 + if ((tmp = (char *) acl_alloc(len)) == NULL)
48724 + return ERR_PTR(-ENOMEM);
48725 +
48726 + if (copy_from_user(tmp, s_tmp->filename, len))
48727 + return ERR_PTR(-EFAULT);
48728 + tmp[len-1] = '\0';
48729 + s_tmp->filename = tmp;
48730 +
48731 + if (!strcmp(s_tmp->filename, "/"))
48732 + role->root_label = s_tmp;
48733 +
48734 + if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
48735 + return ERR_PTR(-EFAULT);
48736 +
48737 + /* copy user and group transition tables */
48738 +
48739 + if (s_tmp->user_trans_num) {
48740 + uid_t *uidlist;
48741 +
48742 + uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
48743 + if (uidlist == NULL)
48744 + return ERR_PTR(-ENOMEM);
48745 + if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
48746 + return ERR_PTR(-EFAULT);
48747 +
48748 + s_tmp->user_transitions = uidlist;
48749 + }
48750 +
48751 + if (s_tmp->group_trans_num) {
48752 + gid_t *gidlist;
48753 +
48754 + gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
48755 + if (gidlist == NULL)
48756 + return ERR_PTR(-ENOMEM);
48757 + if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
48758 + return ERR_PTR(-EFAULT);
48759 +
48760 + s_tmp->group_transitions = gidlist;
48761 + }
48762 +
48763 + /* set up object hash table */
48764 + num_objs = count_user_objs(ghash.first);
48765 +
48766 + s_tmp->obj_hash_size = num_objs;
48767 + s_tmp->obj_hash =
48768 + (struct acl_object_label **)
48769 + create_table(&(s_tmp->obj_hash_size), sizeof(void *));
48770 +
48771 + if (!s_tmp->obj_hash)
48772 + return ERR_PTR(-ENOMEM);
48773 +
48774 + memset(s_tmp->obj_hash, 0,
48775 + s_tmp->obj_hash_size *
48776 + sizeof (struct acl_object_label *));
48777 +
48778 + /* add in objects */
48779 + err = copy_user_objs(ghash.first, s_tmp, role);
48780 +
48781 + if (err)
48782 + return ERR_PTR(err);
48783 +
48784 + /* set pointer for parent subject */
48785 + if (s_tmp->parent_subject) {
48786 + s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
48787 +
48788 + if (IS_ERR(s_tmp2))
48789 + return s_tmp2;
48790 +
48791 + s_tmp->parent_subject = s_tmp2;
48792 + }
48793 +
48794 + /* add in ip acls */
48795 +
48796 + if (!s_tmp->ip_num) {
48797 + s_tmp->ips = NULL;
48798 + goto insert;
48799 + }
48800 +
48801 + i_tmp =
48802 + (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
48803 + sizeof (struct acl_ip_label *));
48804 +
48805 + if (!i_tmp)
48806 + return ERR_PTR(-ENOMEM);
48807 +
48808 + for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
48809 + *(i_tmp + i_num) =
48810 + (struct acl_ip_label *)
48811 + acl_alloc(sizeof (struct acl_ip_label));
48812 + if (!*(i_tmp + i_num))
48813 + return ERR_PTR(-ENOMEM);
48814 +
48815 + if (copy_from_user
48816 + (&i_utmp2, s_tmp->ips + i_num,
48817 + sizeof (struct acl_ip_label *)))
48818 + return ERR_PTR(-EFAULT);
48819 +
48820 + if (copy_from_user
48821 + (*(i_tmp + i_num), i_utmp2,
48822 + sizeof (struct acl_ip_label)))
48823 + return ERR_PTR(-EFAULT);
48824 +
48825 + if ((*(i_tmp + i_num))->iface == NULL)
48826 + continue;
48827 +
48828 + len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
48829 + if (!len || len >= IFNAMSIZ)
48830 + return ERR_PTR(-EINVAL);
48831 + tmp = acl_alloc(len);
48832 + if (tmp == NULL)
48833 + return ERR_PTR(-ENOMEM);
48834 + if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
48835 + return ERR_PTR(-EFAULT);
48836 + (*(i_tmp + i_num))->iface = tmp;
48837 + }
48838 +
48839 + s_tmp->ips = i_tmp;
48840 +
48841 +insert:
48842 + if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
48843 + s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
48844 + return ERR_PTR(-ENOMEM);
48845 +
48846 + return s_tmp;
48847 +}
48848 +
48849 +static int
48850 +copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
48851 +{
48852 + struct acl_subject_label s_pre;
48853 + struct acl_subject_label * ret;
48854 + int err;
48855 +
48856 + while (userp) {
48857 + if (copy_from_user(&s_pre, userp,
48858 + sizeof (struct acl_subject_label)))
48859 + return -EFAULT;
48860 +
48861 + /* do not add nested subjects here, add
48862 + while parsing objects
48863 + */
48864 +
48865 + if (s_pre.mode & GR_NESTED) {
48866 + userp = s_pre.prev;
48867 + continue;
48868 + }
48869 +
48870 + ret = do_copy_user_subj(userp, role);
48871 +
48872 + err = PTR_ERR(ret);
48873 + if (IS_ERR(ret))
48874 + return err;
48875 +
48876 + insert_acl_subj_label(ret, role);
48877 +
48878 + userp = s_pre.prev;
48879 + }
48880 +
48881 + return 0;
48882 +}
48883 +
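+/* top-level policy copy: special role passwords first, then each role with
+   its allowed IPs, domain children, transitions and subject hash */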
48884 +static int
48885 +copy_user_acl(struct gr_arg *arg)
48886 +{
48887 + struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
48888 + struct sprole_pw *sptmp;
48889 + struct gr_hash_struct *ghash;
48890 + uid_t *domainlist;
48891 + unsigned int r_num;
48892 + unsigned int len;
48893 + char *tmp;
48894 + int err = 0;
48895 + __u16 i;
48896 + __u32 num_subjs;
48897 +
48898 + /* we need a default and kernel role */
48899 + if (arg->role_db.num_roles < 2)
48900 + return -EINVAL;
48901 +
48902 + /* copy special role authentication info from userspace */
48903 +
48904 + num_sprole_pws = arg->num_sprole_pws;
48905 + acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
48906 +
48907 + if (!acl_special_roles) {
48908 + err = -ENOMEM;
48909 + goto cleanup;
48910 + }
48911 +
48912 + for (i = 0; i < num_sprole_pws; i++) {
48913 + sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
48914 + if (!sptmp) {
48915 + err = -ENOMEM;
48916 + goto cleanup;
48917 + }
48918 + if (copy_from_user(sptmp, arg->sprole_pws + i,
48919 + sizeof (struct sprole_pw))) {
48920 + err = -EFAULT;
48921 + goto cleanup;
48922 + }
48923 +
48924 + len =
48925 + strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
48926 +
48927 + if (!len || len >= GR_SPROLE_LEN) {
48928 + err = -EINVAL;
48929 + goto cleanup;
48930 + }
48931 +
48932 + if ((tmp = (char *) acl_alloc(len)) == NULL) {
48933 + err = -ENOMEM;
48934 + goto cleanup;
48935 + }
48936 +
48937 + if (copy_from_user(tmp, sptmp->rolename, len)) {
48938 + err = -EFAULT;
48939 + goto cleanup;
48940 + }
48941 + tmp[len-1] = '\0';
48942 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
48943 + printk(KERN_ALERT "Copying special role %s\n", tmp);
48944 +#endif
48945 + sptmp->rolename = tmp;
48946 + acl_special_roles[i] = sptmp;
48947 + }
48948 +
48949 + r_utmp = (struct acl_role_label **) arg->role_db.r_table;
48950 +
48951 + for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
48952 + r_tmp = acl_alloc(sizeof (struct acl_role_label));
48953 +
48954 + if (!r_tmp) {
48955 + err = -ENOMEM;
48956 + goto cleanup;
48957 + }
48958 +
48959 + if (copy_from_user(&r_utmp2, r_utmp + r_num,
48960 + sizeof (struct acl_role_label *))) {
48961 + err = -EFAULT;
48962 + goto cleanup;
48963 + }
48964 +
48965 + if (copy_from_user(r_tmp, r_utmp2,
48966 + sizeof (struct acl_role_label))) {
48967 + err = -EFAULT;
48968 + goto cleanup;
48969 + }
48970 +
48971 + len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
48972 +
48973 +		if (!len || len >= GR_SPROLE_LEN) {
48974 + err = -EINVAL;
48975 + goto cleanup;
48976 + }
48977 +
48978 + if ((tmp = (char *) acl_alloc(len)) == NULL) {
48979 + err = -ENOMEM;
48980 + goto cleanup;
48981 + }
48982 + if (copy_from_user(tmp, r_tmp->rolename, len)) {
48983 + err = -EFAULT;
48984 + goto cleanup;
48985 + }
48986 + tmp[len-1] = '\0';
48987 + r_tmp->rolename = tmp;
48988 +
48989 + if (!strcmp(r_tmp->rolename, "default")
48990 + && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
48991 + default_role = r_tmp;
48992 + } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
48993 + kernel_role = r_tmp;
48994 + }
48995 +
48996 + if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL) {
48997 + err = -ENOMEM;
48998 + goto cleanup;
48999 + }
49000 + if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct))) {
49001 + err = -EFAULT;
49002 + goto cleanup;
49003 + }
49004 +
49005 + r_tmp->hash = ghash;
49006 +
49007 + num_subjs = count_user_subjs(r_tmp->hash->first);
49008 +
49009 + r_tmp->subj_hash_size = num_subjs;
49010 + r_tmp->subj_hash =
49011 + (struct acl_subject_label **)
49012 + create_table(&(r_tmp->subj_hash_size), sizeof(void *));
49013 +
49014 + if (!r_tmp->subj_hash) {
49015 + err = -ENOMEM;
49016 + goto cleanup;
49017 + }
49018 +
49019 + err = copy_user_allowedips(r_tmp);
49020 + if (err)
49021 + goto cleanup;
49022 +
49023 + /* copy domain info */
49024 + if (r_tmp->domain_children != NULL) {
49025 + domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
49026 + if (domainlist == NULL) {
49027 + err = -ENOMEM;
49028 + goto cleanup;
49029 + }
49030 + if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t))) {
49031 + err = -EFAULT;
49032 + goto cleanup;
49033 + }
49034 + r_tmp->domain_children = domainlist;
49035 + }
49036 +
49037 + err = copy_user_transitions(r_tmp);
49038 + if (err)
49039 + goto cleanup;
49040 +
49041 + memset(r_tmp->subj_hash, 0,
49042 + r_tmp->subj_hash_size *
49043 + sizeof (struct acl_subject_label *));
49044 +
49045 + err = copy_user_subjs(r_tmp->hash->first, r_tmp);
49046 +
49047 + if (err)
49048 + goto cleanup;
49049 +
49050 + /* set nested subject list to null */
49051 + r_tmp->hash->first = NULL;
49052 +
49053 + insert_acl_role_label(r_tmp);
49054 + }
49055 +
49056 + goto return_err;
49057 + cleanup:
49058 + free_variables();
49059 + return_err:
49060 + return err;
49061 +
49062 +}
49063 +
49064 +static int
49065 +gracl_init(struct gr_arg *args)
49066 +{
49067 + int error = 0;
49068 +
49069 + memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
49070 + memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
49071 +
49072 + if (init_variables(args)) {
49073 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
49074 + error = -ENOMEM;
49075 + free_variables();
49076 + goto out;
49077 + }
49078 +
49079 + error = copy_user_acl(args);
49080 + free_init_variables();
49081 + if (error) {
49082 + free_variables();
49083 + goto out;
49084 + }
49085 +
49086 + if ((error = gr_set_acls(0))) {
49087 + free_variables();
49088 + goto out;
49089 + }
49090 +
49091 + pax_open_kernel();
49092 + gr_status |= GR_READY;
49093 + pax_close_kernel();
49094 +
49095 + out:
49096 + return error;
49097 +}
49098 +
49099 +/* derived from glibc fnmatch(); returns 0 on match, 1 on no match */
49100 +
49101 +static int
49102 +glob_match(const char *p, const char *n)
49103 +{
49104 + char c;
49105 +
49106 + while ((c = *p++) != '\0') {
49107 + switch (c) {
49108 + case '?':
49109 + if (*n == '\0')
49110 + return 1;
49111 + else if (*n == '/')
49112 + return 1;
49113 + break;
49114 + case '\\':
49115 + if (*n != c)
49116 + return 1;
49117 + break;
49118 + case '*':
49119 + for (c = *p++; c == '?' || c == '*'; c = *p++) {
49120 + if (*n == '/')
49121 + return 1;
49122 + else if (c == '?') {
49123 + if (*n == '\0')
49124 + return 1;
49125 + else
49126 + ++n;
49127 + }
49128 + }
49129 + if (c == '\0') {
49130 + return 0;
49131 + } else {
49132 + const char *endp;
49133 +
49134 + if ((endp = strchr(n, '/')) == NULL)
49135 + endp = n + strlen(n);
49136 +
49137 + if (c == '[') {
49138 + for (--p; n < endp; ++n)
49139 + if (!glob_match(p, n))
49140 + return 0;
49141 + } else if (c == '/') {
49142 + while (*n != '\0' && *n != '/')
49143 + ++n;
49144 + if (*n == '/' && !glob_match(p, n + 1))
49145 + return 0;
49146 + } else {
49147 + for (--p; n < endp; ++n)
49148 + if (*n == c && !glob_match(p, n))
49149 + return 0;
49150 + }
49151 +
49152 + return 1;
49153 + }
49154 + case '[':
49155 + {
49156 + int not;
49157 + char cold;
49158 +
49159 + if (*n == '\0' || *n == '/')
49160 + return 1;
49161 +
49162 + not = (*p == '!' || *p == '^');
49163 + if (not)
49164 + ++p;
49165 +
49166 + c = *p++;
49167 + for (;;) {
49168 + unsigned char fn = (unsigned char)*n;
49169 +
49170 + if (c == '\0')
49171 + return 1;
49172 + else {
49173 + if (c == fn)
49174 + goto matched;
49175 + cold = c;
49176 + c = *p++;
49177 +
49178 + if (c == '-' && *p != ']') {
49179 + unsigned char cend = *p++;
49180 +
49181 + if (cend == '\0')
49182 + return 1;
49183 +
49184 + if (cold <= fn && fn <= cend)
49185 + goto matched;
49186 +
49187 + c = *p++;
49188 + }
49189 + }
49190 +
49191 + if (c == ']')
49192 + break;
49193 + }
49194 + if (!not)
49195 + return 1;
49196 + break;
49197 + matched:
49198 + while (c != ']') {
49199 + if (c == '\0')
49200 + return 1;
49201 +
49202 + c = *p++;
49203 + }
49204 + if (not)
49205 + return 1;
49206 + }
49207 + break;
49208 + default:
49209 + if (c != *n)
49210 + return 1;
49211 + }
49212 +
49213 + ++n;
49214 + }
49215 +
49216 + if (*n == '\0')
49217 + return 0;
49218 +
49219 + if (*n == '/')
49220 + return 0;
49221 +
49222 + return 1;
49223 +}
49224 +
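+/* return the first globbed object whose pattern matches the path, if any */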
49225 +static struct acl_object_label *
49226 +chk_glob_label(struct acl_object_label *globbed,
49227 + struct dentry *dentry, struct vfsmount *mnt, char **path)
49228 +{
49229 + struct acl_object_label *tmp;
49230 +
49231 + if (*path == NULL)
49232 + *path = gr_to_filename_nolock(dentry, mnt);
49233 +
49234 + tmp = globbed;
49235 +
49236 + while (tmp) {
49237 + if (!glob_match(tmp->filename, *path))
49238 + return tmp;
49239 + tmp = tmp->next;
49240 + }
49241 +
49242 + return NULL;
49243 +}
49244 +
49245 +static struct acl_object_label *
49246 +__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
49247 + const ino_t curr_ino, const dev_t curr_dev,
49248 + const struct acl_subject_label *subj, char **path, const int checkglob)
49249 +{
49250 + struct acl_subject_label *tmpsubj;
49251 + struct acl_object_label *retval;
49252 + struct acl_object_label *retval2;
49253 +
49254 + tmpsubj = (struct acl_subject_label *) subj;
49255 + read_lock(&gr_inode_lock);
49256 + do {
49257 + retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
49258 + if (retval) {
49259 + if (checkglob && retval->globbed) {
49260 + retval2 = chk_glob_label(retval->globbed, (struct dentry *)orig_dentry,
49261 + (struct vfsmount *)orig_mnt, path);
49262 + if (retval2)
49263 + retval = retval2;
49264 + }
49265 + break;
49266 + }
49267 + } while ((tmpsubj = tmpsubj->parent_subject));
49268 + read_unlock(&gr_inode_lock);
49269 +
49270 + return retval;
49271 +}
49272 +
49273 +static __inline__ struct acl_object_label *
49274 +full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
49275 + struct dentry *curr_dentry,
49276 + const struct acl_subject_label *subj, char **path, const int checkglob)
49277 +{
49278 + int newglob = checkglob;
49279 + ino_t inode;
49280 + dev_t device;
49281 +
49282 +	/* if we aren't checking a subdirectory of the original path yet, don't do glob checking,
49283 +	   as we don't want a / * rule to match instead of the / object.
49284 +	   Don't do this for create lookups that call this function, though, since they're looking
49285 +	   up on the parent and thus need globbing checks on all paths.
49286 +	*/
49287 + if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
49288 + newglob = GR_NO_GLOB;
49289 +
49290 + spin_lock(&curr_dentry->d_lock);
49291 + inode = curr_dentry->d_inode->i_ino;
49292 + device = __get_dev(curr_dentry);
49293 + spin_unlock(&curr_dentry->d_lock);
49294 +
49295 + return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
49296 +}
49297 +
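+/* walk from the dentry up through mount points towards the real root,
+   returning the most specific object label that applies to the path */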
49298 +static struct acl_object_label *
49299 +__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
49300 + const struct acl_subject_label *subj, char *path, const int checkglob)
49301 +{
49302 + struct dentry *dentry = (struct dentry *) l_dentry;
49303 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
49304 + struct acl_object_label *retval;
49305 + struct dentry *parent;
49306 +
49307 + write_seqlock(&rename_lock);
49308 + br_read_lock(vfsmount_lock);
49309 +
49310 + if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
49311 +#ifdef CONFIG_NET
49312 + mnt == sock_mnt ||
49313 +#endif
49314 +#ifdef CONFIG_HUGETLBFS
49315 + (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
49316 +#endif
49317 + /* ignore Eric Biederman */
49318 + IS_PRIVATE(l_dentry->d_inode))) {
49319 + retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
49320 + goto out;
49321 + }
49322 +
49323 + for (;;) {
49324 + if (dentry == real_root.dentry && mnt == real_root.mnt)
49325 + break;
49326 +
49327 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
49328 + if (mnt->mnt_parent == mnt)
49329 + break;
49330 +
49331 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
49332 + if (retval != NULL)
49333 + goto out;
49334 +
49335 + dentry = mnt->mnt_mountpoint;
49336 + mnt = mnt->mnt_parent;
49337 + continue;
49338 + }
49339 +
49340 + parent = dentry->d_parent;
49341 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
49342 + if (retval != NULL)
49343 + goto out;
49344 +
49345 + dentry = parent;
49346 + }
49347 +
49348 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
49349 +
49350 + /* real_root is pinned so we don't have to hold a reference */
49351 + if (retval == NULL)
49352 + retval = full_lookup(l_dentry, l_mnt, real_root.dentry, subj, &path, checkglob);
49353 +out:
49354 + br_read_unlock(vfsmount_lock);
49355 + write_sequnlock(&rename_lock);
49356 +
49357 + BUG_ON(retval == NULL);
49358 +
49359 + return retval;
49360 +}
49361 +
49362 +static __inline__ struct acl_object_label *
49363 +chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
49364 + const struct acl_subject_label *subj)
49365 +{
49366 + char *path = NULL;
49367 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
49368 +}
49369 +
49370 +static __inline__ struct acl_object_label *
49371 +chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
49372 + const struct acl_subject_label *subj)
49373 +{
49374 + char *path = NULL;
49375 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
49376 +}
49377 +
49378 +static __inline__ struct acl_object_label *
49379 +chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
49380 + const struct acl_subject_label *subj, char *path)
49381 +{
49382 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
49383 +}
49384 +
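+/* same walk as __chk_obj_label, but resolving a subject label for the role */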
49385 +static struct acl_subject_label *
49386 +chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
49387 + const struct acl_role_label *role)
49388 +{
49389 + struct dentry *dentry = (struct dentry *) l_dentry;
49390 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
49391 + struct acl_subject_label *retval;
49392 + struct dentry *parent;
49393 +
49394 + write_seqlock(&rename_lock);
49395 + br_read_lock(vfsmount_lock);
49396 +
49397 + for (;;) {
49398 + if (dentry == real_root.dentry && mnt == real_root.mnt)
49399 + break;
49400 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
49401 + if (mnt->mnt_parent == mnt)
49402 + break;
49403 +
49404 + spin_lock(&dentry->d_lock);
49405 + read_lock(&gr_inode_lock);
49406 + retval =
49407 + lookup_acl_subj_label(dentry->d_inode->i_ino,
49408 + __get_dev(dentry), role);
49409 + read_unlock(&gr_inode_lock);
49410 + spin_unlock(&dentry->d_lock);
49411 + if (retval != NULL)
49412 + goto out;
49413 +
49414 + dentry = mnt->mnt_mountpoint;
49415 + mnt = mnt->mnt_parent;
49416 + continue;
49417 + }
49418 +
49419 + spin_lock(&dentry->d_lock);
49420 + read_lock(&gr_inode_lock);
49421 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
49422 + __get_dev(dentry), role);
49423 + read_unlock(&gr_inode_lock);
49424 + parent = dentry->d_parent;
49425 + spin_unlock(&dentry->d_lock);
49426 +
49427 + if (retval != NULL)
49428 + goto out;
49429 +
49430 + dentry = parent;
49431 + }
49432 +
49433 + spin_lock(&dentry->d_lock);
49434 + read_lock(&gr_inode_lock);
49435 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
49436 + __get_dev(dentry), role);
49437 + read_unlock(&gr_inode_lock);
49438 + spin_unlock(&dentry->d_lock);
49439 +
49440 + if (unlikely(retval == NULL)) {
49441 + /* real_root is pinned, we don't need to hold a reference */
49442 + read_lock(&gr_inode_lock);
49443 + retval = lookup_acl_subj_label(real_root.dentry->d_inode->i_ino,
49444 + __get_dev(real_root.dentry), role);
49445 + read_unlock(&gr_inode_lock);
49446 + }
49447 +out:
49448 + br_read_unlock(vfsmount_lock);
49449 + write_sequnlock(&rename_lock);
49450 +
49451 + BUG_ON(retval == NULL);
49452 +
49453 + return retval;
49454 +}
49455 +
49456 +static void
49457 +gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
49458 +{
49459 + struct task_struct *task = current;
49460 + const struct cred *cred = current_cred();
49461 +
49462 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
49463 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
49464 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
49465 + 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
49466 +
49467 + return;
49468 +}
49469 +
49470 +static void
49471 +gr_log_learn_sysctl(const char *path, const __u32 mode)
49472 +{
49473 + struct task_struct *task = current;
49474 + const struct cred *cred = current_cred();
49475 +
49476 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
49477 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
49478 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
49479 + 1UL, 1UL, path, (unsigned long) mode, &task->signal->saved_ip);
49480 +
49481 + return;
49482 +}
49483 +
49484 +static void
49485 +gr_log_learn_id_change(const char type, const unsigned int real,
49486 + const unsigned int effective, const unsigned int fs)
49487 +{
49488 + struct task_struct *task = current;
49489 + const struct cred *cred = current_cred();
49490 +
49491 + security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
49492 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
49493 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
49494 + type, real, effective, fs, &task->signal->saved_ip);
49495 +
49496 + return;
49497 +}
49498 +
49499 +__u32
49500 +gr_search_file(const struct dentry * dentry, const __u32 mode,
49501 + const struct vfsmount * mnt)
49502 +{
49503 + __u32 retval = mode;
49504 + struct acl_subject_label *curracl;
49505 + struct acl_object_label *currobj;
49506 +
49507 + if (unlikely(!(gr_status & GR_READY)))
49508 + return (mode & ~GR_AUDITS);
49509 +
49510 + curracl = current->acl;
49511 +
49512 + currobj = chk_obj_label(dentry, mnt, curracl);
49513 + retval = currobj->mode & mode;
49514 +
49515 + /* if we're opening a specified transfer file for writing
49516 + (e.g. /dev/initctl), then transfer our role to init
49517 + */
49518 + if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
49519 + current->role->roletype & GR_ROLE_PERSIST)) {
49520 + struct task_struct *task = init_pid_ns.child_reaper;
49521 +
49522 + if (task->role != current->role) {
49523 + task->acl_sp_role = 0;
49524 + task->acl_role_id = current->acl_role_id;
49525 + task->role = current->role;
49526 + rcu_read_lock();
49527 + read_lock(&grsec_exec_file_lock);
49528 + gr_apply_subject_to_task(task);
49529 + read_unlock(&grsec_exec_file_lock);
49530 + rcu_read_unlock();
49531 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
49532 + }
49533 + }
49534 +
49535 + if (unlikely
49536 + ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
49537 + && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
49538 + __u32 new_mode = mode;
49539 +
49540 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
49541 +
49542 + retval = new_mode;
49543 +
49544 + if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
49545 + new_mode |= GR_INHERIT;
49546 +
49547 + if (!(mode & GR_NOLEARN))
49548 + gr_log_learn(dentry, mnt, new_mode);
49549 + }
49550 +
49551 + return retval;
49552 +}
49553 +
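+/* object label for a path about to be created: try the full name first,
+   then fall back to the label of the parent directory */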
49554 +struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
49555 + const struct dentry *parent,
49556 + const struct vfsmount *mnt)
49557 +{
49558 + struct name_entry *match;
49559 + struct acl_object_label *matchpo;
49560 + struct acl_subject_label *curracl;
49561 + char *path;
49562 +
49563 + if (unlikely(!(gr_status & GR_READY)))
49564 + return NULL;
49565 +
49566 + preempt_disable();
49567 + path = gr_to_filename_rbac(new_dentry, mnt);
49568 + match = lookup_name_entry_create(path);
49569 +
49570 + curracl = current->acl;
49571 +
49572 + if (match) {
49573 + read_lock(&gr_inode_lock);
49574 + matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
49575 + read_unlock(&gr_inode_lock);
49576 +
49577 + if (matchpo) {
49578 + preempt_enable();
49579 + return matchpo;
49580 + }
49581 + }
49582 +
49583 + // lookup parent
49584 +
49585 + matchpo = chk_obj_create_label(parent, mnt, curracl, path);
49586 +
49587 + preempt_enable();
49588 + return matchpo;
49589 +}
49590 +
49591 +__u32
49592 +gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
49593 + const struct vfsmount * mnt, const __u32 mode)
49594 +{
49595 + struct acl_object_label *matchpo;
49596 + __u32 retval;
49597 +
49598 + if (unlikely(!(gr_status & GR_READY)))
49599 + return (mode & ~GR_AUDITS);
49600 +
49601 + matchpo = gr_get_create_object(new_dentry, parent, mnt);
49602 +
49603 + retval = matchpo->mode & mode;
49604 +
49605 + if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
49606 + && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
49607 + __u32 new_mode = mode;
49608 +
49609 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
49610 +
49611 + gr_log_learn(new_dentry, mnt, new_mode);
49612 + return new_mode;
49613 + }
49614 +
49615 + return retval;
49616 +}
49617 +
49618 +__u32
49619 +gr_check_link(const struct dentry * new_dentry,
49620 + const struct dentry * parent_dentry,
49621 + const struct vfsmount * parent_mnt,
49622 + const struct dentry * old_dentry, const struct vfsmount * old_mnt)
49623 +{
49624 + struct acl_object_label *obj;
49625 + __u32 oldmode, newmode;
49626 + __u32 needmode;
49627 + __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
49628 + GR_DELETE | GR_INHERIT;
49629 +
49630 + if (unlikely(!(gr_status & GR_READY)))
49631 + return (GR_CREATE | GR_LINK);
49632 +
49633 + obj = chk_obj_label(old_dentry, old_mnt, current->acl);
49634 + oldmode = obj->mode;
49635 +
49636 + obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
49637 + newmode = obj->mode;
49638 +
49639 + needmode = newmode & checkmodes;
49640 +
49641 + // old name for hardlink must have at least the permissions of the new name
49642 + if ((oldmode & needmode) != needmode)
49643 + goto bad;
49644 +
49645 + // if old name had restrictions/auditing, make sure the new name does as well
49646 + needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
49647 +
49648 + // don't allow hardlinking of suid/sgid files without permission
49649 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
49650 + needmode |= GR_SETID;
49651 +
49652 + if ((newmode & needmode) != needmode)
49653 + goto bad;
49654 +
49655 + // enforce minimum permissions
49656 + if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
49657 + return newmode;
49658 +bad:
49659 + needmode = oldmode;
49660 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
49661 + needmode |= GR_SETID;
49662 +
49663 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
49664 + gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
49665 + return (GR_CREATE | GR_LINK);
49666 + } else if (newmode & GR_SUPPRESS)
49667 + return GR_SUPPRESS;
49668 + else
49669 + return 0;
49670 +}
49671 +
49672 +int
49673 +gr_check_hidden_task(const struct task_struct *task)
49674 +{
49675 + if (unlikely(!(gr_status & GR_READY)))
49676 + return 0;
49677 +
49678 + if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
49679 + return 1;
49680 +
49681 + return 0;
49682 +}
49683 +
49684 +int
49685 +gr_check_protected_task(const struct task_struct *task)
49686 +{
49687 + if (unlikely(!(gr_status & GR_READY) || !task))
49688 + return 0;
49689 +
49690 + if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
49691 + task->acl != current->acl)
49692 + return 1;
49693 +
49694 + return 0;
49695 +}
49696 +
49697 +int
49698 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
49699 +{
49700 + struct task_struct *p;
49701 + int ret = 0;
49702 +
49703 + if (unlikely(!(gr_status & GR_READY) || !pid))
49704 + return ret;
49705 +
49706 + read_lock(&tasklist_lock);
49707 + do_each_pid_task(pid, type, p) {
49708 + if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
49709 + p->acl != current->acl) {
49710 + ret = 1;
49711 + goto out;
49712 + }
49713 + } while_each_pid_task(pid, type, p);
49714 +out:
49715 + read_unlock(&tasklist_lock);
49716 +
49717 + return ret;
49718 +}
49719 +
49720 +void
49721 +gr_copy_label(struct task_struct *tsk)
49722 +{
49723 + tsk->signal->used_accept = 0;
49724 + tsk->acl_sp_role = 0;
49725 + tsk->acl_role_id = current->acl_role_id;
49726 + tsk->acl = current->acl;
49727 + tsk->role = current->role;
49728 + tsk->signal->curr_ip = current->signal->curr_ip;
49729 + tsk->signal->saved_ip = current->signal->saved_ip;
49730 + if (current->exec_file)
49731 + get_file(current->exec_file);
49732 + tsk->exec_file = current->exec_file;
49733 + tsk->is_writable = current->is_writable;
49734 + if (unlikely(current->signal->used_accept)) {
49735 + current->signal->curr_ip = 0;
49736 + current->signal->saved_ip = 0;
49737 + }
49738 +
49739 + return;
49740 +}
49741 +
49742 +static void
49743 +gr_set_proc_res(struct task_struct *task)
49744 +{
49745 + struct acl_subject_label *proc;
49746 + unsigned short i;
49747 +
49748 + proc = task->acl;
49749 +
49750 + if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
49751 + return;
49752 +
49753 + for (i = 0; i < RLIM_NLIMITS; i++) {
49754 + if (!(proc->resmask & (1 << i)))
49755 + continue;
49756 +
49757 + task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
49758 + task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
49759 + }
49760 +
49761 + return;
49762 +}
49763 +
49764 +extern int __gr_process_user_ban(struct user_struct *user);
49765 +
49766 +int
49767 +gr_check_user_change(int real, int effective, int fs)
49768 +{
49769 + unsigned int i;
49770 + __u16 num;
49771 + uid_t *uidlist;
49772 + int curuid;
49773 + int realok = 0;
49774 + int effectiveok = 0;
49775 + int fsok = 0;
49776 +
49777 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
49778 + struct user_struct *user;
49779 +
49780 + if (real == -1)
49781 + goto skipit;
49782 +
49783 + user = find_user(real);
49784 + if (user == NULL)
49785 + goto skipit;
49786 +
49787 + if (__gr_process_user_ban(user)) {
49788 + /* for find_user */
49789 + free_uid(user);
49790 + return 1;
49791 + }
49792 +
49793 + /* for find_user */
49794 + free_uid(user);
49795 +
49796 +skipit:
49797 +#endif
49798 +
49799 + if (unlikely(!(gr_status & GR_READY)))
49800 + return 0;
49801 +
49802 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
49803 + gr_log_learn_id_change('u', real, effective, fs);
49804 +
49805 + num = current->acl->user_trans_num;
49806 + uidlist = current->acl->user_transitions;
49807 +
49808 + if (uidlist == NULL)
49809 + return 0;
49810 +
49811 + if (real == -1)
49812 + realok = 1;
49813 + if (effective == -1)
49814 + effectiveok = 1;
49815 + if (fs == -1)
49816 + fsok = 1;
49817 +
49818 + if (current->acl->user_trans_type & GR_ID_ALLOW) {
49819 + for (i = 0; i < num; i++) {
49820 + curuid = (int)uidlist[i];
49821 + if (real == curuid)
49822 + realok = 1;
49823 + if (effective == curuid)
49824 + effectiveok = 1;
49825 + if (fs == curuid)
49826 + fsok = 1;
49827 + }
49828 + } else if (current->acl->user_trans_type & GR_ID_DENY) {
49829 + for (i = 0; i < num; i++) {
49830 + curuid = (int)uidlist[i];
49831 + if (real == curuid)
49832 + break;
49833 + if (effective == curuid)
49834 + break;
49835 + if (fs == curuid)
49836 + break;
49837 + }
49838 + /* not in deny list */
49839 + if (i == num) {
49840 + realok = 1;
49841 + effectiveok = 1;
49842 + fsok = 1;
49843 + }
49844 + }
49845 +
49846 + if (realok && effectiveok && fsok)
49847 + return 0;
49848 + else {
49849 + gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
49850 + return 1;
49851 + }
49852 +}
49853 +
49854 +int
49855 +gr_check_group_change(int real, int effective, int fs)
49856 +{
49857 + unsigned int i;
49858 + __u16 num;
49859 + gid_t *gidlist;
49860 + int curgid;
49861 + int realok = 0;
49862 + int effectiveok = 0;
49863 + int fsok = 0;
49864 +
49865 + if (unlikely(!(gr_status & GR_READY)))
49866 + return 0;
49867 +
49868 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
49869 + gr_log_learn_id_change('g', real, effective, fs);
49870 +
49871 + num = current->acl->group_trans_num;
49872 + gidlist = current->acl->group_transitions;
49873 +
49874 + if (gidlist == NULL)
49875 + return 0;
49876 +
49877 + if (real == -1)
49878 + realok = 1;
49879 + if (effective == -1)
49880 + effectiveok = 1;
49881 + if (fs == -1)
49882 + fsok = 1;
49883 +
49884 + if (current->acl->group_trans_type & GR_ID_ALLOW) {
49885 + for (i = 0; i < num; i++) {
49886 + curgid = (int)gidlist[i];
49887 + if (real == curgid)
49888 + realok = 1;
49889 + if (effective == curgid)
49890 + effectiveok = 1;
49891 + if (fs == curgid)
49892 + fsok = 1;
49893 + }
49894 + } else if (current->acl->group_trans_type & GR_ID_DENY) {
49895 + for (i = 0; i < num; i++) {
49896 + curgid = (int)gidlist[i];
49897 + if (real == curgid)
49898 + break;
49899 + if (effective == curgid)
49900 + break;
49901 + if (fs == curgid)
49902 + break;
49903 + }
49904 + /* not in deny list */
49905 + if (i == num) {
49906 + realok = 1;
49907 + effectiveok = 1;
49908 + fsok = 1;
49909 + }
49910 + }
49911 +
49912 + if (realok && effectiveok && fsok)
49913 + return 0;
49914 + else {
49915 + gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
49916 + return 1;
49917 + }
49918 +}
49919 +
49920 +void
49921 +gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
49922 +{
49923 + struct acl_role_label *role = task->role;
49924 + struct acl_subject_label *subj = NULL;
49925 + struct acl_object_label *obj;
49926 + struct file *filp;
49927 +
49928 + if (unlikely(!(gr_status & GR_READY)))
49929 + return;
49930 +
49931 + filp = task->exec_file;
49932 +
49933 + /* kernel process, we'll give them the kernel role */
49934 + if (unlikely(!filp)) {
49935 + task->role = kernel_role;
49936 + task->acl = kernel_role->root_label;
49937 + return;
49938 + } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
49939 + role = lookup_acl_role_label(task, uid, gid);
49940 +
49941 +	/* perform the subject lookup in the possibly new role;
49942 +	   we can use this result below in the case where role == task->role
49943 +	 */
49944 + subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
49945 +
49946 +	/* if we changed uid/gid but ended up in the same role
49947 +	   and are using inheritance, don't lose the inherited subject:
49948 +	   if the current subject is other than what a normal lookup
49949 +	   would result in, we arrived at it via inheritance, so don't
49950 +	   lose that subject
49951 +	 */
49952 + if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
49953 + (subj == task->acl)))
49954 + task->acl = subj;
49955 +
49956 + task->role = role;
49957 +
49958 + task->is_writable = 0;
49959 +
49960 + /* ignore additional mmap checks for processes that are writable
49961 + by the default ACL */
49962 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
49963 + if (unlikely(obj->mode & GR_WRITE))
49964 + task->is_writable = 1;
49965 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
49966 + if (unlikely(obj->mode & GR_WRITE))
49967 + task->is_writable = 1;
49968 +
49969 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
49970 + printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
49971 +#endif
49972 +
49973 + gr_set_proc_res(task);
49974 +
49975 + return;
49976 +}
49977 +
49978 +int
49979 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
49980 + const int unsafe_share)
49981 +{
49982 + struct task_struct *task = current;
49983 + struct acl_subject_label *newacl;
49984 + struct acl_object_label *obj;
49985 + __u32 retmode;
49986 +
49987 + if (unlikely(!(gr_status & GR_READY)))
49988 + return 0;
49989 +
49990 + newacl = chk_subj_label(dentry, mnt, task->role);
49991 +
49992 + task_lock(task);
49993 + if ((((task->ptrace & PT_PTRACED) || unsafe_share) &&
49994 + !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
49995 + !(task->role->roletype & GR_ROLE_GOD) &&
49996 + !gr_search_file(dentry, GR_PTRACERD, mnt) &&
49997 + !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN)))) {
49998 + task_unlock(task);
49999 + if (unsafe_share)
50000 + gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
50001 + else
50002 + gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
50003 + return -EACCES;
50004 + }
50005 + task_unlock(task);
50006 +
50007 + obj = chk_obj_label(dentry, mnt, task->acl);
50008 + retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
50009 +
50010 + if (!(task->acl->mode & GR_INHERITLEARN) &&
50011 + ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
50012 + if (obj->nested)
50013 + task->acl = obj->nested;
50014 + else
50015 + task->acl = newacl;
50016 + } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
50017 + gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
50018 +
50019 + task->is_writable = 0;
50020 +
50021 + /* ignore additional mmap checks for processes that are writable
50022 + by the default ACL */
50023 + obj = chk_obj_label(dentry, mnt, default_role->root_label);
50024 + if (unlikely(obj->mode & GR_WRITE))
50025 + task->is_writable = 1;
50026 + obj = chk_obj_label(dentry, mnt, task->role->root_label);
50027 + if (unlikely(obj->mode & GR_WRITE))
50028 + task->is_writable = 1;
50029 +
50030 + gr_set_proc_res(task);
50031 +
50032 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
50033 + printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
50034 +#endif
50035 + return 0;
50036 +}
50037 +
50038 +/* always called with valid inodev ptr */
50039 +static void
50040 +do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
50041 +{
50042 + struct acl_object_label *matchpo;
50043 + struct acl_subject_label *matchps;
50044 + struct acl_subject_label *subj;
50045 + struct acl_role_label *role;
50046 + unsigned int x;
50047 +
50048 + FOR_EACH_ROLE_START(role)
50049 + FOR_EACH_SUBJECT_START(role, subj, x)
50050 + if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
50051 + matchpo->mode |= GR_DELETED;
50052 + FOR_EACH_SUBJECT_END(subj,x)
50053 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
50054 + if (subj->inode == ino && subj->device == dev)
50055 + subj->mode |= GR_DELETED;
50056 + FOR_EACH_NESTED_SUBJECT_END(subj)
50057 + if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
50058 + matchps->mode |= GR_DELETED;
50059 + FOR_EACH_ROLE_END(role)
50060 +
50061 + inodev->nentry->deleted = 1;
50062 +
50063 + return;
50064 +}
50065 +
50066 +void
50067 +gr_handle_delete(const ino_t ino, const dev_t dev)
50068 +{
50069 + struct inodev_entry *inodev;
50070 +
50071 + if (unlikely(!(gr_status & GR_READY)))
50072 + return;
50073 +
50074 + write_lock(&gr_inode_lock);
50075 + inodev = lookup_inodev_entry(ino, dev);
50076 + if (inodev != NULL)
50077 + do_handle_delete(inodev, ino, dev);
50078 + write_unlock(&gr_inode_lock);
50079 +
50080 + return;
50081 +}
50082 +
50083 +static void
50084 +update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
50085 + const ino_t newinode, const dev_t newdevice,
50086 + struct acl_subject_label *subj)
50087 +{
50088 + unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
50089 + struct acl_object_label *match;
50090 +
50091 + match = subj->obj_hash[index];
50092 +
50093 + while (match && (match->inode != oldinode ||
50094 + match->device != olddevice ||
50095 + !(match->mode & GR_DELETED)))
50096 + match = match->next;
50097 +
50098 + if (match && (match->inode == oldinode)
50099 + && (match->device == olddevice)
50100 + && (match->mode & GR_DELETED)) {
50101 + if (match->prev == NULL) {
50102 + subj->obj_hash[index] = match->next;
50103 + if (match->next != NULL)
50104 + match->next->prev = NULL;
50105 + } else {
50106 + match->prev->next = match->next;
50107 + if (match->next != NULL)
50108 + match->next->prev = match->prev;
50109 + }
50110 + match->prev = NULL;
50111 + match->next = NULL;
50112 + match->inode = newinode;
50113 + match->device = newdevice;
50114 + match->mode &= ~GR_DELETED;
50115 +
50116 + insert_acl_obj_label(match, subj);
50117 + }
50118 +
50119 + return;
50120 +}
50121 +
50122 +static void
50123 +update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
50124 + const ino_t newinode, const dev_t newdevice,
50125 + struct acl_role_label *role)
50126 +{
50127 + unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
50128 + struct acl_subject_label *match;
50129 +
50130 + match = role->subj_hash[index];
50131 +
50132 + while (match && (match->inode != oldinode ||
50133 + match->device != olddevice ||
50134 + !(match->mode & GR_DELETED)))
50135 + match = match->next;
50136 +
50137 + if (match && (match->inode == oldinode)
50138 + && (match->device == olddevice)
50139 + && (match->mode & GR_DELETED)) {
50140 + if (match->prev == NULL) {
50141 + role->subj_hash[index] = match->next;
50142 + if (match->next != NULL)
50143 + match->next->prev = NULL;
50144 + } else {
50145 + match->prev->next = match->next;
50146 + if (match->next != NULL)
50147 + match->next->prev = match->prev;
50148 + }
50149 + match->prev = NULL;
50150 + match->next = NULL;
50151 + match->inode = newinode;
50152 + match->device = newdevice;
50153 + match->mode &= ~GR_DELETED;
50154 +
50155 + insert_acl_subj_label(match, role);
50156 + }
50157 +
50158 + return;
50159 +}
50160 +
50161 +static void
50162 +update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
50163 + const ino_t newinode, const dev_t newdevice)
50164 +{
50165 + unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
50166 + struct inodev_entry *match;
50167 +
50168 + match = inodev_set.i_hash[index];
50169 +
50170 + while (match && (match->nentry->inode != oldinode ||
50171 + match->nentry->device != olddevice || !match->nentry->deleted))
50172 + match = match->next;
50173 +
50174 + if (match && (match->nentry->inode == oldinode)
50175 + && (match->nentry->device == olddevice) &&
50176 + match->nentry->deleted) {
50177 + if (match->prev == NULL) {
50178 + inodev_set.i_hash[index] = match->next;
50179 + if (match->next != NULL)
50180 + match->next->prev = NULL;
50181 + } else {
50182 + match->prev->next = match->next;
50183 + if (match->next != NULL)
50184 + match->next->prev = match->prev;
50185 + }
50186 + match->prev = NULL;
50187 + match->next = NULL;
50188 + match->nentry->inode = newinode;
50189 + match->nentry->device = newdevice;
50190 + match->nentry->deleted = 0;
50191 +
50192 + insert_inodev_entry(match);
50193 + }
50194 +
50195 + return;
50196 +}
50197 +
50198 +static void
50199 +__do_handle_create(const struct name_entry *matchn, ino_t ino, dev_t dev)
50200 +{
50201 + struct acl_subject_label *subj;
50202 + struct acl_role_label *role;
50203 + unsigned int x;
50204 +
50205 + FOR_EACH_ROLE_START(role)
50206 + update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
50207 +
50208 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
50209 +		if ((subj->inode == matchn->inode) && (subj->device == matchn->device)) {
50210 +			subj->inode = ino;
50211 +			subj->device = dev;
50212 +		}
50213 + FOR_EACH_NESTED_SUBJECT_END(subj)
50214 + FOR_EACH_SUBJECT_START(role, subj, x)
50215 + update_acl_obj_label(matchn->inode, matchn->device,
50216 + ino, dev, subj);
50217 + FOR_EACH_SUBJECT_END(subj,x)
50218 + FOR_EACH_ROLE_END(role)
50219 +
50220 + update_inodev_entry(matchn->inode, matchn->device, ino, dev);
50221 +
50222 + return;
50223 +}
50224 +
50225 +static void
50226 +do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
50227 + const struct vfsmount *mnt)
50228 +{
50229 + ino_t ino = dentry->d_inode->i_ino;
50230 + dev_t dev = __get_dev(dentry);
50231 +
50232 + __do_handle_create(matchn, ino, dev);
50233 +
50234 + return;
50235 +}
50236 +
50237 +void
50238 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
50239 +{
50240 + struct name_entry *matchn;
50241 +
50242 + if (unlikely(!(gr_status & GR_READY)))
50243 + return;
50244 +
50245 + preempt_disable();
50246 + matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
50247 +
50248 + if (unlikely((unsigned long)matchn)) {
50249 + write_lock(&gr_inode_lock);
50250 + do_handle_create(matchn, dentry, mnt);
50251 + write_unlock(&gr_inode_lock);
50252 + }
50253 + preempt_enable();
50254 +
50255 + return;
50256 +}
50257 +
50258 +void
50259 +gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
50260 +{
50261 + struct name_entry *matchn;
50262 +
50263 + if (unlikely(!(gr_status & GR_READY)))
50264 + return;
50265 +
50266 + preempt_disable();
50267 + matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
50268 +
50269 + if (unlikely((unsigned long)matchn)) {
50270 + write_lock(&gr_inode_lock);
50271 + __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
50272 + write_unlock(&gr_inode_lock);
50273 + }
50274 + preempt_enable();
50275 +
50276 + return;
50277 +}
50278 +
50279 +void
50280 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
50281 + struct dentry *old_dentry,
50282 + struct dentry *new_dentry,
50283 + struct vfsmount *mnt, const __u8 replace)
50284 +{
50285 + struct name_entry *matchn;
50286 + struct inodev_entry *inodev;
50287 + ino_t old_ino = old_dentry->d_inode->i_ino;
50288 + dev_t old_dev = __get_dev(old_dentry);
50289 +
50290 + /* vfs_rename swaps the name and parent link for old_dentry and
50291 + new_dentry
50292 + at this point, old_dentry has the new name, parent link, and inode
50293 + for the renamed file
50294 + if a file is being replaced by a rename, new_dentry has the inode
50295 + and name for the replaced file
50296 + */
50297 +
50298 + if (unlikely(!(gr_status & GR_READY)))
50299 + return;
50300 +
50301 + preempt_disable();
50302 + matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
50303 +
50304 + /* we wouldn't have to check d_inode if it weren't for
50305 + NFS silly-renaming
50306 + */
50307 +
50308 + write_lock(&gr_inode_lock);
50309 + if (unlikely(replace && new_dentry->d_inode)) {
50310 + ino_t new_ino = new_dentry->d_inode->i_ino;
50311 + dev_t new_dev = __get_dev(new_dentry);
50312 +
50313 + inodev = lookup_inodev_entry(new_ino, new_dev);
50314 + if (inodev != NULL && (new_dentry->d_inode->i_nlink <= 1))
50315 + do_handle_delete(inodev, new_ino, new_dev);
50316 + }
50317 +
50318 + inodev = lookup_inodev_entry(old_ino, old_dev);
50319 + if (inodev != NULL && (old_dentry->d_inode->i_nlink <= 1))
50320 + do_handle_delete(inodev, old_ino, old_dev);
50321 +
50322 + if (unlikely((unsigned long)matchn))
50323 + do_handle_create(matchn, old_dentry, mnt);
50324 +
50325 + write_unlock(&gr_inode_lock);
50326 + preempt_enable();
50327 +
50328 + return;
50329 +}
50330 +
50331 +static int
50332 +lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
50333 + unsigned char **sum)
50334 +{
50335 + struct acl_role_label *r;
50336 + struct role_allowed_ip *ipp;
50337 + struct role_transition *trans;
50338 + unsigned int i;
50339 + int found = 0;
50340 + u32 curr_ip = current->signal->curr_ip;
50341 +
50342 + current->signal->saved_ip = curr_ip;
50343 +
50344 + /* check transition table */
50345 +
50346 + for (trans = current->role->transitions; trans; trans = trans->next) {
50347 + if (!strcmp(rolename, trans->rolename)) {
50348 + found = 1;
50349 + break;
50350 + }
50351 + }
50352 +
50353 + if (!found)
50354 + return 0;
50355 +
50356 + /* handle special roles that do not require authentication
50357 + and check ip */
50358 +
50359 + FOR_EACH_ROLE_START(r)
50360 + if (!strcmp(rolename, r->rolename) &&
50361 + (r->roletype & GR_ROLE_SPECIAL)) {
50362 + found = 0;
50363 + if (r->allowed_ips != NULL) {
50364 + for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
50365 + if ((ntohl(curr_ip) & ipp->netmask) ==
50366 + (ntohl(ipp->addr) & ipp->netmask))
50367 + found = 1;
50368 + }
50369 + } else
50370 + found = 2;
50371 + if (!found)
50372 + return 0;
50373 +
50374 + if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
50375 + ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
50376 + *salt = NULL;
50377 + *sum = NULL;
50378 + return 1;
50379 + }
50380 + }
50381 + FOR_EACH_ROLE_END(r)
50382 +
50383 + for (i = 0; i < num_sprole_pws; i++) {
50384 + if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
50385 + *salt = acl_special_roles[i]->salt;
50386 + *sum = acl_special_roles[i]->sum;
50387 + return 1;
50388 + }
50389 + }
50390 +
50391 + return 0;
50392 +}
50393 +
50394 +static void
50395 +assign_special_role(char *rolename)
50396 +{
50397 + struct acl_object_label *obj;
50398 + struct acl_role_label *r;
50399 + struct acl_role_label *assigned = NULL;
50400 + struct task_struct *tsk;
50401 + struct file *filp;
50402 +
50403 + FOR_EACH_ROLE_START(r)
50404 + if (!strcmp(rolename, r->rolename) &&
50405 + (r->roletype & GR_ROLE_SPECIAL)) {
50406 + assigned = r;
50407 + break;
50408 + }
50409 + FOR_EACH_ROLE_END(r)
50410 +
50411 + if (!assigned)
50412 + return;
50413 +
50414 + read_lock(&tasklist_lock);
50415 + read_lock(&grsec_exec_file_lock);
50416 +
50417 + tsk = current->real_parent;
50418 + if (tsk == NULL)
50419 + goto out_unlock;
50420 +
50421 + filp = tsk->exec_file;
50422 + if (filp == NULL)
50423 + goto out_unlock;
50424 +
50425 + tsk->is_writable = 0;
50426 +
50427 + tsk->acl_sp_role = 1;
50428 + tsk->acl_role_id = ++acl_sp_role_value;
50429 + tsk->role = assigned;
50430 + tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
50431 +
50432 + /* ignore additional mmap checks for processes that are writable
50433 + by the default ACL */
50434 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
50435 + if (unlikely(obj->mode & GR_WRITE))
50436 + tsk->is_writable = 1;
50437 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
50438 + if (unlikely(obj->mode & GR_WRITE))
50439 + tsk->is_writable = 1;
50440 +
50441 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
50442 + printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
50443 +#endif
50444 +
50445 +out_unlock:
50446 + read_unlock(&grsec_exec_file_lock);
50447 + read_unlock(&tasklist_lock);
50448 + return;
50449 +}
50450 +
50451 +int gr_check_secure_terminal(struct task_struct *task)
50452 +{
50453 + struct task_struct *p, *p2, *p3;
50454 + struct files_struct *files;
50455 + struct fdtable *fdt;
50456 + struct file *our_file = NULL, *file;
50457 + int i;
50458 +
50459 + if (task->signal->tty == NULL)
50460 + return 1;
50461 +
50462 + files = get_files_struct(task);
50463 + if (files != NULL) {
50464 + rcu_read_lock();
50465 + fdt = files_fdtable(files);
50466 + for (i=0; i < fdt->max_fds; i++) {
50467 + file = fcheck_files(files, i);
50468 + if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
50469 + get_file(file);
50470 + our_file = file;
50471 + }
50472 + }
50473 + rcu_read_unlock();
50474 + put_files_struct(files);
50475 + }
50476 +
50477 + if (our_file == NULL)
50478 + return 1;
50479 +
50480 + read_lock(&tasklist_lock);
50481 + do_each_thread(p2, p) {
50482 + files = get_files_struct(p);
50483 + if (files == NULL ||
50484 + (p->signal && p->signal->tty == task->signal->tty)) {
50485 + if (files != NULL)
50486 + put_files_struct(files);
50487 + continue;
50488 + }
50489 + rcu_read_lock();
50490 + fdt = files_fdtable(files);
50491 + for (i=0; i < fdt->max_fds; i++) {
50492 + file = fcheck_files(files, i);
50493 + if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
50494 + file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
50495 + p3 = task;
50496 + while (p3->pid > 0) {
50497 + if (p3 == p)
50498 + break;
50499 + p3 = p3->real_parent;
50500 + }
50501 + if (p3 == p)
50502 + break;
50503 + gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
50504 + gr_handle_alertkill(p);
50505 + rcu_read_unlock();
50506 + put_files_struct(files);
50507 + read_unlock(&tasklist_lock);
50508 + fput(our_file);
50509 + return 0;
50510 + }
50511 + }
50512 + rcu_read_unlock();
50513 + put_files_struct(files);
50514 + } while_each_thread(p2, p);
50515 + read_unlock(&tasklist_lock);
50516 +
50517 + fput(our_file);
50518 + return 1;
50519 +}
50520 +
50521 +ssize_t
50522 +write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
50523 +{
50524 + struct gr_arg_wrapper uwrap;
50525 + unsigned char *sprole_salt = NULL;
50526 + unsigned char *sprole_sum = NULL;
50527 + int error = sizeof (struct gr_arg_wrapper);
50528 + int error2 = 0;
50529 +
50530 + mutex_lock(&gr_dev_mutex);
50531 +
50532 + if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
50533 + error = -EPERM;
50534 + goto out;
50535 + }
50536 +
50537 + if (count != sizeof (struct gr_arg_wrapper)) {
50538 + gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
50539 + error = -EINVAL;
50540 + goto out;
50541 + }
50542 +
50543 +
50544 + if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
50545 + gr_auth_expires = 0;
50546 + gr_auth_attempts = 0;
50547 + }
50548 +
50549 + if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
50550 + error = -EFAULT;
50551 + goto out;
50552 + }
50553 +
50554 + if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
50555 + error = -EINVAL;
50556 + goto out;
50557 + }
50558 +
50559 + if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
50560 + error = -EFAULT;
50561 + goto out;
50562 + }
50563 +
50564 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
50565 + gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
50566 + time_after(gr_auth_expires, get_seconds())) {
50567 + error = -EBUSY;
50568 + goto out;
50569 + }
50570 +
50571 + /* if non-root trying to do anything other than use a special role,
50572 + do not attempt authentication, do not count towards authentication
50573 + locking
50574 + */
50575 +
50576 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
50577 + gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
50578 + current_uid()) {
50579 + error = -EPERM;
50580 + goto out;
50581 + }
50582 +
50583 + /* ensure pw and special role name are null terminated */
50584 +
50585 + gr_usermode->pw[GR_PW_LEN - 1] = '\0';
50586 + gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
50587 +
50588 + /* Okay.
50589 +	 * We have enough of the argument structure (we have yet
50590 +	 * to copy_from_user the tables themselves). Copy the tables
50591 + * only if we need them, i.e. for loading operations. */
50592 +
50593 + switch (gr_usermode->mode) {
50594 + case GR_STATUS:
50595 + if (gr_status & GR_READY) {
50596 + error = 1;
50597 + if (!gr_check_secure_terminal(current))
50598 + error = 3;
50599 + } else
50600 + error = 2;
50601 + goto out;
50602 + case GR_SHUTDOWN:
50603 + if ((gr_status & GR_READY)
50604 + && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
50605 + pax_open_kernel();
50606 + gr_status &= ~GR_READY;
50607 + pax_close_kernel();
50608 +
50609 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
50610 + free_variables();
50611 + memset(gr_usermode, 0, sizeof (struct gr_arg));
50612 + memset(gr_system_salt, 0, GR_SALT_LEN);
50613 + memset(gr_system_sum, 0, GR_SHA_LEN);
50614 + } else if (gr_status & GR_READY) {
50615 + gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
50616 + error = -EPERM;
50617 + } else {
50618 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
50619 + error = -EAGAIN;
50620 + }
50621 + break;
50622 + case GR_ENABLE:
50623 + if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
50624 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
50625 + else {
50626 + if (gr_status & GR_READY)
50627 + error = -EAGAIN;
50628 + else
50629 + error = error2;
50630 + gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
50631 + }
50632 + break;
50633 + case GR_RELOAD:
50634 + if (!(gr_status & GR_READY)) {
50635 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
50636 + error = -EAGAIN;
50637 + } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
50638 + preempt_disable();
50639 +
50640 + pax_open_kernel();
50641 + gr_status &= ~GR_READY;
50642 + pax_close_kernel();
50643 +
50644 + free_variables();
50645 + if (!(error2 = gracl_init(gr_usermode))) {
50646 + preempt_enable();
50647 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
50648 + } else {
50649 + preempt_enable();
50650 + error = error2;
50651 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
50652 + }
50653 + } else {
50654 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
50655 + error = -EPERM;
50656 + }
50657 + break;
50658 + case GR_SEGVMOD:
50659 + if (unlikely(!(gr_status & GR_READY))) {
50660 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
50661 + error = -EAGAIN;
50662 + break;
50663 + }
50664 +
50665 + if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
50666 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
50667 + if (gr_usermode->segv_device && gr_usermode->segv_inode) {
50668 + struct acl_subject_label *segvacl;
50669 + segvacl =
50670 + lookup_acl_subj_label(gr_usermode->segv_inode,
50671 + gr_usermode->segv_device,
50672 + current->role);
50673 + if (segvacl) {
50674 + segvacl->crashes = 0;
50675 + segvacl->expires = 0;
50676 + }
50677 + } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
50678 + gr_remove_uid(gr_usermode->segv_uid);
50679 + }
50680 + } else {
50681 + gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
50682 + error = -EPERM;
50683 + }
50684 + break;
50685 + case GR_SPROLE:
50686 + case GR_SPROLEPAM:
50687 + if (unlikely(!(gr_status & GR_READY))) {
50688 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
50689 + error = -EAGAIN;
50690 + break;
50691 + }
50692 +
50693 + if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
50694 + current->role->expires = 0;
50695 + current->role->auth_attempts = 0;
50696 + }
50697 +
50698 + if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
50699 + time_after(current->role->expires, get_seconds())) {
50700 + error = -EBUSY;
50701 + goto out;
50702 + }
50703 +
50704 + if (lookup_special_role_auth
50705 + (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
50706 + && ((!sprole_salt && !sprole_sum)
50707 + || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
50708 + char *p = "";
50709 + assign_special_role(gr_usermode->sp_role);
50710 + read_lock(&tasklist_lock);
50711 + if (current->real_parent)
50712 + p = current->real_parent->role->rolename;
50713 + read_unlock(&tasklist_lock);
50714 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
50715 + p, acl_sp_role_value);
50716 + } else {
50717 + gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
50718 + error = -EPERM;
50719 + if(!(current->role->auth_attempts++))
50720 + current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
50721 +
50722 + goto out;
50723 + }
50724 + break;
50725 + case GR_UNSPROLE:
50726 + if (unlikely(!(gr_status & GR_READY))) {
50727 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
50728 + error = -EAGAIN;
50729 + break;
50730 + }
50731 +
50732 + if (current->role->roletype & GR_ROLE_SPECIAL) {
50733 + char *p = "";
50734 + int i = 0;
50735 +
50736 + read_lock(&tasklist_lock);
50737 + if (current->real_parent) {
50738 + p = current->real_parent->role->rolename;
50739 + i = current->real_parent->acl_role_id;
50740 + }
50741 + read_unlock(&tasklist_lock);
50742 +
50743 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
50744 + gr_set_acls(1);
50745 + } else {
50746 + error = -EPERM;
50747 + goto out;
50748 + }
50749 + break;
50750 + default:
50751 + gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
50752 + error = -EINVAL;
50753 + break;
50754 + }
50755 +
50756 + if (error != -EPERM)
50757 + goto out;
50758 +
50759 + if(!(gr_auth_attempts++))
50760 + gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
50761 +
50762 + out:
50763 + mutex_unlock(&gr_dev_mutex);
50764 + return error;
50765 +}
50766 +
50767 +/* must be called with
50768 + rcu_read_lock();
50769 + read_lock(&tasklist_lock);
50770 + read_lock(&grsec_exec_file_lock);
50771 +*/
50772 +int gr_apply_subject_to_task(struct task_struct *task)
50773 +{
50774 + struct acl_object_label *obj;
50775 + char *tmpname;
50776 + struct acl_subject_label *tmpsubj;
50777 + struct file *filp;
50778 + struct name_entry *nmatch;
50779 +
50780 + filp = task->exec_file;
50781 + if (filp == NULL)
50782 + return 0;
50783 +
50784 +	/* the following applies the correct subject to binaries
50785 +	   that were already running when the RBAC system was
50786 +	   enabled and that have been replaced or deleted since
50787 +	   they were executed
50788 +	   -----
50789 +	   when the RBAC system starts, the inode/dev taken
50790 +	   from exec_file will be one the RBAC system is
50791 +	   unaware of.  It only knows the inode/dev of the
50792 +	   file currently present on disk, or the absence
50793 +	   of it.
50794 +	*/
50795 + preempt_disable();
50796 + tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
50797 +
50798 + nmatch = lookup_name_entry(tmpname);
50799 + preempt_enable();
50800 + tmpsubj = NULL;
50801 + if (nmatch) {
50802 + if (nmatch->deleted)
50803 + tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
50804 + else
50805 + tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
50806 + if (tmpsubj != NULL)
50807 + task->acl = tmpsubj;
50808 + }
50809 + if (tmpsubj == NULL)
50810 + task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
50811 + task->role);
50812 + if (task->acl) {
50813 + task->is_writable = 0;
50814 + /* ignore additional mmap checks for processes that are writable
50815 + by the default ACL */
50816 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
50817 + if (unlikely(obj->mode & GR_WRITE))
50818 + task->is_writable = 1;
50819 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
50820 + if (unlikely(obj->mode & GR_WRITE))
50821 + task->is_writable = 1;
50822 +
50823 + gr_set_proc_res(task);
50824 +
50825 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
50826 + printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
50827 +#endif
50828 + } else {
50829 + return 1;
50830 + }
50831 +
50832 + return 0;
50833 +}
50834 +
50835 +int
50836 +gr_set_acls(const int type)
50837 +{
50838 + struct task_struct *task, *task2;
50839 + struct acl_role_label *role = current->role;
50840 + __u16 acl_role_id = current->acl_role_id;
50841 + const struct cred *cred;
50842 + int ret;
50843 +
50844 + rcu_read_lock();
50845 + read_lock(&tasklist_lock);
50846 + read_lock(&grsec_exec_file_lock);
50847 + do_each_thread(task2, task) {
50848 + /* check to see if we're called from the exit handler,
50849 + if so, only replace ACLs that have inherited the admin
50850 + ACL */
50851 +
50852 + if (type && (task->role != role ||
50853 + task->acl_role_id != acl_role_id))
50854 + continue;
50855 +
50856 + task->acl_role_id = 0;
50857 + task->acl_sp_role = 0;
50858 +
50859 + if (task->exec_file) {
50860 + cred = __task_cred(task);
50861 + task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
50862 + ret = gr_apply_subject_to_task(task);
50863 + if (ret) {
50864 + read_unlock(&grsec_exec_file_lock);
50865 + read_unlock(&tasklist_lock);
50866 + rcu_read_unlock();
50867 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
50868 + return ret;
50869 + }
50870 + } else {
50871 + // it's a kernel process
50872 + task->role = kernel_role;
50873 + task->acl = kernel_role->root_label;
50874 +#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
50875 + task->acl->mode &= ~GR_PROCFIND;
50876 +#endif
50877 + }
50878 + } while_each_thread(task2, task);
50879 + read_unlock(&grsec_exec_file_lock);
50880 + read_unlock(&tasklist_lock);
50881 + rcu_read_unlock();
50882 +
50883 + return 0;
50884 +}
50885 +
50886 +void
50887 +gr_learn_resource(const struct task_struct *task,
50888 + const int res, const unsigned long wanted, const int gt)
50889 +{
50890 + struct acl_subject_label *acl;
50891 + const struct cred *cred;
50892 +
50893 + if (unlikely((gr_status & GR_READY) &&
50894 + task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
50895 + goto skip_reslog;
50896 +
50897 +#ifdef CONFIG_GRKERNSEC_RESLOG
50898 + gr_log_resource(task, res, wanted, gt);
50899 +#endif
50900 + skip_reslog:
50901 +
50902 + if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
50903 + return;
50904 +
50905 + acl = task->acl;
50906 +
50907 + if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
50908 + !(acl->resmask & (1 << (unsigned short) res))))
50909 + return;
50910 +
50911 + if (wanted >= acl->res[res].rlim_cur) {
50912 + unsigned long res_add;
50913 +
50914 + res_add = wanted;
50915 + switch (res) {
50916 + case RLIMIT_CPU:
50917 + res_add += GR_RLIM_CPU_BUMP;
50918 + break;
50919 + case RLIMIT_FSIZE:
50920 + res_add += GR_RLIM_FSIZE_BUMP;
50921 + break;
50922 + case RLIMIT_DATA:
50923 + res_add += GR_RLIM_DATA_BUMP;
50924 + break;
50925 + case RLIMIT_STACK:
50926 + res_add += GR_RLIM_STACK_BUMP;
50927 + break;
50928 + case RLIMIT_CORE:
50929 + res_add += GR_RLIM_CORE_BUMP;
50930 + break;
50931 + case RLIMIT_RSS:
50932 + res_add += GR_RLIM_RSS_BUMP;
50933 + break;
50934 + case RLIMIT_NPROC:
50935 + res_add += GR_RLIM_NPROC_BUMP;
50936 + break;
50937 + case RLIMIT_NOFILE:
50938 + res_add += GR_RLIM_NOFILE_BUMP;
50939 + break;
50940 + case RLIMIT_MEMLOCK:
50941 + res_add += GR_RLIM_MEMLOCK_BUMP;
50942 + break;
50943 + case RLIMIT_AS:
50944 + res_add += GR_RLIM_AS_BUMP;
50945 + break;
50946 + case RLIMIT_LOCKS:
50947 + res_add += GR_RLIM_LOCKS_BUMP;
50948 + break;
50949 + case RLIMIT_SIGPENDING:
50950 + res_add += GR_RLIM_SIGPENDING_BUMP;
50951 + break;
50952 + case RLIMIT_MSGQUEUE:
50953 + res_add += GR_RLIM_MSGQUEUE_BUMP;
50954 + break;
50955 + case RLIMIT_NICE:
50956 + res_add += GR_RLIM_NICE_BUMP;
50957 + break;
50958 + case RLIMIT_RTPRIO:
50959 + res_add += GR_RLIM_RTPRIO_BUMP;
50960 + break;
50961 + case RLIMIT_RTTIME:
50962 + res_add += GR_RLIM_RTTIME_BUMP;
50963 + break;
50964 + }
50965 +
50966 + acl->res[res].rlim_cur = res_add;
50967 +
50968 + if (wanted > acl->res[res].rlim_max)
50969 + acl->res[res].rlim_max = res_add;
50970 +
50971 + /* only log the subject filename, since resource logging is supported for
50972 + single-subject learning only */
50973 + rcu_read_lock();
50974 + cred = __task_cred(task);
50975 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
50976 + task->role->roletype, cred->uid, cred->gid, acl->filename,
50977 + acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
50978 + "", (unsigned long) res, &task->signal->saved_ip);
50979 + rcu_read_unlock();
50980 + }
50981 +
50982 + return;
50983 +}
50984 +
50985 +#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
50986 +void
50987 +pax_set_initial_flags(struct linux_binprm *bprm)
50988 +{
50989 + struct task_struct *task = current;
50990 + struct acl_subject_label *proc;
50991 + unsigned long flags;
50992 +
50993 + if (unlikely(!(gr_status & GR_READY)))
50994 + return;
50995 +
50996 + flags = pax_get_flags(task);
50997 +
50998 + proc = task->acl;
50999 +
51000 + if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
51001 + flags &= ~MF_PAX_PAGEEXEC;
51002 + if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
51003 + flags &= ~MF_PAX_SEGMEXEC;
51004 + if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
51005 + flags &= ~MF_PAX_RANDMMAP;
51006 + if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
51007 + flags &= ~MF_PAX_EMUTRAMP;
51008 + if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
51009 + flags &= ~MF_PAX_MPROTECT;
51010 +
51011 + if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
51012 + flags |= MF_PAX_PAGEEXEC;
51013 + if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
51014 + flags |= MF_PAX_SEGMEXEC;
51015 + if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
51016 + flags |= MF_PAX_RANDMMAP;
51017 + if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
51018 + flags |= MF_PAX_EMUTRAMP;
51019 + if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
51020 + flags |= MF_PAX_MPROTECT;
51021 +
51022 + pax_set_flags(task, flags);
51023 +
51024 + return;
51025 +}
51026 +#endif
51027 +
51028 +#ifdef CONFIG_SYSCTL
51029 +/* Eric Biederman likes breaking userland ABI and every inode-based security
51030 + system to save 35kb of memory */
51031 +
51032 +/* we modify the passed in filename, but adjust it back before returning */
51033 +static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len)
51034 +{
51035 + struct name_entry *nmatch;
51036 + char *p, *lastp = NULL;
51037 + struct acl_object_label *obj = NULL, *tmp;
51038 + struct acl_subject_label *tmpsubj;
51039 + char c = '\0';
51040 +
51041 + read_lock(&gr_inode_lock);
51042 +
51043 + p = name + len - 1;
51044 + do {
51045 + nmatch = lookup_name_entry(name);
51046 + if (lastp != NULL)
51047 + *lastp = c;
51048 +
51049 + if (nmatch == NULL)
51050 + goto next_component;
51051 + tmpsubj = current->acl;
51052 + do {
51053 + obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj);
51054 + if (obj != NULL) {
51055 + tmp = obj->globbed;
51056 + while (tmp) {
51057 + if (!glob_match(tmp->filename, name)) {
51058 + obj = tmp;
51059 + goto found_obj;
51060 + }
51061 + tmp = tmp->next;
51062 + }
51063 + goto found_obj;
51064 + }
51065 + } while ((tmpsubj = tmpsubj->parent_subject));
51066 +next_component:
51067 + /* end case */
51068 + if (p == name)
51069 + break;
51070 +
51071 + while (*p != '/')
51072 + p--;
51073 + if (p == name)
51074 + lastp = p + 1;
51075 + else {
51076 + lastp = p;
51077 + p--;
51078 + }
51079 + c = *lastp;
51080 + *lastp = '\0';
51081 + } while (1);
51082 +found_obj:
51083 + read_unlock(&gr_inode_lock);
51084 + /* obj returned will always be non-null */
51085 + return obj;
51086 +}
51087 +
51088 +/* returns 0 when allowing, non-zero on error
51089 + op of 0 is used for readdir, so we don't log the names of hidden files
51090 +*/
51091 +__u32
51092 +gr_handle_sysctl(const struct ctl_table *table, const int op)
51093 +{
51094 + struct ctl_table *tmp;
51095 + const char *proc_sys = "/proc/sys";
51096 + char *path;
51097 + struct acl_object_label *obj;
51098 + unsigned short len = 0, pos = 0, depth = 0, i;
51099 + __u32 err = 0;
51100 + __u32 mode = 0;
51101 +
51102 + if (unlikely(!(gr_status & GR_READY)))
51103 + return 0;
51104 +
51105 + /* for now, ignore operations on non-sysctl entries if it's not a
51106 +	   readdir */
51107 + if (table->child != NULL && op != 0)
51108 + return 0;
51109 +
51110 + mode |= GR_FIND;
51111 + /* it's only a read if it's an entry, read on dirs is for readdir */
51112 + if (op & MAY_READ)
51113 + mode |= GR_READ;
51114 + if (op & MAY_WRITE)
51115 + mode |= GR_WRITE;
51116 +
51117 + preempt_disable();
51118 +
51119 + path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
51120 +
51121 + /* it's only a read/write if it's an actual entry, not a dir
51122 + (which are opened for readdir)
51123 + */
51124 +
51125 + /* convert the requested sysctl entry into a pathname */
51126 +
51127 + for (tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
51128 + len += strlen(tmp->procname);
51129 + len++;
51130 + depth++;
51131 + }
51132 +
51133 + if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) {
51134 + /* deny */
51135 + goto out;
51136 + }
51137 +
51138 + memset(path, 0, PAGE_SIZE);
51139 +
51140 + memcpy(path, proc_sys, strlen(proc_sys));
51141 +
51142 + pos += strlen(proc_sys);
51143 +
51144 + for (; depth > 0; depth--) {
51145 + path[pos] = '/';
51146 + pos++;
51147 + for (i = 1, tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
51148 + if (depth == i) {
51149 + memcpy(path + pos, tmp->procname,
51150 + strlen(tmp->procname));
51151 + pos += strlen(tmp->procname);
51152 + }
51153 + i++;
51154 + }
51155 + }
51156 +
51157 + obj = gr_lookup_by_name(path, pos);
51158 + err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
51159 +
51160 + if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) &&
51161 + ((err & mode) != mode))) {
51162 + __u32 new_mode = mode;
51163 +
51164 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
51165 +
51166 + err = 0;
51167 + gr_log_learn_sysctl(path, new_mode);
51168 + } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) {
51169 + gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path);
51170 + err = -ENOENT;
51171 + } else if (!(err & GR_FIND)) {
51172 + err = -ENOENT;
51173 + } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) {
51174 + gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied",
51175 + path, (mode & GR_READ) ? " reading" : "",
51176 + (mode & GR_WRITE) ? " writing" : "");
51177 + err = -EACCES;
51178 + } else if ((err & mode) != mode) {
51179 + err = -EACCES;
51180 + } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) {
51181 + gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful",
51182 + path, (mode & GR_READ) ? " reading" : "",
51183 + (mode & GR_WRITE) ? " writing" : "");
51184 + err = 0;
51185 + } else
51186 + err = 0;
51187 +
51188 + out:
51189 + preempt_enable();
51190 +
51191 + return err;
51192 +}
51193 +#endif
51194 +
51195 +int
51196 +gr_handle_proc_ptrace(struct task_struct *task)
51197 +{
51198 + struct file *filp;
51199 + struct task_struct *tmp = task;
51200 + struct task_struct *curtemp = current;
51201 + __u32 retmode;
51202 +
51203 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
51204 + if (unlikely(!(gr_status & GR_READY)))
51205 + return 0;
51206 +#endif
51207 +
51208 + read_lock(&tasklist_lock);
51209 + read_lock(&grsec_exec_file_lock);
51210 + filp = task->exec_file;
51211 +
51212 + while (tmp->pid > 0) {
51213 + if (tmp == curtemp)
51214 + break;
51215 + tmp = tmp->real_parent;
51216 + }
51217 +
51218 + if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
51219 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
51220 + read_unlock(&grsec_exec_file_lock);
51221 + read_unlock(&tasklist_lock);
51222 + return 1;
51223 + }
51224 +
51225 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
51226 + if (!(gr_status & GR_READY)) {
51227 + read_unlock(&grsec_exec_file_lock);
51228 + read_unlock(&tasklist_lock);
51229 + return 0;
51230 + }
51231 +#endif
51232 +
51233 + retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
51234 + read_unlock(&grsec_exec_file_lock);
51235 + read_unlock(&tasklist_lock);
51236 +
51237 + if (retmode & GR_NOPTRACE)
51238 + return 1;
51239 +
51240 + if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
51241 + && (current->acl != task->acl || (current->acl != current->role->root_label
51242 + && current->pid != task->pid)))
51243 + return 1;
51244 +
51245 + return 0;
51246 +}
51247 +
51248 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
51249 +{
51250 + if (unlikely(!(gr_status & GR_READY)))
51251 + return;
51252 +
51253 + if (!(current->role->roletype & GR_ROLE_GOD))
51254 + return;
51255 +
51256 + seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
51257 + p->role->rolename, gr_task_roletype_to_char(p),
51258 + p->acl->filename);
51259 +}
51260 +
51261 +int
51262 +gr_handle_ptrace(struct task_struct *task, const long request)
51263 +{
51264 + struct task_struct *tmp = task;
51265 + struct task_struct *curtemp = current;
51266 + __u32 retmode;
51267 +
51268 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
51269 + if (unlikely(!(gr_status & GR_READY)))
51270 + return 0;
51271 +#endif
51272 +
51273 + read_lock(&tasklist_lock);
51274 + while (tmp->pid > 0) {
51275 + if (tmp == curtemp)
51276 + break;
51277 + tmp = tmp->real_parent;
51278 + }
51279 +
51280 + if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
51281 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
51282 + read_unlock(&tasklist_lock);
51283 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
51284 + return 1;
51285 + }
51286 + read_unlock(&tasklist_lock);
51287 +
51288 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
51289 + if (!(gr_status & GR_READY))
51290 + return 0;
51291 +#endif
51292 +
51293 + read_lock(&grsec_exec_file_lock);
51294 + if (unlikely(!task->exec_file)) {
51295 + read_unlock(&grsec_exec_file_lock);
51296 + return 0;
51297 + }
51298 +
51299 + retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
51300 + read_unlock(&grsec_exec_file_lock);
51301 +
51302 + if (retmode & GR_NOPTRACE) {
51303 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
51304 + return 1;
51305 + }
51306 +
51307 + if (retmode & GR_PTRACERD) {
51308 + switch (request) {
51309 + case PTRACE_POKETEXT:
51310 + case PTRACE_POKEDATA:
51311 + case PTRACE_POKEUSR:
51312 +#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
51313 + case PTRACE_SETREGS:
51314 + case PTRACE_SETFPREGS:
51315 +#endif
51316 +#ifdef CONFIG_X86
51317 + case PTRACE_SETFPXREGS:
51318 +#endif
51319 +#ifdef CONFIG_ALTIVEC
51320 + case PTRACE_SETVRREGS:
51321 +#endif
51322 + return 1;
51323 + default:
51324 + return 0;
51325 + }
51326 + } else if (!(current->acl->mode & GR_POVERRIDE) &&
51327 + !(current->role->roletype & GR_ROLE_GOD) &&
51328 + (current->acl != task->acl)) {
51329 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
51330 + return 1;
51331 + }
51332 +
51333 + return 0;
51334 +}
51335 +
51336 +static int is_writable_mmap(const struct file *filp)
51337 +{
51338 + struct task_struct *task = current;
51339 + struct acl_object_label *obj, *obj2;
51340 +
51341 + if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
51342 + !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
51343 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
51344 + obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
51345 + task->role->root_label);
51346 + if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
51347 + gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
51348 + return 1;
51349 + }
51350 + }
51351 + return 0;
51352 +}
51353 +
51354 +int
51355 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
51356 +{
51357 + __u32 mode;
51358 +
51359 + if (unlikely(!file || !(prot & PROT_EXEC)))
51360 + return 1;
51361 +
51362 + if (is_writable_mmap(file))
51363 + return 0;
51364 +
51365 + mode =
51366 + gr_search_file(file->f_path.dentry,
51367 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
51368 + file->f_path.mnt);
51369 +
51370 + if (!gr_tpe_allow(file))
51371 + return 0;
51372 +
51373 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
51374 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
51375 + return 0;
51376 + } else if (unlikely(!(mode & GR_EXEC))) {
51377 + return 0;
51378 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
51379 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
51380 + return 1;
51381 + }
51382 +
51383 + return 1;
51384 +}
51385 +
51386 +int
51387 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
51388 +{
51389 + __u32 mode;
51390 +
51391 + if (unlikely(!file || !(prot & PROT_EXEC)))
51392 + return 1;
51393 +
51394 + if (is_writable_mmap(file))
51395 + return 0;
51396 +
51397 + mode =
51398 + gr_search_file(file->f_path.dentry,
51399 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
51400 + file->f_path.mnt);
51401 +
51402 + if (!gr_tpe_allow(file))
51403 + return 0;
51404 +
51405 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
51406 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
51407 + return 0;
51408 + } else if (unlikely(!(mode & GR_EXEC))) {
51409 + return 0;
51410 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
51411 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
51412 + return 1;
51413 + }
51414 +
51415 + return 1;
51416 +}
51417 +
51418 +void
51419 +gr_acl_handle_psacct(struct task_struct *task, const long code)
51420 +{
51421 + unsigned long runtime;
51422 + unsigned long cputime;
51423 + unsigned int wday, cday;
51424 + __u8 whr, chr;
51425 + __u8 wmin, cmin;
51426 + __u8 wsec, csec;
51427 + struct timespec timeval;
51428 +
51429 + if (unlikely(!(gr_status & GR_READY) || !task->acl ||
51430 + !(task->acl->mode & GR_PROCACCT)))
51431 + return;
51432 +
51433 + do_posix_clock_monotonic_gettime(&timeval);
51434 + runtime = timeval.tv_sec - task->start_time.tv_sec;
51435 + wday = runtime / (3600 * 24);
51436 + runtime -= wday * (3600 * 24);
51437 + whr = runtime / 3600;
51438 + runtime -= whr * 3600;
51439 + wmin = runtime / 60;
51440 + runtime -= wmin * 60;
51441 + wsec = runtime;
51442 +
51443 + cputime = (task->utime + task->stime) / HZ;
51444 + cday = cputime / (3600 * 24);
51445 + cputime -= cday * (3600 * 24);
51446 + chr = cputime / 3600;
51447 + cputime -= chr * 3600;
51448 + cmin = cputime / 60;
51449 + cputime -= cmin * 60;
51450 + csec = cputime;
51451 +
51452 + gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
51453 +
51454 + return;
51455 +}
51456 +
51457 +void gr_set_kernel_label(struct task_struct *task)
51458 +{
51459 + if (gr_status & GR_READY) {
51460 + task->role = kernel_role;
51461 + task->acl = kernel_role->root_label;
51462 + }
51463 + return;
51464 +}
51465 +
51466 +#ifdef CONFIG_TASKSTATS
51467 +int gr_is_taskstats_denied(int pid)
51468 +{
51469 + struct task_struct *task;
51470 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
51471 + const struct cred *cred;
51472 +#endif
51473 + int ret = 0;
51474 +
51475 + /* restrict taskstats viewing to un-chrooted root users
51476 + who have the 'view' subject flag if the RBAC system is enabled
51477 + */
51478 +
51479 + rcu_read_lock();
51480 + read_lock(&tasklist_lock);
51481 + task = find_task_by_vpid(pid);
51482 + if (task) {
51483 +#ifdef CONFIG_GRKERNSEC_CHROOT
51484 + if (proc_is_chrooted(task))
51485 + ret = -EACCES;
51486 +#endif
51487 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
51488 + cred = __task_cred(task);
51489 +#ifdef CONFIG_GRKERNSEC_PROC_USER
51490 + if (cred->uid != 0)
51491 + ret = -EACCES;
51492 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
51493 + if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
51494 + ret = -EACCES;
51495 +#endif
51496 +#endif
51497 + if (gr_status & GR_READY) {
51498 + if (!(task->acl->mode & GR_VIEW))
51499 + ret = -EACCES;
51500 + }
51501 + } else
51502 + ret = -ENOENT;
51503 +
51504 + read_unlock(&tasklist_lock);
51505 + rcu_read_unlock();
51506 +
51507 + return ret;
51508 +}
51509 +#endif
51510 +
51511 +/* AUXV entries are filled via a descendant of search_binary_handler
51512 + after we've already applied the subject for the target
51513 +*/
51514 +int gr_acl_enable_at_secure(void)
51515 +{
51516 + if (unlikely(!(gr_status & GR_READY)))
51517 + return 0;
51518 +
51519 + if (current->acl->mode & GR_ATSECURE)
51520 + return 1;
51521 +
51522 + return 0;
51523 +}
51524 +
51525 +int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
51526 +{
51527 + struct task_struct *task = current;
51528 + struct dentry *dentry = file->f_path.dentry;
51529 + struct vfsmount *mnt = file->f_path.mnt;
51530 + struct acl_object_label *obj, *tmp;
51531 + struct acl_subject_label *subj;
51532 + unsigned int bufsize;
51533 + int is_not_root;
51534 + char *path;
51535 + dev_t dev = __get_dev(dentry);
51536 +
51537 + if (unlikely(!(gr_status & GR_READY)))
51538 + return 1;
51539 +
51540 + if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
51541 + return 1;
51542 +
51543 + /* ignore Eric Biederman */
51544 + if (IS_PRIVATE(dentry->d_inode))
51545 + return 1;
51546 +
51547 + subj = task->acl;
51548 + do {
51549 + obj = lookup_acl_obj_label(ino, dev, subj);
51550 + if (obj != NULL)
51551 + return (obj->mode & GR_FIND) ? 1 : 0;
51552 + } while ((subj = subj->parent_subject));
51553 +
51554 + /* this is purely an optimization since we're looking for an object
51555 + for the directory we're doing a readdir on
51556 + if it's possible for any globbed object to match the entry we're
51557 + filling into the directory, then the object we find here will be
51558 + an anchor point with attached globbed objects
51559 + */
51560 + obj = chk_obj_label_noglob(dentry, mnt, task->acl);
51561 + if (obj->globbed == NULL)
51562 + return (obj->mode & GR_FIND) ? 1 : 0;
51563 +
51564 + is_not_root = ((obj->filename[0] == '/') &&
51565 + (obj->filename[1] == '\0')) ? 0 : 1;
51566 + bufsize = PAGE_SIZE - namelen - is_not_root;
51567 +
51568 + /* check bufsize > PAGE_SIZE || bufsize == 0 */
51569 + if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
51570 + return 1;
51571 +
51572 + preempt_disable();
51573 + path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
51574 + bufsize);
51575 +
51576 + bufsize = strlen(path);
51577 +
51578 + /* if base is "/", don't append an additional slash */
51579 + if (is_not_root)
51580 + *(path + bufsize) = '/';
51581 + memcpy(path + bufsize + is_not_root, name, namelen);
51582 + *(path + bufsize + namelen + is_not_root) = '\0';
51583 +
51584 + tmp = obj->globbed;
51585 + while (tmp) {
51586 + if (!glob_match(tmp->filename, path)) {
51587 + preempt_enable();
51588 + return (tmp->mode & GR_FIND) ? 1 : 0;
51589 + }
51590 + tmp = tmp->next;
51591 + }
51592 + preempt_enable();
51593 + return (obj->mode & GR_FIND) ? 1 : 0;
51594 +}
51595 +
51596 +#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
51597 +EXPORT_SYMBOL(gr_acl_is_enabled);
51598 +#endif
51599 +EXPORT_SYMBOL(gr_learn_resource);
51600 +EXPORT_SYMBOL(gr_set_kernel_label);
51601 +#ifdef CONFIG_SECURITY
51602 +EXPORT_SYMBOL(gr_check_user_change);
51603 +EXPORT_SYMBOL(gr_check_group_change);
51604 +#endif
51605 +
51606 diff -urNp linux-3.0.8/grsecurity/gracl_cap.c linux-3.0.8/grsecurity/gracl_cap.c
51607 --- linux-3.0.8/grsecurity/gracl_cap.c 1969-12-31 19:00:00.000000000 -0500
51608 +++ linux-3.0.8/grsecurity/gracl_cap.c 2011-09-14 09:21:24.000000000 -0400
51609 @@ -0,0 +1,101 @@
51610 +#include <linux/kernel.h>
51611 +#include <linux/module.h>
51612 +#include <linux/sched.h>
51613 +#include <linux/gracl.h>
51614 +#include <linux/grsecurity.h>
51615 +#include <linux/grinternal.h>
51616 +
51617 +extern const char *captab_log[];
51618 +extern int captab_log_entries;
51619 +
51620 +int
51621 +gr_acl_is_capable(const int cap)
51622 +{
51623 + struct task_struct *task = current;
51624 + const struct cred *cred = current_cred();
51625 + struct acl_subject_label *curracl;
51626 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
51627 + kernel_cap_t cap_audit = __cap_empty_set;
51628 +
51629 + if (!gr_acl_is_enabled())
51630 + return 1;
51631 +
51632 + curracl = task->acl;
51633 +
51634 + cap_drop = curracl->cap_lower;
51635 + cap_mask = curracl->cap_mask;
51636 + cap_audit = curracl->cap_invert_audit;
51637 +
51638 + while ((curracl = curracl->parent_subject)) {
51639 + /* if the cap isn't specified in the current computed mask but is specified in the
51640 + current level subject, and is lowered in the current level subject, then add
51641 + it to the set of dropped capabilities
51642 + otherwise, add the current level subject's mask to the current computed mask
51643 + */
51644 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
51645 + cap_raise(cap_mask, cap);
51646 + if (cap_raised(curracl->cap_lower, cap))
51647 + cap_raise(cap_drop, cap);
51648 + if (cap_raised(curracl->cap_invert_audit, cap))
51649 + cap_raise(cap_audit, cap);
51650 + }
51651 + }
51652 +
51653 + if (!cap_raised(cap_drop, cap)) {
51654 + if (cap_raised(cap_audit, cap))
51655 + gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
51656 + return 1;
51657 + }
51658 +
51659 + curracl = task->acl;
51660 +
51661 + if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
51662 + && cap_raised(cred->cap_effective, cap)) {
51663 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
51664 + task->role->roletype, cred->uid,
51665 + cred->gid, task->exec_file ?
51666 + gr_to_filename(task->exec_file->f_path.dentry,
51667 + task->exec_file->f_path.mnt) : curracl->filename,
51668 + curracl->filename, 0UL,
51669 + 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
51670 + return 1;
51671 + }
51672 +
51673 + if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
51674 + gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
51675 + return 0;
51676 +}
51677 +
51678 +int
51679 +gr_acl_is_capable_nolog(const int cap)
51680 +{
51681 + struct acl_subject_label *curracl;
51682 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
51683 +
51684 + if (!gr_acl_is_enabled())
51685 + return 1;
51686 +
51687 + curracl = current->acl;
51688 +
51689 + cap_drop = curracl->cap_lower;
51690 + cap_mask = curracl->cap_mask;
51691 +
51692 + while ((curracl = curracl->parent_subject)) {
51693 + /* if the cap isn't specified in the current computed mask but is specified in the
51694 + current level subject, and is lowered in the current level subject, then add
51695 + it to the set of dropped capabilities
51696 + otherwise, add the current level subject's mask to the current computed mask
51697 + */
51698 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
51699 + cap_raise(cap_mask, cap);
51700 + if (cap_raised(curracl->cap_lower, cap))
51701 + cap_raise(cap_drop, cap);
51702 + }
51703 + }
51704 +
51705 + if (!cap_raised(cap_drop, cap))
51706 + return 1;
51707 +
51708 + return 0;
51709 +}
51710 +
51711 diff -urNp linux-3.0.8/grsecurity/gracl_fs.c linux-3.0.8/grsecurity/gracl_fs.c
51712 --- linux-3.0.8/grsecurity/gracl_fs.c 1969-12-31 19:00:00.000000000 -0500
51713 +++ linux-3.0.8/grsecurity/gracl_fs.c 2011-10-17 01:22:26.000000000 -0400
51714 @@ -0,0 +1,431 @@
51715 +#include <linux/kernel.h>
51716 +#include <linux/sched.h>
51717 +#include <linux/types.h>
51718 +#include <linux/fs.h>
51719 +#include <linux/file.h>
51720 +#include <linux/stat.h>
51721 +#include <linux/grsecurity.h>
51722 +#include <linux/grinternal.h>
51723 +#include <linux/gracl.h>
51724 +
51725 +__u32
51726 +gr_acl_handle_hidden_file(const struct dentry * dentry,
51727 + const struct vfsmount * mnt)
51728 +{
51729 + __u32 mode;
51730 +
51731 + if (unlikely(!dentry->d_inode))
51732 + return GR_FIND;
51733 +
51734 + mode =
51735 + gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
51736 +
51737 + if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
51738 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
51739 + return mode;
51740 + } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
51741 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
51742 + return 0;
51743 + } else if (unlikely(!(mode & GR_FIND)))
51744 + return 0;
51745 +
51746 + return GR_FIND;
51747 +}
51748 +
51749 +__u32
51750 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
51751 + const int fmode)
51752 +{
51753 + __u32 reqmode = GR_FIND;
51754 + __u32 mode;
51755 +
51756 + if (unlikely(!dentry->d_inode))
51757 + return reqmode;
51758 +
51759 + if (unlikely(fmode & O_APPEND))
51760 + reqmode |= GR_APPEND;
51761 + else if (unlikely(fmode & FMODE_WRITE))
51762 + reqmode |= GR_WRITE;
51763 + if (likely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
51764 + reqmode |= GR_READ;
51765 + if ((fmode & FMODE_GREXEC) && (fmode & __FMODE_EXEC))
51766 + reqmode &= ~GR_READ;
51767 + mode =
51768 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
51769 + mnt);
51770 +
51771 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
51772 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
51773 + reqmode & GR_READ ? " reading" : "",
51774 + reqmode & GR_WRITE ? " writing" : reqmode &
51775 + GR_APPEND ? " appending" : "");
51776 + return reqmode;
51777 + } else
51778 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
51779 + {
51780 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
51781 + reqmode & GR_READ ? " reading" : "",
51782 + reqmode & GR_WRITE ? " writing" : reqmode &
51783 + GR_APPEND ? " appending" : "");
51784 + return 0;
51785 + } else if (unlikely((mode & reqmode) != reqmode))
51786 + return 0;
51787 +
51788 + return reqmode;
51789 +}
51790 +
51791 +__u32
51792 +gr_acl_handle_creat(const struct dentry * dentry,
51793 + const struct dentry * p_dentry,
51794 + const struct vfsmount * p_mnt, const int fmode,
51795 + const int imode)
51796 +{
51797 + __u32 reqmode = GR_WRITE | GR_CREATE;
51798 + __u32 mode;
51799 +
51800 + if (unlikely(fmode & O_APPEND))
51801 + reqmode |= GR_APPEND;
51802 + if (unlikely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
51803 + reqmode |= GR_READ;
51804 + if (unlikely((fmode & O_CREAT) && (imode & (S_ISUID | S_ISGID))))
51805 + reqmode |= GR_SETID;
51806 +
51807 + mode =
51808 + gr_check_create(dentry, p_dentry, p_mnt,
51809 + reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
51810 +
51811 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
51812 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
51813 + reqmode & GR_READ ? " reading" : "",
51814 + reqmode & GR_WRITE ? " writing" : reqmode &
51815 + GR_APPEND ? " appending" : "");
51816 + return reqmode;
51817 + } else
51818 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
51819 + {
51820 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
51821 + reqmode & GR_READ ? " reading" : "",
51822 + reqmode & GR_WRITE ? " writing" : reqmode &
51823 + GR_APPEND ? " appending" : "");
51824 + return 0;
51825 + } else if (unlikely((mode & reqmode) != reqmode))
51826 + return 0;
51827 +
51828 + return reqmode;
51829 +}
51830 +
51831 +__u32
51832 +gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
51833 + const int fmode)
51834 +{
51835 + __u32 mode, reqmode = GR_FIND;
51836 +
51837 + if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
51838 + reqmode |= GR_EXEC;
51839 + if (fmode & S_IWOTH)
51840 + reqmode |= GR_WRITE;
51841 + if (fmode & S_IROTH)
51842 + reqmode |= GR_READ;
51843 +
51844 + mode =
51845 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
51846 + mnt);
51847 +
51848 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
51849 + gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
51850 + reqmode & GR_READ ? " reading" : "",
51851 + reqmode & GR_WRITE ? " writing" : "",
51852 + reqmode & GR_EXEC ? " executing" : "");
51853 + return reqmode;
51854 + } else
51855 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
51856 + {
51857 + gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
51858 + reqmode & GR_READ ? " reading" : "",
51859 + reqmode & GR_WRITE ? " writing" : "",
51860 + reqmode & GR_EXEC ? " executing" : "");
51861 + return 0;
51862 + } else if (unlikely((mode & reqmode) != reqmode))
51863 + return 0;
51864 +
51865 + return reqmode;
51866 +}
51867 +
51868 +static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
51869 +{
51870 + __u32 mode;
51871 +
51872 + mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
51873 +
51874 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
51875 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
51876 + return mode;
51877 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
51878 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
51879 + return 0;
51880 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
51881 + return 0;
51882 +
51883 + return (reqmode);
51884 +}
51885 +
51886 +__u32
51887 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
51888 +{
51889 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
51890 +}
51891 +
51892 +__u32
51893 +gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
51894 +{
51895 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
51896 +}
51897 +
51898 +__u32
51899 +gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
51900 +{
51901 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
51902 +}
51903 +
51904 +__u32
51905 +gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
51906 +{
51907 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
51908 +}
51909 +
51910 +__u32
51911 +gr_acl_handle_fchmod(const struct dentry *dentry, const struct vfsmount *mnt,
51912 + mode_t mode)
51913 +{
51914 + if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
51915 + return 1;
51916 +
51917 + if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
51918 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
51919 + GR_FCHMOD_ACL_MSG);
51920 + } else {
51921 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_FCHMOD_ACL_MSG);
51922 + }
51923 +}
51924 +
51925 +__u32
51926 +gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
51927 + mode_t mode)
51928 +{
51929 + if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
51930 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
51931 + GR_CHMOD_ACL_MSG);
51932 + } else {
51933 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
51934 + }
51935 +}
51936 +
51937 +__u32
51938 +gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
51939 +{
51940 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
51941 +}
51942 +
51943 +__u32
51944 +gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
51945 +{
51946 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
51947 +}
51948 +
51949 +__u32
51950 +gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
51951 +{
51952 + return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
51953 +}
51954 +
51955 +__u32
51956 +gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
51957 +{
51958 + return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
51959 + GR_UNIXCONNECT_ACL_MSG);
51960 +}
51961 +
51962 +/* hardlinks require at minimum create and link permission;
51963 + any additional privilege required is based on the
51964 + privilege of the file being linked to
51965 +*/
51966 +__u32
51967 +gr_acl_handle_link(const struct dentry * new_dentry,
51968 + const struct dentry * parent_dentry,
51969 + const struct vfsmount * parent_mnt,
51970 + const struct dentry * old_dentry,
51971 + const struct vfsmount * old_mnt, const char *to)
51972 +{
51973 + __u32 mode;
51974 + __u32 needmode = GR_CREATE | GR_LINK;
51975 + __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
51976 +
51977 + mode =
51978 + gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
51979 + old_mnt);
51980 +
51981 + if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
51982 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
51983 + return mode;
51984 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
51985 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
51986 + return 0;
51987 + } else if (unlikely((mode & needmode) != needmode))
51988 + return 0;
51989 +
51990 + return 1;
51991 +}
51992 +
51993 +__u32
51994 +gr_acl_handle_symlink(const struct dentry * new_dentry,
51995 + const struct dentry * parent_dentry,
51996 + const struct vfsmount * parent_mnt, const char *from)
51997 +{
51998 + __u32 needmode = GR_WRITE | GR_CREATE;
51999 + __u32 mode;
52000 +
52001 + mode =
52002 + gr_check_create(new_dentry, parent_dentry, parent_mnt,
52003 + GR_CREATE | GR_AUDIT_CREATE |
52004 + GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
52005 +
52006 + if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
52007 + gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
52008 + return mode;
52009 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
52010 + gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
52011 + return 0;
52012 + } else if (unlikely((mode & needmode) != needmode))
52013 + return 0;
52014 +
52015 + return (GR_WRITE | GR_CREATE);
52016 +}
52017 +
52018 +static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
52019 +{
52020 + __u32 mode;
52021 +
52022 + mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
52023 +
52024 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
52025 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
52026 + return mode;
52027 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
52028 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
52029 + return 0;
52030 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
52031 + return 0;
52032 +
52033 + return (reqmode);
52034 +}
52035 +
52036 +__u32
52037 +gr_acl_handle_mknod(const struct dentry * new_dentry,
52038 + const struct dentry * parent_dentry,
52039 + const struct vfsmount * parent_mnt,
52040 + const int mode)
52041 +{
52042 + __u32 reqmode = GR_WRITE | GR_CREATE;
52043 + if (unlikely(mode & (S_ISUID | S_ISGID)))
52044 + reqmode |= GR_SETID;
52045 +
52046 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
52047 + reqmode, GR_MKNOD_ACL_MSG);
52048 +}
52049 +
52050 +__u32
52051 +gr_acl_handle_mkdir(const struct dentry *new_dentry,
52052 + const struct dentry *parent_dentry,
52053 + const struct vfsmount *parent_mnt)
52054 +{
52055 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
52056 + GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
52057 +}
52058 +
52059 +#define RENAME_CHECK_SUCCESS(old, new) \
52060 + (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
52061 + ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
52062 +
52063 +int
52064 +gr_acl_handle_rename(struct dentry *new_dentry,
52065 + struct dentry *parent_dentry,
52066 + const struct vfsmount *parent_mnt,
52067 + struct dentry *old_dentry,
52068 + struct inode *old_parent_inode,
52069 + struct vfsmount *old_mnt, const char *newname)
52070 +{
52071 + __u32 comp1, comp2;
52072 + int error = 0;
52073 +
52074 + if (unlikely(!gr_acl_is_enabled()))
52075 + return 0;
52076 +
52077 + if (!new_dentry->d_inode) {
52078 + comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
52079 + GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
52080 + GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
52081 + comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
52082 + GR_DELETE | GR_AUDIT_DELETE |
52083 + GR_AUDIT_READ | GR_AUDIT_WRITE |
52084 + GR_SUPPRESS, old_mnt);
52085 + } else {
52086 + comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
52087 + GR_CREATE | GR_DELETE |
52088 + GR_AUDIT_CREATE | GR_AUDIT_DELETE |
52089 + GR_AUDIT_READ | GR_AUDIT_WRITE |
52090 + GR_SUPPRESS, parent_mnt);
52091 + comp2 =
52092 + gr_search_file(old_dentry,
52093 + GR_READ | GR_WRITE | GR_AUDIT_READ |
52094 + GR_DELETE | GR_AUDIT_DELETE |
52095 + GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
52096 + }
52097 +
52098 + if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
52099 + ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
52100 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
52101 + else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
52102 + && !(comp2 & GR_SUPPRESS)) {
52103 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
52104 + error = -EACCES;
52105 + } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
52106 + error = -EACCES;
52107 +
52108 + return error;
52109 +}
52110 +
52111 +void
52112 +gr_acl_handle_exit(void)
52113 +{
52114 + u16 id;
52115 + char *rolename;
52116 + struct file *exec_file;
52117 +
52118 + if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
52119 + !(current->role->roletype & GR_ROLE_PERSIST))) {
52120 + id = current->acl_role_id;
52121 + rolename = current->role->rolename;
52122 + gr_set_acls(1);
52123 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
52124 + }
52125 +
52126 + write_lock(&grsec_exec_file_lock);
52127 + exec_file = current->exec_file;
52128 + current->exec_file = NULL;
52129 + write_unlock(&grsec_exec_file_lock);
52130 +
52131 + if (exec_file)
52132 + fput(exec_file);
52133 +}
52134 +
52135 +int
52136 +gr_acl_handle_procpidmem(const struct task_struct *task)
52137 +{
52138 + if (unlikely(!gr_acl_is_enabled()))
52139 + return 0;
52140 +
52141 + if (task != current && task->acl->mode & GR_PROTPROCFD)
52142 + return -EACCES;
52143 +
52144 + return 0;
52145 +}
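
gr_acl_handle_open() above turns the open flags into a required-access bitmask before asking gr_search_file() whether the subject grants it. A small userspace sketch of that flag-to-reqmode translation, with illustrative GR_* values and O_ACCMODE standing in for the kernel's FMODE_READ/FMODE_WRITE split (a simplified model, not the patch's definitions):

#define _GNU_SOURCE
#include <stdio.h>
#include <fcntl.h>

#define GR_FIND   0x01  /* illustrative values only */
#define GR_READ   0x02
#define GR_WRITE  0x04
#define GR_APPEND 0x08

static unsigned int open_reqmode(int flags)
{
	unsigned int reqmode = GR_FIND;
	int acc = flags & O_ACCMODE;

	if (flags & O_APPEND)
		reqmode |= GR_APPEND;   /* append-only access, not plain write */
	else if (acc == O_WRONLY || acc == O_RDWR)
		reqmode |= GR_WRITE;
	if ((acc == O_RDONLY || acc == O_RDWR) && !(flags & O_DIRECTORY))
		reqmode |= GR_READ;
	return reqmode;
}

int main(void)
{
	printf("O_RDONLY          -> %#x\n", open_reqmode(O_RDONLY));             /* 0x3 */
	printf("O_WRONLY|O_APPEND -> %#x\n", open_reqmode(O_WRONLY | O_APPEND));  /* 0x9 */
	printf("O_RDWR            -> %#x\n", open_reqmode(O_RDWR));               /* 0x7 */
	return 0;
}
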
52146 diff -urNp linux-3.0.8/grsecurity/gracl_ip.c linux-3.0.8/grsecurity/gracl_ip.c
52147 --- linux-3.0.8/grsecurity/gracl_ip.c 1969-12-31 19:00:00.000000000 -0500
52148 +++ linux-3.0.8/grsecurity/gracl_ip.c 2011-08-23 21:48:14.000000000 -0400
52149 @@ -0,0 +1,381 @@
52150 +#include <linux/kernel.h>
52151 +#include <asm/uaccess.h>
52152 +#include <asm/errno.h>
52153 +#include <net/sock.h>
52154 +#include <linux/file.h>
52155 +#include <linux/fs.h>
52156 +#include <linux/net.h>
52157 +#include <linux/in.h>
52158 +#include <linux/skbuff.h>
52159 +#include <linux/ip.h>
52160 +#include <linux/udp.h>
52161 +#include <linux/types.h>
52162 +#include <linux/sched.h>
52163 +#include <linux/netdevice.h>
52164 +#include <linux/inetdevice.h>
52165 +#include <linux/gracl.h>
52166 +#include <linux/grsecurity.h>
52167 +#include <linux/grinternal.h>
52168 +
52169 +#define GR_BIND 0x01
52170 +#define GR_CONNECT 0x02
52171 +#define GR_INVERT 0x04
52172 +#define GR_BINDOVERRIDE 0x08
52173 +#define GR_CONNECTOVERRIDE 0x10
52174 +#define GR_SOCK_FAMILY 0x20
52175 +
52176 +static const char * gr_protocols[IPPROTO_MAX] = {
52177 + "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
52178 + "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
52179 + "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
52180 + "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
52181 + "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
52182 + "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
52183 + "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
52184 + "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
52185 + "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
52186 + "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
52187 + "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
52188 + "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
52189 + "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
52190 + "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
52191 + "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
52192 + "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
52193 + "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
52194 + "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
52195 + "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
52196 + "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
52197 + "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
52198 + "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
52199 + "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
52200 + "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
52201 + "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
52202 + "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
52203 + "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
52204 + "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
52205 + "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
52206 + "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
52207 + "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
52208 + "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
52209 + };
52210 +
52211 +static const char * gr_socktypes[SOCK_MAX] = {
52212 + "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
52213 + "unknown:7", "unknown:8", "unknown:9", "packet"
52214 + };
52215 +
52216 +static const char * gr_sockfamilies[AF_MAX+1] = {
52217 + "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
52218 + "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
52219 + "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
52220 + "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf"
52221 + };
52222 +
52223 +const char *
52224 +gr_proto_to_name(unsigned char proto)
52225 +{
52226 + return gr_protocols[proto];
52227 +}
52228 +
52229 +const char *
52230 +gr_socktype_to_name(unsigned char type)
52231 +{
52232 + return gr_socktypes[type];
52233 +}
52234 +
52235 +const char *
52236 +gr_sockfamily_to_name(unsigned char family)
52237 +{
52238 + return gr_sockfamilies[family];
52239 +}
52240 +
52241 +int
52242 +gr_search_socket(const int domain, const int type, const int protocol)
52243 +{
52244 + struct acl_subject_label *curr;
52245 + const struct cred *cred = current_cred();
52246 +
52247 + if (unlikely(!gr_acl_is_enabled()))
52248 + goto exit;
52249 +
52250 + if ((domain < 0) || (type < 0) || (protocol < 0) ||
52251 + (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
52252 + goto exit; // let the kernel handle it
52253 +
52254 + curr = current->acl;
52255 +
52256 + if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
52257 +		/* the family is allowed; if this is PF_INET, allow it only if
52258 + the extra sock type/protocol checks pass */
52259 + if (domain == PF_INET)
52260 + goto inet_check;
52261 + goto exit;
52262 + } else {
52263 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
52264 + __u32 fakeip = 0;
52265 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
52266 + current->role->roletype, cred->uid,
52267 + cred->gid, current->exec_file ?
52268 + gr_to_filename(current->exec_file->f_path.dentry,
52269 + current->exec_file->f_path.mnt) :
52270 + curr->filename, curr->filename,
52271 + &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
52272 + &current->signal->saved_ip);
52273 + goto exit;
52274 + }
52275 + goto exit_fail;
52276 + }
52277 +
52278 +inet_check:
52279 + /* the rest of this checking is for IPv4 only */
52280 + if (!curr->ips)
52281 + goto exit;
52282 +
52283 + if ((curr->ip_type & (1 << type)) &&
52284 + (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
52285 + goto exit;
52286 +
52287 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
52288 +		/* we don't place acls on raw sockets, and sometimes
52289 + dgram/ip sockets are opened for ioctl and not
52290 + bind/connect, so we'll fake a bind learn log */
52291 + if (type == SOCK_RAW || type == SOCK_PACKET) {
52292 + __u32 fakeip = 0;
52293 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
52294 + current->role->roletype, cred->uid,
52295 + cred->gid, current->exec_file ?
52296 + gr_to_filename(current->exec_file->f_path.dentry,
52297 + current->exec_file->f_path.mnt) :
52298 + curr->filename, curr->filename,
52299 + &fakeip, 0, type,
52300 + protocol, GR_CONNECT, &current->signal->saved_ip);
52301 + } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
52302 + __u32 fakeip = 0;
52303 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
52304 + current->role->roletype, cred->uid,
52305 + cred->gid, current->exec_file ?
52306 + gr_to_filename(current->exec_file->f_path.dentry,
52307 + current->exec_file->f_path.mnt) :
52308 + curr->filename, curr->filename,
52309 + &fakeip, 0, type,
52310 + protocol, GR_BIND, &current->signal->saved_ip);
52311 + }
52312 + /* we'll log when they use connect or bind */
52313 + goto exit;
52314 + }
52315 +
52316 +exit_fail:
52317 + if (domain == PF_INET)
52318 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
52319 + gr_socktype_to_name(type), gr_proto_to_name(protocol));
52320 + else
52321 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
52322 + gr_socktype_to_name(type), protocol);
52323 +
52324 + return 0;
52325 +exit:
52326 + return 1;
52327 +}
52328 +
52329 +int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
52330 +{
52331 + if ((ip->mode & mode) &&
52332 + (ip_port >= ip->low) &&
52333 + (ip_port <= ip->high) &&
52334 + ((ntohl(ip_addr) & our_netmask) ==
52335 + (ntohl(our_addr) & our_netmask))
52336 + && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
52337 + && (ip->type & (1 << type))) {
52338 + if (ip->mode & GR_INVERT)
52339 + return 2; // specifically denied
52340 + else
52341 + return 1; // allowed
52342 + }
52343 +
52344 + return 0; // not specifically allowed, may continue parsing
52345 +}
52346 +
52347 +static int
52348 +gr_search_connectbind(const int full_mode, struct sock *sk,
52349 + struct sockaddr_in *addr, const int type)
52350 +{
52351 + char iface[IFNAMSIZ] = {0};
52352 + struct acl_subject_label *curr;
52353 + struct acl_ip_label *ip;
52354 + struct inet_sock *isk;
52355 + struct net_device *dev;
52356 + struct in_device *idev;
52357 + unsigned long i;
52358 + int ret;
52359 + int mode = full_mode & (GR_BIND | GR_CONNECT);
52360 + __u32 ip_addr = 0;
52361 + __u32 our_addr;
52362 + __u32 our_netmask;
52363 + char *p;
52364 + __u16 ip_port = 0;
52365 + const struct cred *cred = current_cred();
52366 +
52367 + if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
52368 + return 0;
52369 +
52370 + curr = current->acl;
52371 + isk = inet_sk(sk);
52372 +
52373 + /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
52374 + if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
52375 + addr->sin_addr.s_addr = curr->inaddr_any_override;
52376 + if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
52377 + struct sockaddr_in saddr;
52378 + int err;
52379 +
52380 + saddr.sin_family = AF_INET;
52381 + saddr.sin_addr.s_addr = curr->inaddr_any_override;
52382 + saddr.sin_port = isk->inet_sport;
52383 +
52384 + err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
52385 + if (err)
52386 + return err;
52387 +
52388 + err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
52389 + if (err)
52390 + return err;
52391 + }
52392 +
52393 + if (!curr->ips)
52394 + return 0;
52395 +
52396 + ip_addr = addr->sin_addr.s_addr;
52397 + ip_port = ntohs(addr->sin_port);
52398 +
52399 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
52400 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
52401 + current->role->roletype, cred->uid,
52402 + cred->gid, current->exec_file ?
52403 + gr_to_filename(current->exec_file->f_path.dentry,
52404 + current->exec_file->f_path.mnt) :
52405 + curr->filename, curr->filename,
52406 + &ip_addr, ip_port, type,
52407 + sk->sk_protocol, mode, &current->signal->saved_ip);
52408 + return 0;
52409 + }
52410 +
52411 + for (i = 0; i < curr->ip_num; i++) {
52412 + ip = *(curr->ips + i);
52413 + if (ip->iface != NULL) {
52414 + strncpy(iface, ip->iface, IFNAMSIZ - 1);
52415 + p = strchr(iface, ':');
52416 + if (p != NULL)
52417 + *p = '\0';
52418 + dev = dev_get_by_name(sock_net(sk), iface);
52419 + if (dev == NULL)
52420 + continue;
52421 + idev = in_dev_get(dev);
52422 + if (idev == NULL) {
52423 + dev_put(dev);
52424 + continue;
52425 + }
52426 + rcu_read_lock();
52427 + for_ifa(idev) {
52428 + if (!strcmp(ip->iface, ifa->ifa_label)) {
52429 + our_addr = ifa->ifa_address;
52430 + our_netmask = 0xffffffff;
52431 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
52432 + if (ret == 1) {
52433 + rcu_read_unlock();
52434 + in_dev_put(idev);
52435 + dev_put(dev);
52436 + return 0;
52437 + } else if (ret == 2) {
52438 + rcu_read_unlock();
52439 + in_dev_put(idev);
52440 + dev_put(dev);
52441 + goto denied;
52442 + }
52443 + }
52444 + } endfor_ifa(idev);
52445 + rcu_read_unlock();
52446 + in_dev_put(idev);
52447 + dev_put(dev);
52448 + } else {
52449 + our_addr = ip->addr;
52450 + our_netmask = ip->netmask;
52451 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
52452 + if (ret == 1)
52453 + return 0;
52454 + else if (ret == 2)
52455 + goto denied;
52456 + }
52457 + }
52458 +
52459 +denied:
52460 + if (mode == GR_BIND)
52461 + gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
52462 + else if (mode == GR_CONNECT)
52463 + gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
52464 +
52465 + return -EACCES;
52466 +}
52467 +
52468 +int
52469 +gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
52470 +{
52471 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
52472 +}
52473 +
52474 +int
52475 +gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
52476 +{
52477 + return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
52478 +}
52479 +
52480 +int gr_search_listen(struct socket *sock)
52481 +{
52482 + struct sock *sk = sock->sk;
52483 + struct sockaddr_in addr;
52484 +
52485 + addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
52486 + addr.sin_port = inet_sk(sk)->inet_sport;
52487 +
52488 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
52489 +}
52490 +
52491 +int gr_search_accept(struct socket *sock)
52492 +{
52493 + struct sock *sk = sock->sk;
52494 + struct sockaddr_in addr;
52495 +
52496 + addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
52497 + addr.sin_port = inet_sk(sk)->inet_sport;
52498 +
52499 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
52500 +}
52501 +
52502 +int
52503 +gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
52504 +{
52505 + if (addr)
52506 + return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
52507 + else {
52508 + struct sockaddr_in sin;
52509 + const struct inet_sock *inet = inet_sk(sk);
52510 +
52511 + sin.sin_addr.s_addr = inet->inet_daddr;
52512 + sin.sin_port = inet->inet_dport;
52513 +
52514 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
52515 + }
52516 +}
52517 +
52518 +int
52519 +gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
52520 +{
52521 + struct sockaddr_in sin;
52522 +
52523 + if (unlikely(skb->len < sizeof (struct udphdr)))
52524 + return 0; // skip this packet
52525 +
52526 + sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
52527 + sin.sin_port = udp_hdr(skb)->source;
52528 +
52529 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
52530 +}
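
check_ip_policy() above matches an endpoint against a policy entry by port range and by masked address comparison (plus protocol/type bitmaps). A compilable userspace sketch of just the subnet-and-port part, assuming the netmask is held in host byte order as in the comparison above:

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

struct ip_rule {
	in_addr_t      addr;      /* network byte order, like acl_ip_label->addr */
	uint32_t       netmask;   /* host byte order here (an assumption) */
	unsigned short low, high; /* allowed port range */
};

static int rule_matches(const struct ip_rule *r, in_addr_t ip, unsigned short port)
{
	return port >= r->low && port <= r->high &&
	       (ntohl(ip) & r->netmask) == (ntohl(r->addr) & r->netmask);
}

int main(void)
{
	struct ip_rule r = { inet_addr("192.168.1.0"), 0xffffff00, 1024, 65535 };

	printf("%d\n", rule_matches(&r, inet_addr("192.168.1.50"), 8080)); /* 1 */
	printf("%d\n", rule_matches(&r, inet_addr("192.168.2.50"), 8080)); /* 0: wrong subnet */
	printf("%d\n", rule_matches(&r, inet_addr("192.168.1.50"), 80));   /* 0: port below range */
	return 0;
}
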
52531 diff -urNp linux-3.0.8/grsecurity/gracl_learn.c linux-3.0.8/grsecurity/gracl_learn.c
52532 --- linux-3.0.8/grsecurity/gracl_learn.c 1969-12-31 19:00:00.000000000 -0500
52533 +++ linux-3.0.8/grsecurity/gracl_learn.c 2011-08-23 21:48:14.000000000 -0400
52534 @@ -0,0 +1,207 @@
52535 +#include <linux/kernel.h>
52536 +#include <linux/mm.h>
52537 +#include <linux/sched.h>
52538 +#include <linux/poll.h>
52539 +#include <linux/string.h>
52540 +#include <linux/file.h>
52541 +#include <linux/types.h>
52542 +#include <linux/vmalloc.h>
52543 +#include <linux/grinternal.h>
52544 +
52545 +extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
52546 + size_t count, loff_t *ppos);
52547 +extern int gr_acl_is_enabled(void);
52548 +
52549 +static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
52550 +static int gr_learn_attached;
52551 +
52552 +/* use a 512k buffer */
52553 +#define LEARN_BUFFER_SIZE (512 * 1024)
52554 +
52555 +static DEFINE_SPINLOCK(gr_learn_lock);
52556 +static DEFINE_MUTEX(gr_learn_user_mutex);
52557 +
52558 +/* we need to maintain two buffers, so that the kernel context of grlearn
52559 +   uses a mutex around the userspace copying, and the other kernel contexts
52560 + use a spinlock when copying into the buffer, since they cannot sleep
52561 +*/
52562 +static char *learn_buffer;
52563 +static char *learn_buffer_user;
52564 +static int learn_buffer_len;
52565 +static int learn_buffer_user_len;
52566 +
52567 +static ssize_t
52568 +read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
52569 +{
52570 + DECLARE_WAITQUEUE(wait, current);
52571 + ssize_t retval = 0;
52572 +
52573 + add_wait_queue(&learn_wait, &wait);
52574 + set_current_state(TASK_INTERRUPTIBLE);
52575 + do {
52576 + mutex_lock(&gr_learn_user_mutex);
52577 + spin_lock(&gr_learn_lock);
52578 + if (learn_buffer_len)
52579 + break;
52580 + spin_unlock(&gr_learn_lock);
52581 + mutex_unlock(&gr_learn_user_mutex);
52582 + if (file->f_flags & O_NONBLOCK) {
52583 + retval = -EAGAIN;
52584 + goto out;
52585 + }
52586 + if (signal_pending(current)) {
52587 + retval = -ERESTARTSYS;
52588 + goto out;
52589 + }
52590 +
52591 + schedule();
52592 + } while (1);
52593 +
52594 + memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
52595 + learn_buffer_user_len = learn_buffer_len;
52596 + retval = learn_buffer_len;
52597 + learn_buffer_len = 0;
52598 +
52599 + spin_unlock(&gr_learn_lock);
52600 +
52601 + if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
52602 + retval = -EFAULT;
52603 +
52604 + mutex_unlock(&gr_learn_user_mutex);
52605 +out:
52606 + set_current_state(TASK_RUNNING);
52607 + remove_wait_queue(&learn_wait, &wait);
52608 + return retval;
52609 +}
52610 +
52611 +static unsigned int
52612 +poll_learn(struct file * file, poll_table * wait)
52613 +{
52614 + poll_wait(file, &learn_wait, wait);
52615 +
52616 + if (learn_buffer_len)
52617 + return (POLLIN | POLLRDNORM);
52618 +
52619 + return 0;
52620 +}
52621 +
52622 +void
52623 +gr_clear_learn_entries(void)
52624 +{
52625 + char *tmp;
52626 +
52627 + mutex_lock(&gr_learn_user_mutex);
52628 + spin_lock(&gr_learn_lock);
52629 + tmp = learn_buffer;
52630 + learn_buffer = NULL;
52631 + spin_unlock(&gr_learn_lock);
52632 + if (tmp)
52633 + vfree(tmp);
52634 + if (learn_buffer_user != NULL) {
52635 + vfree(learn_buffer_user);
52636 + learn_buffer_user = NULL;
52637 + }
52638 + learn_buffer_len = 0;
52639 + mutex_unlock(&gr_learn_user_mutex);
52640 +
52641 + return;
52642 +}
52643 +
52644 +void
52645 +gr_add_learn_entry(const char *fmt, ...)
52646 +{
52647 + va_list args;
52648 + unsigned int len;
52649 +
52650 + if (!gr_learn_attached)
52651 + return;
52652 +
52653 + spin_lock(&gr_learn_lock);
52654 +
52655 + /* leave a gap at the end so we know when it's "full" but don't have to
52656 + compute the exact length of the string we're trying to append
52657 + */
52658 + if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
52659 + spin_unlock(&gr_learn_lock);
52660 + wake_up_interruptible(&learn_wait);
52661 + return;
52662 + }
52663 + if (learn_buffer == NULL) {
52664 + spin_unlock(&gr_learn_lock);
52665 + return;
52666 + }
52667 +
52668 + va_start(args, fmt);
52669 + len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
52670 + va_end(args);
52671 +
52672 + learn_buffer_len += len + 1;
52673 +
52674 + spin_unlock(&gr_learn_lock);
52675 + wake_up_interruptible(&learn_wait);
52676 +
52677 + return;
52678 +}
52679 +
52680 +static int
52681 +open_learn(struct inode *inode, struct file *file)
52682 +{
52683 + if (file->f_mode & FMODE_READ && gr_learn_attached)
52684 + return -EBUSY;
52685 + if (file->f_mode & FMODE_READ) {
52686 + int retval = 0;
52687 + mutex_lock(&gr_learn_user_mutex);
52688 + if (learn_buffer == NULL)
52689 + learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
52690 + if (learn_buffer_user == NULL)
52691 + learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
52692 + if (learn_buffer == NULL) {
52693 + retval = -ENOMEM;
52694 + goto out_error;
52695 + }
52696 + if (learn_buffer_user == NULL) {
52697 + retval = -ENOMEM;
52698 + goto out_error;
52699 + }
52700 + learn_buffer_len = 0;
52701 + learn_buffer_user_len = 0;
52702 + gr_learn_attached = 1;
52703 +out_error:
52704 + mutex_unlock(&gr_learn_user_mutex);
52705 + return retval;
52706 + }
52707 + return 0;
52708 +}
52709 +
52710 +static int
52711 +close_learn(struct inode *inode, struct file *file)
52712 +{
52713 + if (file->f_mode & FMODE_READ) {
52714 + char *tmp = NULL;
52715 + mutex_lock(&gr_learn_user_mutex);
52716 + spin_lock(&gr_learn_lock);
52717 + tmp = learn_buffer;
52718 + learn_buffer = NULL;
52719 + spin_unlock(&gr_learn_lock);
52720 + if (tmp)
52721 + vfree(tmp);
52722 + if (learn_buffer_user != NULL) {
52723 + vfree(learn_buffer_user);
52724 + learn_buffer_user = NULL;
52725 + }
52726 + learn_buffer_len = 0;
52727 + learn_buffer_user_len = 0;
52728 + gr_learn_attached = 0;
52729 + mutex_unlock(&gr_learn_user_mutex);
52730 + }
52731 +
52732 + return 0;
52733 +}
52734 +
52735 +const struct file_operations grsec_fops = {
52736 + .read = read_learn,
52737 + .write = write_grsec_handler,
52738 + .open = open_learn,
52739 + .release = close_learn,
52740 + .poll = poll_learn,
52741 +};
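
The read/poll pair above is consumed by a userspace learn daemon: it sleeps in poll() until gr_add_learn_entry() wakes learn_wait, then read()s a batch of NUL-separated entries. A rough sketch of such a reader; the /dev/grsec path is an assumption here, not something taken from this patch:

#include <stdio.h>
#include <fcntl.h>
#include <poll.h>
#include <unistd.h>

int main(void)
{
	static char buf[512 * 1024];            /* mirrors LEARN_BUFFER_SIZE above */
	int fd = open("/dev/grsec", O_RDONLY);  /* device path assumed */
	if (fd < 0) {
		perror("open");
		return 1;
	}

	for (;;) {
		struct pollfd pfd = { .fd = fd, .events = POLLIN };
		ssize_t n;

		if (poll(&pfd, 1, -1) < 0) {
			perror("poll");
			break;
		}
		n = read(fd, buf, sizeof(buf));
		if (n <= 0)
			break;
		fwrite(buf, 1, (size_t)n, stdout);  /* entries are NUL-separated */
	}
	close(fd);
	return 0;
}
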
52742 diff -urNp linux-3.0.8/grsecurity/gracl_res.c linux-3.0.8/grsecurity/gracl_res.c
52743 --- linux-3.0.8/grsecurity/gracl_res.c 1969-12-31 19:00:00.000000000 -0500
52744 +++ linux-3.0.8/grsecurity/gracl_res.c 2011-08-23 21:48:14.000000000 -0400
52745 @@ -0,0 +1,68 @@
52746 +#include <linux/kernel.h>
52747 +#include <linux/sched.h>
52748 +#include <linux/gracl.h>
52749 +#include <linux/grinternal.h>
52750 +
52751 +static const char *restab_log[] = {
52752 + [RLIMIT_CPU] = "RLIMIT_CPU",
52753 + [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
52754 + [RLIMIT_DATA] = "RLIMIT_DATA",
52755 + [RLIMIT_STACK] = "RLIMIT_STACK",
52756 + [RLIMIT_CORE] = "RLIMIT_CORE",
52757 + [RLIMIT_RSS] = "RLIMIT_RSS",
52758 + [RLIMIT_NPROC] = "RLIMIT_NPROC",
52759 + [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
52760 + [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
52761 + [RLIMIT_AS] = "RLIMIT_AS",
52762 + [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
52763 + [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
52764 + [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
52765 + [RLIMIT_NICE] = "RLIMIT_NICE",
52766 + [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
52767 + [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
52768 + [GR_CRASH_RES] = "RLIMIT_CRASH"
52769 +};
52770 +
52771 +void
52772 +gr_log_resource(const struct task_struct *task,
52773 + const int res, const unsigned long wanted, const int gt)
52774 +{
52775 + const struct cred *cred;
52776 + unsigned long rlim;
52777 +
52778 + if (!gr_acl_is_enabled() && !grsec_resource_logging)
52779 + return;
52780 +
52781 + // not yet supported resource
52782 + if (unlikely(!restab_log[res]))
52783 + return;
52784 +
52785 + if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
52786 + rlim = task_rlimit_max(task, res);
52787 + else
52788 + rlim = task_rlimit(task, res);
52789 +
52790 + if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
52791 + return;
52792 +
52793 + rcu_read_lock();
52794 + cred = __task_cred(task);
52795 +
52796 + if (res == RLIMIT_NPROC &&
52797 + (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
52798 + cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
52799 + goto out_rcu_unlock;
52800 + else if (res == RLIMIT_MEMLOCK &&
52801 + cap_raised(cred->cap_effective, CAP_IPC_LOCK))
52802 + goto out_rcu_unlock;
52803 + else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
52804 + goto out_rcu_unlock;
52805 + rcu_read_unlock();
52806 +
52807 + gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
52808 +
52809 + return;
52810 +out_rcu_unlock:
52811 + rcu_read_unlock();
52812 + return;
52813 +}
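
The early return in gr_log_resource() above encodes two comparison modes: with gt set, the request must strictly exceed the limit to be logged; otherwise reaching the limit is enough. A tiny userspace sketch of that threshold test (RLIM_INFINITY handling omitted):

#include <stdio.h>

static int over_limit(unsigned long wanted, unsigned long rlim, int gt)
{
	/* same condition as the early return above, negated */
	return !((gt && wanted <= rlim) || (!gt && wanted < rlim));
}

int main(void)
{
	printf("%d\n", over_limit(100, 100, 1)); /* 0: strict mode, equal is allowed */
	printf("%d\n", over_limit(100, 100, 0)); /* 1: reaching the limit is logged */
	printf("%d\n", over_limit(101, 100, 1)); /* 1 */
	return 0;
}
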
52814 diff -urNp linux-3.0.8/grsecurity/gracl_segv.c linux-3.0.8/grsecurity/gracl_segv.c
52815 --- linux-3.0.8/grsecurity/gracl_segv.c 1969-12-31 19:00:00.000000000 -0500
52816 +++ linux-3.0.8/grsecurity/gracl_segv.c 2011-08-23 21:48:14.000000000 -0400
52817 @@ -0,0 +1,299 @@
52818 +#include <linux/kernel.h>
52819 +#include <linux/mm.h>
52820 +#include <asm/uaccess.h>
52821 +#include <asm/errno.h>
52822 +#include <asm/mman.h>
52823 +#include <net/sock.h>
52824 +#include <linux/file.h>
52825 +#include <linux/fs.h>
52826 +#include <linux/net.h>
52827 +#include <linux/in.h>
52828 +#include <linux/slab.h>
52829 +#include <linux/types.h>
52830 +#include <linux/sched.h>
52831 +#include <linux/timer.h>
52832 +#include <linux/gracl.h>
52833 +#include <linux/grsecurity.h>
52834 +#include <linux/grinternal.h>
52835 +
52836 +static struct crash_uid *uid_set;
52837 +static unsigned short uid_used;
52838 +static DEFINE_SPINLOCK(gr_uid_lock);
52839 +extern rwlock_t gr_inode_lock;
52840 +extern struct acl_subject_label *
52841 + lookup_acl_subj_label(const ino_t inode, const dev_t dev,
52842 + struct acl_role_label *role);
52843 +
52844 +#ifdef CONFIG_BTRFS_FS
52845 +extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
52846 +extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
52847 +#endif
52848 +
52849 +static inline dev_t __get_dev(const struct dentry *dentry)
52850 +{
52851 +#ifdef CONFIG_BTRFS_FS
52852 + if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
52853 + return get_btrfs_dev_from_inode(dentry->d_inode);
52854 + else
52855 +#endif
52856 + return dentry->d_inode->i_sb->s_dev;
52857 +}
52858 +
52859 +int
52860 +gr_init_uidset(void)
52861 +{
52862 + uid_set =
52863 + kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
52864 + uid_used = 0;
52865 +
52866 + return uid_set ? 1 : 0;
52867 +}
52868 +
52869 +void
52870 +gr_free_uidset(void)
52871 +{
52872 + if (uid_set)
52873 + kfree(uid_set);
52874 +
52875 + return;
52876 +}
52877 +
52878 +int
52879 +gr_find_uid(const uid_t uid)
52880 +{
52881 + struct crash_uid *tmp = uid_set;
52882 + uid_t buid;
52883 + int low = 0, high = uid_used - 1, mid;
52884 +
52885 + while (high >= low) {
52886 + mid = (low + high) >> 1;
52887 + buid = tmp[mid].uid;
52888 + if (buid == uid)
52889 + return mid;
52890 + if (buid > uid)
52891 + high = mid - 1;
52892 + if (buid < uid)
52893 + low = mid + 1;
52894 + }
52895 +
52896 + return -1;
52897 +}
52898 +
52899 +static __inline__ void
52900 +gr_insertsort(void)
52901 +{
52902 + unsigned short i, j;
52903 + struct crash_uid index;
52904 +
52905 + for (i = 1; i < uid_used; i++) {
52906 + index = uid_set[i];
52907 + j = i;
52908 + while ((j > 0) && uid_set[j - 1].uid > index.uid) {
52909 + uid_set[j] = uid_set[j - 1];
52910 + j--;
52911 + }
52912 + uid_set[j] = index;
52913 + }
52914 +
52915 + return;
52916 +}
52917 +
52918 +static __inline__ void
52919 +gr_insert_uid(const uid_t uid, const unsigned long expires)
52920 +{
52921 + int loc;
52922 +
52923 + if (uid_used == GR_UIDTABLE_MAX)
52924 + return;
52925 +
52926 + loc = gr_find_uid(uid);
52927 +
52928 + if (loc >= 0) {
52929 + uid_set[loc].expires = expires;
52930 + return;
52931 + }
52932 +
52933 + uid_set[uid_used].uid = uid;
52934 + uid_set[uid_used].expires = expires;
52935 + uid_used++;
52936 +
52937 + gr_insertsort();
52938 +
52939 + return;
52940 +}
52941 +
52942 +void
52943 +gr_remove_uid(const unsigned short loc)
52944 +{
52945 + unsigned short i;
52946 +
52947 + for (i = loc + 1; i < uid_used; i++)
52948 + uid_set[i - 1] = uid_set[i];
52949 +
52950 + uid_used--;
52951 +
52952 + return;
52953 +}
52954 +
52955 +int
52956 +gr_check_crash_uid(const uid_t uid)
52957 +{
52958 + int loc;
52959 + int ret = 0;
52960 +
52961 + if (unlikely(!gr_acl_is_enabled()))
52962 + return 0;
52963 +
52964 + spin_lock(&gr_uid_lock);
52965 + loc = gr_find_uid(uid);
52966 +
52967 + if (loc < 0)
52968 + goto out_unlock;
52969 +
52970 + if (time_before_eq(uid_set[loc].expires, get_seconds()))
52971 + gr_remove_uid(loc);
52972 + else
52973 + ret = 1;
52974 +
52975 +out_unlock:
52976 + spin_unlock(&gr_uid_lock);
52977 + return ret;
52978 +}
52979 +
52980 +static __inline__ int
52981 +proc_is_setxid(const struct cred *cred)
52982 +{
52983 + if (cred->uid != cred->euid || cred->uid != cred->suid ||
52984 + cred->uid != cred->fsuid)
52985 + return 1;
52986 + if (cred->gid != cred->egid || cred->gid != cred->sgid ||
52987 + cred->gid != cred->fsgid)
52988 + return 1;
52989 +
52990 + return 0;
52991 +}
52992 +
52993 +extern int gr_fake_force_sig(int sig, struct task_struct *t);
52994 +
52995 +void
52996 +gr_handle_crash(struct task_struct *task, const int sig)
52997 +{
52998 + struct acl_subject_label *curr;
52999 + struct acl_subject_label *curr2;
53000 + struct task_struct *tsk, *tsk2;
53001 + const struct cred *cred;
53002 + const struct cred *cred2;
53003 +
53004 + if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
53005 + return;
53006 +
53007 + if (unlikely(!gr_acl_is_enabled()))
53008 + return;
53009 +
53010 + curr = task->acl;
53011 +
53012 + if (!(curr->resmask & (1 << GR_CRASH_RES)))
53013 + return;
53014 +
53015 + if (time_before_eq(curr->expires, get_seconds())) {
53016 + curr->expires = 0;
53017 + curr->crashes = 0;
53018 + }
53019 +
53020 + curr->crashes++;
53021 +
53022 + if (!curr->expires)
53023 + curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
53024 +
53025 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
53026 + time_after(curr->expires, get_seconds())) {
53027 + rcu_read_lock();
53028 + cred = __task_cred(task);
53029 + if (cred->uid && proc_is_setxid(cred)) {
53030 + gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
53031 + spin_lock(&gr_uid_lock);
53032 + gr_insert_uid(cred->uid, curr->expires);
53033 + spin_unlock(&gr_uid_lock);
53034 + curr->expires = 0;
53035 + curr->crashes = 0;
53036 + read_lock(&tasklist_lock);
53037 + do_each_thread(tsk2, tsk) {
53038 + cred2 = __task_cred(tsk);
53039 + if (tsk != task && cred2->uid == cred->uid)
53040 + gr_fake_force_sig(SIGKILL, tsk);
53041 + } while_each_thread(tsk2, tsk);
53042 + read_unlock(&tasklist_lock);
53043 + } else {
53044 + gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
53045 + read_lock(&tasklist_lock);
53046 + do_each_thread(tsk2, tsk) {
53047 + if (likely(tsk != task)) {
53048 + curr2 = tsk->acl;
53049 +
53050 + if (curr2->device == curr->device &&
53051 + curr2->inode == curr->inode)
53052 + gr_fake_force_sig(SIGKILL, tsk);
53053 + }
53054 + } while_each_thread(tsk2, tsk);
53055 + read_unlock(&tasklist_lock);
53056 + }
53057 + rcu_read_unlock();
53058 + }
53059 +
53060 + return;
53061 +}
53062 +
53063 +int
53064 +gr_check_crash_exec(const struct file *filp)
53065 +{
53066 + struct acl_subject_label *curr;
53067 +
53068 + if (unlikely(!gr_acl_is_enabled()))
53069 + return 0;
53070 +
53071 + read_lock(&gr_inode_lock);
53072 + curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
53073 + __get_dev(filp->f_path.dentry),
53074 + current->role);
53075 + read_unlock(&gr_inode_lock);
53076 +
53077 + if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
53078 + (!curr->crashes && !curr->expires))
53079 + return 0;
53080 +
53081 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
53082 + time_after(curr->expires, get_seconds()))
53083 + return 1;
53084 + else if (time_before_eq(curr->expires, get_seconds())) {
53085 + curr->crashes = 0;
53086 + curr->expires = 0;
53087 + }
53088 +
53089 + return 0;
53090 +}
53091 +
53092 +void
53093 +gr_handle_alertkill(struct task_struct *task)
53094 +{
53095 + struct acl_subject_label *curracl;
53096 + __u32 curr_ip;
53097 + struct task_struct *p, *p2;
53098 +
53099 + if (unlikely(!gr_acl_is_enabled()))
53100 + return;
53101 +
53102 + curracl = task->acl;
53103 + curr_ip = task->signal->curr_ip;
53104 +
53105 + if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
53106 + read_lock(&tasklist_lock);
53107 + do_each_thread(p2, p) {
53108 + if (p->signal->curr_ip == curr_ip)
53109 + gr_fake_force_sig(SIGKILL, p);
53110 + } while_each_thread(p2, p);
53111 + read_unlock(&tasklist_lock);
53112 + } else if (curracl->mode & GR_KILLPROC)
53113 + gr_fake_force_sig(SIGKILL, task);
53114 +
53115 + return;
53116 +}
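
gracl_segv.c above keeps the crash-uid table sorted with an insertion sort so gr_find_uid() can binary-search it under the spinlock. A self-contained userspace model of the same pair of algorithms (simplified: a single-element insertion instead of a full re-sort, fixed-size table):

#include <stdio.h>

#define MAX_UIDS 64

struct crash_entry { unsigned int uid; unsigned long expires; };

static struct crash_entry set[MAX_UIDS];
static unsigned short used;

static int find_uid(unsigned int uid)
{
	int low = 0, high = (int)used - 1;

	while (high >= low) {
		int mid = (low + high) / 2;
		if (set[mid].uid == uid)
			return mid;
		if (set[mid].uid > uid)
			high = mid - 1;
		else
			low = mid + 1;
	}
	return -1;
}

static void insert_uid(unsigned int uid, unsigned long expires)
{
	int loc = find_uid(uid);
	unsigned short j;

	if (loc >= 0) {                 /* already tracked: refresh expiry */
		set[loc].expires = expires;
		return;
	}
	if (used == MAX_UIDS)
		return;

	set[used].uid = uid;
	set[used].expires = expires;
	used++;
	/* keep the array ordered so the next binary search still works */
	for (j = used - 1; j > 0 && set[j - 1].uid > set[j].uid; j--) {
		struct crash_entry tmp = set[j];
		set[j] = set[j - 1];
		set[j - 1] = tmp;
	}
}

int main(void)
{
	insert_uid(1000, 60);
	insert_uid(500, 60);
	insert_uid(1500, 60);
	printf("index of 500:  %d\n", find_uid(500));  /* 0 */
	printf("index of 1000: %d\n", find_uid(1000)); /* 1 */
	printf("index of 42:   %d\n", find_uid(42));   /* -1 */
	return 0;
}
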
53117 diff -urNp linux-3.0.8/grsecurity/gracl_shm.c linux-3.0.8/grsecurity/gracl_shm.c
53118 --- linux-3.0.8/grsecurity/gracl_shm.c 1969-12-31 19:00:00.000000000 -0500
53119 +++ linux-3.0.8/grsecurity/gracl_shm.c 2011-08-23 21:48:14.000000000 -0400
53120 @@ -0,0 +1,40 @@
53121 +#include <linux/kernel.h>
53122 +#include <linux/mm.h>
53123 +#include <linux/sched.h>
53124 +#include <linux/file.h>
53125 +#include <linux/ipc.h>
53126 +#include <linux/gracl.h>
53127 +#include <linux/grsecurity.h>
53128 +#include <linux/grinternal.h>
53129 +
53130 +int
53131 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
53132 + const time_t shm_createtime, const uid_t cuid, const int shmid)
53133 +{
53134 + struct task_struct *task;
53135 +
53136 + if (!gr_acl_is_enabled())
53137 + return 1;
53138 +
53139 + rcu_read_lock();
53140 + read_lock(&tasklist_lock);
53141 +
53142 + task = find_task_by_vpid(shm_cprid);
53143 +
53144 + if (unlikely(!task))
53145 + task = find_task_by_vpid(shm_lapid);
53146 +
53147 + if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
53148 + (task->pid == shm_lapid)) &&
53149 + (task->acl->mode & GR_PROTSHM) &&
53150 + (task->acl != current->acl))) {
53151 + read_unlock(&tasklist_lock);
53152 + rcu_read_unlock();
53153 + gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
53154 + return 0;
53155 + }
53156 + read_unlock(&tasklist_lock);
53157 + rcu_read_unlock();
53158 +
53159 + return 1;
53160 +}
53161 diff -urNp linux-3.0.8/grsecurity/grsec_chdir.c linux-3.0.8/grsecurity/grsec_chdir.c
53162 --- linux-3.0.8/grsecurity/grsec_chdir.c 1969-12-31 19:00:00.000000000 -0500
53163 +++ linux-3.0.8/grsecurity/grsec_chdir.c 2011-08-23 21:48:14.000000000 -0400
53164 @@ -0,0 +1,19 @@
53165 +#include <linux/kernel.h>
53166 +#include <linux/sched.h>
53167 +#include <linux/fs.h>
53168 +#include <linux/file.h>
53169 +#include <linux/grsecurity.h>
53170 +#include <linux/grinternal.h>
53171 +
53172 +void
53173 +gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
53174 +{
53175 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
53176 + if ((grsec_enable_chdir && grsec_enable_group &&
53177 + in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
53178 + !grsec_enable_group)) {
53179 + gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
53180 + }
53181 +#endif
53182 + return;
53183 +}
53184 diff -urNp linux-3.0.8/grsecurity/grsec_chroot.c linux-3.0.8/grsecurity/grsec_chroot.c
53185 --- linux-3.0.8/grsecurity/grsec_chroot.c 1969-12-31 19:00:00.000000000 -0500
53186 +++ linux-3.0.8/grsecurity/grsec_chroot.c 2011-09-15 06:47:48.000000000 -0400
53187 @@ -0,0 +1,351 @@
53188 +#include <linux/kernel.h>
53189 +#include <linux/module.h>
53190 +#include <linux/sched.h>
53191 +#include <linux/file.h>
53192 +#include <linux/fs.h>
53193 +#include <linux/mount.h>
53194 +#include <linux/types.h>
53195 +#include <linux/pid_namespace.h>
53196 +#include <linux/grsecurity.h>
53197 +#include <linux/grinternal.h>
53198 +
53199 +void gr_set_chroot_entries(struct task_struct *task, struct path *path)
53200 +{
53201 +#ifdef CONFIG_GRKERNSEC
53202 + if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
53203 + path->dentry != task->nsproxy->mnt_ns->root->mnt_root)
53204 + task->gr_is_chrooted = 1;
53205 + else
53206 + task->gr_is_chrooted = 0;
53207 +
53208 + task->gr_chroot_dentry = path->dentry;
53209 +#endif
53210 + return;
53211 +}
53212 +
53213 +void gr_clear_chroot_entries(struct task_struct *task)
53214 +{
53215 +#ifdef CONFIG_GRKERNSEC
53216 + task->gr_is_chrooted = 0;
53217 + task->gr_chroot_dentry = NULL;
53218 +#endif
53219 + return;
53220 +}
53221 +
53222 +int
53223 +gr_handle_chroot_unix(const pid_t pid)
53224 +{
53225 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
53226 + struct task_struct *p;
53227 +
53228 + if (unlikely(!grsec_enable_chroot_unix))
53229 + return 1;
53230 +
53231 + if (likely(!proc_is_chrooted(current)))
53232 + return 1;
53233 +
53234 + rcu_read_lock();
53235 + read_lock(&tasklist_lock);
53236 + p = find_task_by_vpid_unrestricted(pid);
53237 + if (unlikely(p && !have_same_root(current, p))) {
53238 + read_unlock(&tasklist_lock);
53239 + rcu_read_unlock();
53240 + gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
53241 + return 0;
53242 + }
53243 + read_unlock(&tasklist_lock);
53244 + rcu_read_unlock();
53245 +#endif
53246 + return 1;
53247 +}
53248 +
53249 +int
53250 +gr_handle_chroot_nice(void)
53251 +{
53252 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
53253 + if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
53254 + gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
53255 + return -EPERM;
53256 + }
53257 +#endif
53258 + return 0;
53259 +}
53260 +
53261 +int
53262 +gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
53263 +{
53264 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
53265 + if (grsec_enable_chroot_nice && (niceval < task_nice(p))
53266 + && proc_is_chrooted(current)) {
53267 + gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
53268 + return -EACCES;
53269 + }
53270 +#endif
53271 + return 0;
53272 +}
53273 +
53274 +int
53275 +gr_handle_chroot_rawio(const struct inode *inode)
53276 +{
53277 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
53278 + if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
53279 + inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
53280 + return 1;
53281 +#endif
53282 + return 0;
53283 +}
53284 +
53285 +int
53286 +gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
53287 +{
53288 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
53289 + struct task_struct *p;
53290 + int ret = 0;
53291 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
53292 + return ret;
53293 +
53294 + read_lock(&tasklist_lock);
53295 + do_each_pid_task(pid, type, p) {
53296 + if (!have_same_root(current, p)) {
53297 + ret = 1;
53298 + goto out;
53299 + }
53300 + } while_each_pid_task(pid, type, p);
53301 +out:
53302 + read_unlock(&tasklist_lock);
53303 + return ret;
53304 +#endif
53305 + return 0;
53306 +}
53307 +
53308 +int
53309 +gr_pid_is_chrooted(struct task_struct *p)
53310 +{
53311 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
53312 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
53313 + return 0;
53314 +
53315 + if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
53316 + !have_same_root(current, p)) {
53317 + return 1;
53318 + }
53319 +#endif
53320 + return 0;
53321 +}
53322 +
53323 +EXPORT_SYMBOL(gr_pid_is_chrooted);
53324 +
53325 +#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
53326 +int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
53327 +{
53328 + struct path path, currentroot;
53329 + int ret = 0;
53330 +
53331 + path.dentry = (struct dentry *)u_dentry;
53332 + path.mnt = (struct vfsmount *)u_mnt;
53333 + get_fs_root(current->fs, &currentroot);
53334 + if (path_is_under(&path, &currentroot))
53335 + ret = 1;
53336 + path_put(&currentroot);
53337 +
53338 + return ret;
53339 +}
53340 +#endif
53341 +
53342 +int
53343 +gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
53344 +{
53345 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
53346 + if (!grsec_enable_chroot_fchdir)
53347 + return 1;
53348 +
53349 + if (!proc_is_chrooted(current))
53350 + return 1;
53351 + else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
53352 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
53353 + return 0;
53354 + }
53355 +#endif
53356 + return 1;
53357 +}
53358 +
53359 +int
53360 +gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
53361 + const time_t shm_createtime)
53362 +{
53363 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
53364 + struct task_struct *p;
53365 + time_t starttime;
53366 +
53367 + if (unlikely(!grsec_enable_chroot_shmat))
53368 + return 1;
53369 +
53370 + if (likely(!proc_is_chrooted(current)))
53371 + return 1;
53372 +
53373 + rcu_read_lock();
53374 + read_lock(&tasklist_lock);
53375 +
53376 + if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
53377 + starttime = p->start_time.tv_sec;
53378 + if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
53379 + if (have_same_root(current, p)) {
53380 + goto allow;
53381 + } else {
53382 + read_unlock(&tasklist_lock);
53383 + rcu_read_unlock();
53384 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
53385 + return 0;
53386 + }
53387 + }
53388 + /* creator exited, pid reuse, fall through to next check */
53389 + }
53390 + if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
53391 + if (unlikely(!have_same_root(current, p))) {
53392 + read_unlock(&tasklist_lock);
53393 + rcu_read_unlock();
53394 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
53395 + return 0;
53396 + }
53397 + }
53398 +
53399 +allow:
53400 + read_unlock(&tasklist_lock);
53401 + rcu_read_unlock();
53402 +#endif
53403 + return 1;
53404 +}
53405 +
53406 +void
53407 +gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
53408 +{
53409 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
53410 + if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
53411 + gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
53412 +#endif
53413 + return;
53414 +}
53415 +
53416 +int
53417 +gr_handle_chroot_mknod(const struct dentry *dentry,
53418 + const struct vfsmount *mnt, const int mode)
53419 +{
53420 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
53421 + if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
53422 + proc_is_chrooted(current)) {
53423 + gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
53424 + return -EPERM;
53425 + }
53426 +#endif
53427 + return 0;
53428 +}
53429 +
53430 +int
53431 +gr_handle_chroot_mount(const struct dentry *dentry,
53432 + const struct vfsmount *mnt, const char *dev_name)
53433 +{
53434 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
53435 + if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
53436 + gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
53437 + return -EPERM;
53438 + }
53439 +#endif
53440 + return 0;
53441 +}
53442 +
53443 +int
53444 +gr_handle_chroot_pivot(void)
53445 +{
53446 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
53447 + if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
53448 + gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
53449 + return -EPERM;
53450 + }
53451 +#endif
53452 + return 0;
53453 +}
53454 +
53455 +int
53456 +gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
53457 +{
53458 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
53459 + if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
53460 + !gr_is_outside_chroot(dentry, mnt)) {
53461 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
53462 + return -EPERM;
53463 + }
53464 +#endif
53465 + return 0;
53466 +}
53467 +
53468 +extern const char *captab_log[];
53469 +extern int captab_log_entries;
53470 +
53471 +int
53472 +gr_chroot_is_capable(const int cap)
53473 +{
53474 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
53475 + if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
53476 + kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
53477 + if (cap_raised(chroot_caps, cap)) {
53478 + const struct cred *creds = current_cred();
53479 + if (cap_raised(creds->cap_effective, cap) && cap < captab_log_entries) {
53480 + gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, current, captab_log[cap]);
53481 + }
53482 + return 0;
53483 + }
53484 + }
53485 +#endif
53486 + return 1;
53487 +}
53488 +
53489 +int
53490 +gr_chroot_is_capable_nolog(const int cap)
53491 +{
53492 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
53493 + if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
53494 + kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
53495 + if (cap_raised(chroot_caps, cap)) {
53496 + return 0;
53497 + }
53498 + }
53499 +#endif
53500 + return 1;
53501 +}
53502 +
53503 +int
53504 +gr_handle_chroot_sysctl(const int op)
53505 +{
53506 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
53507 + if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
53508 + proc_is_chrooted(current))
53509 + return -EACCES;
53510 +#endif
53511 + return 0;
53512 +}
53513 +
53514 +void
53515 +gr_handle_chroot_chdir(struct path *path)
53516 +{
53517 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
53518 + if (grsec_enable_chroot_chdir)
53519 + set_fs_pwd(current->fs, path);
53520 +#endif
53521 + return;
53522 +}
53523 +
53524 +int
53525 +gr_handle_chroot_chmod(const struct dentry *dentry,
53526 + const struct vfsmount *mnt, const int mode)
53527 +{
53528 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
53529 + /* allow chmod +s on directories, but not files */
53530 + if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
53531 + ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
53532 + proc_is_chrooted(current)) {
53533 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
53534 + return -EPERM;
53535 + }
53536 +#endif
53537 + return 0;
53538 +}
53539 diff -urNp linux-3.0.8/grsecurity/grsec_disabled.c linux-3.0.8/grsecurity/grsec_disabled.c
53540 --- linux-3.0.8/grsecurity/grsec_disabled.c 1969-12-31 19:00:00.000000000 -0500
53541 +++ linux-3.0.8/grsecurity/grsec_disabled.c 2011-10-25 09:17:34.000000000 -0400
53542 @@ -0,0 +1,439 @@
53543 +#include <linux/kernel.h>
53544 +#include <linux/module.h>
53545 +#include <linux/sched.h>
53546 +#include <linux/file.h>
53547 +#include <linux/fs.h>
53548 +#include <linux/kdev_t.h>
53549 +#include <linux/net.h>
53550 +#include <linux/in.h>
53551 +#include <linux/ip.h>
53552 +#include <linux/skbuff.h>
53553 +#include <linux/sysctl.h>
53554 +
53555 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
53556 +void
53557 +pax_set_initial_flags(struct linux_binprm *bprm)
53558 +{
53559 + return;
53560 +}
53561 +#endif
53562 +
53563 +#ifdef CONFIG_SYSCTL
53564 +__u32
53565 +gr_handle_sysctl(const struct ctl_table * table, const int op)
53566 +{
53567 + return 0;
53568 +}
53569 +#endif
53570 +
53571 +#ifdef CONFIG_TASKSTATS
53572 +int gr_is_taskstats_denied(int pid)
53573 +{
53574 + return 0;
53575 +}
53576 +#endif
53577 +
53578 +int
53579 +gr_acl_is_enabled(void)
53580 +{
53581 + return 0;
53582 +}
53583 +
53584 +void
53585 +gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
53586 +{
53587 + return;
53588 +}
53589 +
53590 +int
53591 +gr_handle_rawio(const struct inode *inode)
53592 +{
53593 + return 0;
53594 +}
53595 +
53596 +void
53597 +gr_acl_handle_psacct(struct task_struct *task, const long code)
53598 +{
53599 + return;
53600 +}
53601 +
53602 +int
53603 +gr_handle_ptrace(struct task_struct *task, const long request)
53604 +{
53605 + return 0;
53606 +}
53607 +
53608 +int
53609 +gr_handle_proc_ptrace(struct task_struct *task)
53610 +{
53611 + return 0;
53612 +}
53613 +
53614 +void
53615 +gr_learn_resource(const struct task_struct *task,
53616 + const int res, const unsigned long wanted, const int gt)
53617 +{
53618 + return;
53619 +}
53620 +
53621 +int
53622 +gr_set_acls(const int type)
53623 +{
53624 + return 0;
53625 +}
53626 +
53627 +int
53628 +gr_check_hidden_task(const struct task_struct *tsk)
53629 +{
53630 + return 0;
53631 +}
53632 +
53633 +int
53634 +gr_check_protected_task(const struct task_struct *task)
53635 +{
53636 + return 0;
53637 +}
53638 +
53639 +int
53640 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
53641 +{
53642 + return 0;
53643 +}
53644 +
53645 +void
53646 +gr_copy_label(struct task_struct *tsk)
53647 +{
53648 + return;
53649 +}
53650 +
53651 +void
53652 +gr_set_pax_flags(struct task_struct *task)
53653 +{
53654 + return;
53655 +}
53656 +
53657 +int
53658 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
53659 + const int unsafe_share)
53660 +{
53661 + return 0;
53662 +}
53663 +
53664 +void
53665 +gr_handle_delete(const ino_t ino, const dev_t dev)
53666 +{
53667 + return;
53668 +}
53669 +
53670 +void
53671 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
53672 +{
53673 + return;
53674 +}
53675 +
53676 +void
53677 +gr_handle_crash(struct task_struct *task, const int sig)
53678 +{
53679 + return;
53680 +}
53681 +
53682 +int
53683 +gr_check_crash_exec(const struct file *filp)
53684 +{
53685 + return 0;
53686 +}
53687 +
53688 +int
53689 +gr_check_crash_uid(const uid_t uid)
53690 +{
53691 + return 0;
53692 +}
53693 +
53694 +void
53695 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
53696 + struct dentry *old_dentry,
53697 + struct dentry *new_dentry,
53698 + struct vfsmount *mnt, const __u8 replace)
53699 +{
53700 + return;
53701 +}
53702 +
53703 +int
53704 +gr_search_socket(const int family, const int type, const int protocol)
53705 +{
53706 + return 1;
53707 +}
53708 +
53709 +int
53710 +gr_search_connectbind(const int mode, const struct socket *sock,
53711 + const struct sockaddr_in *addr)
53712 +{
53713 + return 0;
53714 +}
53715 +
53716 +void
53717 +gr_handle_alertkill(struct task_struct *task)
53718 +{
53719 + return;
53720 +}
53721 +
53722 +__u32
53723 +gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
53724 +{
53725 + return 1;
53726 +}
53727 +
53728 +__u32
53729 +gr_acl_handle_hidden_file(const struct dentry * dentry,
53730 + const struct vfsmount * mnt)
53731 +{
53732 + return 1;
53733 +}
53734 +
53735 +__u32
53736 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
53737 + const int fmode)
53738 +{
53739 + return 1;
53740 +}
53741 +
53742 +__u32
53743 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
53744 +{
53745 + return 1;
53746 +}
53747 +
53748 +__u32
53749 +gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
53750 +{
53751 + return 1;
53752 +}
53753 +
53754 +int
53755 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
53756 + unsigned int *vm_flags)
53757 +{
53758 + return 1;
53759 +}
53760 +
53761 +__u32
53762 +gr_acl_handle_truncate(const struct dentry * dentry,
53763 + const struct vfsmount * mnt)
53764 +{
53765 + return 1;
53766 +}
53767 +
53768 +__u32
53769 +gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
53770 +{
53771 + return 1;
53772 +}
53773 +
53774 +__u32
53775 +gr_acl_handle_access(const struct dentry * dentry,
53776 + const struct vfsmount * mnt, const int fmode)
53777 +{
53778 + return 1;
53779 +}
53780 +
53781 +__u32
53782 +gr_acl_handle_fchmod(const struct dentry * dentry, const struct vfsmount * mnt,
53783 + mode_t mode)
53784 +{
53785 + return 1;
53786 +}
53787 +
53788 +__u32
53789 +gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
53790 + mode_t mode)
53791 +{
53792 + return 1;
53793 +}
53794 +
53795 +__u32
53796 +gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
53797 +{
53798 + return 1;
53799 +}
53800 +
53801 +__u32
53802 +gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
53803 +{
53804 + return 1;
53805 +}
53806 +
53807 +void
53808 +grsecurity_init(void)
53809 +{
53810 + return;
53811 +}
53812 +
53813 +__u32
53814 +gr_acl_handle_mknod(const struct dentry * new_dentry,
53815 + const struct dentry * parent_dentry,
53816 + const struct vfsmount * parent_mnt,
53817 + const int mode)
53818 +{
53819 + return 1;
53820 +}
53821 +
53822 +__u32
53823 +gr_acl_handle_mkdir(const struct dentry * new_dentry,
53824 + const struct dentry * parent_dentry,
53825 + const struct vfsmount * parent_mnt)
53826 +{
53827 + return 1;
53828 +}
53829 +
53830 +__u32
53831 +gr_acl_handle_symlink(const struct dentry * new_dentry,
53832 + const struct dentry * parent_dentry,
53833 + const struct vfsmount * parent_mnt, const char *from)
53834 +{
53835 + return 1;
53836 +}
53837 +
53838 +__u32
53839 +gr_acl_handle_link(const struct dentry * new_dentry,
53840 + const struct dentry * parent_dentry,
53841 + const struct vfsmount * parent_mnt,
53842 + const struct dentry * old_dentry,
53843 + const struct vfsmount * old_mnt, const char *to)
53844 +{
53845 + return 1;
53846 +}
53847 +
53848 +int
53849 +gr_acl_handle_rename(const struct dentry *new_dentry,
53850 + const struct dentry *parent_dentry,
53851 + const struct vfsmount *parent_mnt,
53852 + const struct dentry *old_dentry,
53853 + const struct inode *old_parent_inode,
53854 + const struct vfsmount *old_mnt, const char *newname)
53855 +{
53856 + return 0;
53857 +}
53858 +
53859 +int
53860 +gr_acl_handle_filldir(const struct file *file, const char *name,
53861 + const int namelen, const ino_t ino)
53862 +{
53863 + return 1;
53864 +}
53865 +
53866 +int
53867 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
53868 + const time_t shm_createtime, const uid_t cuid, const int shmid)
53869 +{
53870 + return 1;
53871 +}
53872 +
53873 +int
53874 +gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
53875 +{
53876 + return 0;
53877 +}
53878 +
53879 +int
53880 +gr_search_accept(const struct socket *sock)
53881 +{
53882 + return 0;
53883 +}
53884 +
53885 +int
53886 +gr_search_listen(const struct socket *sock)
53887 +{
53888 + return 0;
53889 +}
53890 +
53891 +int
53892 +gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
53893 +{
53894 + return 0;
53895 +}
53896 +
53897 +__u32
53898 +gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
53899 +{
53900 + return 1;
53901 +}
53902 +
53903 +__u32
53904 +gr_acl_handle_creat(const struct dentry * dentry,
53905 + const struct dentry * p_dentry,
53906 + const struct vfsmount * p_mnt, const int fmode,
53907 + const int imode)
53908 +{
53909 + return 1;
53910 +}
53911 +
53912 +void
53913 +gr_acl_handle_exit(void)
53914 +{
53915 + return;
53916 +}
53917 +
53918 +int
53919 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
53920 +{
53921 + return 1;
53922 +}
53923 +
53924 +void
53925 +gr_set_role_label(const uid_t uid, const gid_t gid)
53926 +{
53927 + return;
53928 +}
53929 +
53930 +int
53931 +gr_acl_handle_procpidmem(const struct task_struct *task)
53932 +{
53933 + return 0;
53934 +}
53935 +
53936 +int
53937 +gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
53938 +{
53939 + return 0;
53940 +}
53941 +
53942 +int
53943 +gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
53944 +{
53945 + return 0;
53946 +}
53947 +
53948 +void
53949 +gr_set_kernel_label(struct task_struct *task)
53950 +{
53951 + return;
53952 +}
53953 +
53954 +int
53955 +gr_check_user_change(int real, int effective, int fs)
53956 +{
53957 + return 0;
53958 +}
53959 +
53960 +int
53961 +gr_check_group_change(int real, int effective, int fs)
53962 +{
53963 + return 0;
53964 +}
53965 +
53966 +int gr_acl_enable_at_secure(void)
53967 +{
53968 + return 0;
53969 +}
53970 +
53971 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
53972 +{
53973 + return dentry->d_inode->i_sb->s_dev;
53974 +}
53975 +
53976 +EXPORT_SYMBOL(gr_learn_resource);
53977 +EXPORT_SYMBOL(gr_set_kernel_label);
53978 +#ifdef CONFIG_SECURITY
53979 +EXPORT_SYMBOL(gr_check_user_change);
53980 +EXPORT_SYMBOL(gr_check_group_change);
53981 +#endif
53982 diff -urNp linux-3.0.8/grsecurity/grsec_exec.c linux-3.0.8/grsecurity/grsec_exec.c
53983 --- linux-3.0.8/grsecurity/grsec_exec.c 1969-12-31 19:00:00.000000000 -0500
53984 +++ linux-3.0.8/grsecurity/grsec_exec.c 2011-10-20 00:50:54.000000000 -0400
53985 @@ -0,0 +1,146 @@
53986 +#include <linux/kernel.h>
53987 +#include <linux/sched.h>
53988 +#include <linux/file.h>
53989 +#include <linux/binfmts.h>
53990 +#include <linux/fs.h>
53991 +#include <linux/types.h>
53992 +#include <linux/grdefs.h>
53993 +#include <linux/grsecurity.h>
53994 +#include <linux/grinternal.h>
53995 +#include <linux/capability.h>
53996 +#include <linux/module.h>
53997 +
53998 +#include <asm/uaccess.h>
53999 +
54000 +#ifdef CONFIG_GRKERNSEC_EXECLOG
54001 +static char gr_exec_arg_buf[132];
54002 +static DEFINE_MUTEX(gr_exec_arg_mutex);
54003 +#endif
54004 +
54005 +extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
54006 +
54007 +void
54008 +gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
54009 +{
54010 +#ifdef CONFIG_GRKERNSEC_EXECLOG
54011 + char *grarg = gr_exec_arg_buf;
54012 + unsigned int i, x, execlen = 0;
54013 + char c;
54014 +
54015 + if (!((grsec_enable_execlog && grsec_enable_group &&
54016 + in_group_p(grsec_audit_gid))
54017 + || (grsec_enable_execlog && !grsec_enable_group)))
54018 + return;
54019 +
54020 + mutex_lock(&gr_exec_arg_mutex);
54021 + memset(grarg, 0, sizeof(gr_exec_arg_buf));
54022 +
54023 + for (i = 0; i < bprm->argc && execlen < 128; i++) {
54024 + const char __user *p;
54025 + unsigned int len;
54026 +
54027 + p = get_user_arg_ptr(argv, i);
54028 + if (IS_ERR(p))
54029 + goto log;
54030 +
54031 + len = strnlen_user(p, 128 - execlen);
54032 + if (len > 128 - execlen)
54033 + len = 128 - execlen;
54034 + else if (len > 0)
54035 + len--;
54036 + if (copy_from_user(grarg + execlen, p, len))
54037 + goto log;
54038 +
54039 + /* rewrite unprintable characters */
54040 + for (x = 0; x < len; x++) {
54041 + c = *(grarg + execlen + x);
54042 + if (c < 32 || c > 126)
54043 + *(grarg + execlen + x) = ' ';
54044 + }
54045 +
54046 + execlen += len;
54047 + *(grarg + execlen) = ' ';
54048 + *(grarg + execlen + 1) = '\0';
54049 + execlen++;
54050 + }
54051 +
54052 + log:
54053 + gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
54054 + bprm->file->f_path.mnt, grarg);
54055 + mutex_unlock(&gr_exec_arg_mutex);
54056 +#endif
54057 + return;
54058 +}
54059 +
54060 +#ifdef CONFIG_GRKERNSEC
54061 +extern int gr_acl_is_capable(const int cap);
54062 +extern int gr_acl_is_capable_nolog(const int cap);
54063 +extern int gr_chroot_is_capable(const int cap);
54064 +extern int gr_chroot_is_capable_nolog(const int cap);
54065 +#endif
54066 +
54067 +const char *captab_log[] = {
54068 + "CAP_CHOWN",
54069 + "CAP_DAC_OVERRIDE",
54070 + "CAP_DAC_READ_SEARCH",
54071 + "CAP_FOWNER",
54072 + "CAP_FSETID",
54073 + "CAP_KILL",
54074 + "CAP_SETGID",
54075 + "CAP_SETUID",
54076 + "CAP_SETPCAP",
54077 + "CAP_LINUX_IMMUTABLE",
54078 + "CAP_NET_BIND_SERVICE",
54079 + "CAP_NET_BROADCAST",
54080 + "CAP_NET_ADMIN",
54081 + "CAP_NET_RAW",
54082 + "CAP_IPC_LOCK",
54083 + "CAP_IPC_OWNER",
54084 + "CAP_SYS_MODULE",
54085 + "CAP_SYS_RAWIO",
54086 + "CAP_SYS_CHROOT",
54087 + "CAP_SYS_PTRACE",
54088 + "CAP_SYS_PACCT",
54089 + "CAP_SYS_ADMIN",
54090 + "CAP_SYS_BOOT",
54091 + "CAP_SYS_NICE",
54092 + "CAP_SYS_RESOURCE",
54093 + "CAP_SYS_TIME",
54094 + "CAP_SYS_TTY_CONFIG",
54095 + "CAP_MKNOD",
54096 + "CAP_LEASE",
54097 + "CAP_AUDIT_WRITE",
54098 + "CAP_AUDIT_CONTROL",
54099 + "CAP_SETFCAP",
54100 + "CAP_MAC_OVERRIDE",
54101 + "CAP_MAC_ADMIN",
54102 + "CAP_SYSLOG",
54103 + "CAP_WAKE_ALARM"
54104 +};
54105 +
54106 +int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
54107 +
54108 +int gr_is_capable(const int cap)
54109 +{
54110 +#ifdef CONFIG_GRKERNSEC
54111 + if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
54112 + return 1;
54113 + return 0;
54114 +#else
54115 + return 1;
54116 +#endif
54117 +}
54118 +
54119 +int gr_is_capable_nolog(const int cap)
54120 +{
54121 +#ifdef CONFIG_GRKERNSEC
54122 + if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
54123 + return 1;
54124 + return 0;
54125 +#else
54126 + return 1;
54127 +#endif
54128 +}
54129 +
54130 +EXPORT_SYMBOL(gr_is_capable);
54131 +EXPORT_SYMBOL(gr_is_capable_nolog);
54132 diff -urNp linux-3.0.8/grsecurity/grsec_fifo.c linux-3.0.8/grsecurity/grsec_fifo.c
54133 --- linux-3.0.8/grsecurity/grsec_fifo.c 1969-12-31 19:00:00.000000000 -0500
54134 +++ linux-3.0.8/grsecurity/grsec_fifo.c 2011-08-23 21:48:14.000000000 -0400
54135 @@ -0,0 +1,24 @@
54136 +#include <linux/kernel.h>
54137 +#include <linux/sched.h>
54138 +#include <linux/fs.h>
54139 +#include <linux/file.h>
54140 +#include <linux/grinternal.h>
54141 +
54142 +int
54143 +gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
54144 + const struct dentry *dir, const int flag, const int acc_mode)
54145 +{
54146 +#ifdef CONFIG_GRKERNSEC_FIFO
54147 + const struct cred *cred = current_cred();
54148 +
54149 + if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
54150 + !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
54151 + (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
54152 + (cred->fsuid != dentry->d_inode->i_uid)) {
54153 + if (!inode_permission(dentry->d_inode, acc_mode))
54154 + gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
54155 + return -EACCES;
54156 + }
54157 +#endif
54158 + return 0;
54159 +}
54160 diff -urNp linux-3.0.8/grsecurity/grsec_fork.c linux-3.0.8/grsecurity/grsec_fork.c
54161 --- linux-3.0.8/grsecurity/grsec_fork.c 1969-12-31 19:00:00.000000000 -0500
54162 +++ linux-3.0.8/grsecurity/grsec_fork.c 2011-08-23 21:48:14.000000000 -0400
54163 @@ -0,0 +1,23 @@
54164 +#include <linux/kernel.h>
54165 +#include <linux/sched.h>
54166 +#include <linux/grsecurity.h>
54167 +#include <linux/grinternal.h>
54168 +#include <linux/errno.h>
54169 +
54170 +void
54171 +gr_log_forkfail(const int retval)
54172 +{
54173 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
54174 + if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
54175 + switch (retval) {
54176 + case -EAGAIN:
54177 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
54178 + break;
54179 + case -ENOMEM:
54180 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
54181 + break;
54182 + }
54183 + }
54184 +#endif
54185 + return;
54186 +}
54187 diff -urNp linux-3.0.8/grsecurity/grsec_init.c linux-3.0.8/grsecurity/grsec_init.c
54188 --- linux-3.0.8/grsecurity/grsec_init.c 1969-12-31 19:00:00.000000000 -0500
54189 +++ linux-3.0.8/grsecurity/grsec_init.c 2011-08-25 17:25:12.000000000 -0400
54190 @@ -0,0 +1,269 @@
54191 +#include <linux/kernel.h>
54192 +#include <linux/sched.h>
54193 +#include <linux/mm.h>
54194 +#include <linux/gracl.h>
54195 +#include <linux/slab.h>
54196 +#include <linux/vmalloc.h>
54197 +#include <linux/percpu.h>
54198 +#include <linux/module.h>
54199 +
54200 +int grsec_enable_brute;
54201 +int grsec_enable_link;
54202 +int grsec_enable_dmesg;
54203 +int grsec_enable_harden_ptrace;
54204 +int grsec_enable_fifo;
54205 +int grsec_enable_execlog;
54206 +int grsec_enable_signal;
54207 +int grsec_enable_forkfail;
54208 +int grsec_enable_audit_ptrace;
54209 +int grsec_enable_time;
54210 +int grsec_enable_audit_textrel;
54211 +int grsec_enable_group;
54212 +int grsec_audit_gid;
54213 +int grsec_enable_chdir;
54214 +int grsec_enable_mount;
54215 +int grsec_enable_rofs;
54216 +int grsec_enable_chroot_findtask;
54217 +int grsec_enable_chroot_mount;
54218 +int grsec_enable_chroot_shmat;
54219 +int grsec_enable_chroot_fchdir;
54220 +int grsec_enable_chroot_double;
54221 +int grsec_enable_chroot_pivot;
54222 +int grsec_enable_chroot_chdir;
54223 +int grsec_enable_chroot_chmod;
54224 +int grsec_enable_chroot_mknod;
54225 +int grsec_enable_chroot_nice;
54226 +int grsec_enable_chroot_execlog;
54227 +int grsec_enable_chroot_caps;
54228 +int grsec_enable_chroot_sysctl;
54229 +int grsec_enable_chroot_unix;
54230 +int grsec_enable_tpe;
54231 +int grsec_tpe_gid;
54232 +int grsec_enable_blackhole;
54233 +#ifdef CONFIG_IPV6_MODULE
54234 +EXPORT_SYMBOL(grsec_enable_blackhole);
54235 +#endif
54236 +int grsec_lastack_retries;
54237 +int grsec_enable_tpe_all;
54238 +int grsec_enable_tpe_invert;
54239 +int grsec_enable_socket_all;
54240 +int grsec_socket_all_gid;
54241 +int grsec_enable_socket_client;
54242 +int grsec_socket_client_gid;
54243 +int grsec_enable_socket_server;
54244 +int grsec_socket_server_gid;
54245 +int grsec_resource_logging;
54246 +int grsec_disable_privio;
54247 +int grsec_enable_log_rwxmaps;
54248 +int grsec_lock;
54249 +
54250 +DEFINE_SPINLOCK(grsec_alert_lock);
54251 +unsigned long grsec_alert_wtime = 0;
54252 +unsigned long grsec_alert_fyet = 0;
54253 +
54254 +DEFINE_SPINLOCK(grsec_audit_lock);
54255 +
54256 +DEFINE_RWLOCK(grsec_exec_file_lock);
54257 +
54258 +char *gr_shared_page[4];
54259 +
54260 +char *gr_alert_log_fmt;
54261 +char *gr_audit_log_fmt;
54262 +char *gr_alert_log_buf;
54263 +char *gr_audit_log_buf;
54264 +
54265 +extern struct gr_arg *gr_usermode;
54266 +extern unsigned char *gr_system_salt;
54267 +extern unsigned char *gr_system_sum;
54268 +
54269 +void __init
54270 +grsecurity_init(void)
54271 +{
54272 + int j;
54273 + /* create the per-cpu shared pages */
54274 +
54275 +#ifdef CONFIG_X86
54276 + memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
54277 +#endif
54278 +
54279 + for (j = 0; j < 4; j++) {
54280 + gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
54281 + if (gr_shared_page[j] == NULL) {
54282 + panic("Unable to allocate grsecurity shared page");
54283 + return;
54284 + }
54285 + }
54286 +
54287 + /* allocate log buffers */
54288 + gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
54289 + if (!gr_alert_log_fmt) {
54290 + panic("Unable to allocate grsecurity alert log format buffer");
54291 + return;
54292 + }
54293 + gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
54294 + if (!gr_audit_log_fmt) {
54295 + panic("Unable to allocate grsecurity audit log format buffer");
54296 + return;
54297 + }
54298 + gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
54299 + if (!gr_alert_log_buf) {
54300 + panic("Unable to allocate grsecurity alert log buffer");
54301 + return;
54302 + }
54303 + gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
54304 + if (!gr_audit_log_buf) {
54305 + panic("Unable to allocate grsecurity audit log buffer");
54306 + return;
54307 + }
54308 +
54309 + /* allocate memory for authentication structure */
54310 + gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
54311 + gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
54312 + gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
54313 +
54314 + if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
54315 + panic("Unable to allocate grsecurity authentication structure");
54316 + return;
54317 + }
54318 +
54319 +
54320 +#ifdef CONFIG_GRKERNSEC_IO
54321 +#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
54322 + grsec_disable_privio = 1;
54323 +#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
54324 + grsec_disable_privio = 1;
54325 +#else
54326 + grsec_disable_privio = 0;
54327 +#endif
54328 +#endif
54329 +
54330 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
54331 + /* for backward compatibility, tpe_invert always defaults to on if
54332 + enabled in the kernel
54333 + */
54334 + grsec_enable_tpe_invert = 1;
54335 +#endif
54336 +
54337 +#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
54338 +#ifndef CONFIG_GRKERNSEC_SYSCTL
54339 + grsec_lock = 1;
54340 +#endif
54341 +
54342 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
54343 + grsec_enable_audit_textrel = 1;
54344 +#endif
54345 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
54346 + grsec_enable_log_rwxmaps = 1;
54347 +#endif
54348 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
54349 + grsec_enable_group = 1;
54350 + grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
54351 +#endif
54352 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
54353 + grsec_enable_chdir = 1;
54354 +#endif
54355 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
54356 + grsec_enable_harden_ptrace = 1;
54357 +#endif
54358 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
54359 + grsec_enable_mount = 1;
54360 +#endif
54361 +#ifdef CONFIG_GRKERNSEC_LINK
54362 + grsec_enable_link = 1;
54363 +#endif
54364 +#ifdef CONFIG_GRKERNSEC_BRUTE
54365 + grsec_enable_brute = 1;
54366 +#endif
54367 +#ifdef CONFIG_GRKERNSEC_DMESG
54368 + grsec_enable_dmesg = 1;
54369 +#endif
54370 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
54371 + grsec_enable_blackhole = 1;
54372 + grsec_lastack_retries = 4;
54373 +#endif
54374 +#ifdef CONFIG_GRKERNSEC_FIFO
54375 + grsec_enable_fifo = 1;
54376 +#endif
54377 +#ifdef CONFIG_GRKERNSEC_EXECLOG
54378 + grsec_enable_execlog = 1;
54379 +#endif
54380 +#ifdef CONFIG_GRKERNSEC_SIGNAL
54381 + grsec_enable_signal = 1;
54382 +#endif
54383 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
54384 + grsec_enable_forkfail = 1;
54385 +#endif
54386 +#ifdef CONFIG_GRKERNSEC_TIME
54387 + grsec_enable_time = 1;
54388 +#endif
54389 +#ifdef CONFIG_GRKERNSEC_RESLOG
54390 + grsec_resource_logging = 1;
54391 +#endif
54392 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
54393 + grsec_enable_chroot_findtask = 1;
54394 +#endif
54395 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
54396 + grsec_enable_chroot_unix = 1;
54397 +#endif
54398 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
54399 + grsec_enable_chroot_mount = 1;
54400 +#endif
54401 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
54402 + grsec_enable_chroot_fchdir = 1;
54403 +#endif
54404 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
54405 + grsec_enable_chroot_shmat = 1;
54406 +#endif
54407 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
54408 + grsec_enable_audit_ptrace = 1;
54409 +#endif
54410 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
54411 + grsec_enable_chroot_double = 1;
54412 +#endif
54413 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
54414 + grsec_enable_chroot_pivot = 1;
54415 +#endif
54416 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
54417 + grsec_enable_chroot_chdir = 1;
54418 +#endif
54419 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
54420 + grsec_enable_chroot_chmod = 1;
54421 +#endif
54422 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
54423 + grsec_enable_chroot_mknod = 1;
54424 +#endif
54425 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
54426 + grsec_enable_chroot_nice = 1;
54427 +#endif
54428 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
54429 + grsec_enable_chroot_execlog = 1;
54430 +#endif
54431 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
54432 + grsec_enable_chroot_caps = 1;
54433 +#endif
54434 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
54435 + grsec_enable_chroot_sysctl = 1;
54436 +#endif
54437 +#ifdef CONFIG_GRKERNSEC_TPE
54438 + grsec_enable_tpe = 1;
54439 + grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
54440 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
54441 + grsec_enable_tpe_all = 1;
54442 +#endif
54443 +#endif
54444 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
54445 + grsec_enable_socket_all = 1;
54446 + grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
54447 +#endif
54448 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
54449 + grsec_enable_socket_client = 1;
54450 + grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
54451 +#endif
54452 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
54453 + grsec_enable_socket_server = 1;
54454 + grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
54455 +#endif
54456 +#endif
54457 +
54458 + return;
54459 +}
54460 diff -urNp linux-3.0.8/grsecurity/grsec_link.c linux-3.0.8/grsecurity/grsec_link.c
54461 --- linux-3.0.8/grsecurity/grsec_link.c 1969-12-31 19:00:00.000000000 -0500
54462 +++ linux-3.0.8/grsecurity/grsec_link.c 2011-08-23 21:48:14.000000000 -0400
54463 @@ -0,0 +1,43 @@
54464 +#include <linux/kernel.h>
54465 +#include <linux/sched.h>
54466 +#include <linux/fs.h>
54467 +#include <linux/file.h>
54468 +#include <linux/grinternal.h>
54469 +
54470 +int
54471 +gr_handle_follow_link(const struct inode *parent,
54472 + const struct inode *inode,
54473 + const struct dentry *dentry, const struct vfsmount *mnt)
54474 +{
54475 +#ifdef CONFIG_GRKERNSEC_LINK
54476 + const struct cred *cred = current_cred();
54477 +
54478 + if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
54479 + (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
54480 + (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
54481 + gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
54482 + return -EACCES;
54483 + }
54484 +#endif
54485 + return 0;
54486 +}
54487 +
54488 +int
54489 +gr_handle_hardlink(const struct dentry *dentry,
54490 + const struct vfsmount *mnt,
54491 + struct inode *inode, const int mode, const char *to)
54492 +{
54493 +#ifdef CONFIG_GRKERNSEC_LINK
54494 + const struct cred *cred = current_cred();
54495 +
54496 + if (grsec_enable_link && cred->fsuid != inode->i_uid &&
54497 + (!S_ISREG(mode) || (mode & S_ISUID) ||
54498 + ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
54499 + (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
54500 + !capable(CAP_FOWNER) && cred->uid) {
54501 + gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
54502 + return -EPERM;
54503 + }
54504 +#endif
54505 + return 0;
54506 +}
54507 diff -urNp linux-3.0.8/grsecurity/grsec_log.c linux-3.0.8/grsecurity/grsec_log.c
54508 --- linux-3.0.8/grsecurity/grsec_log.c 1969-12-31 19:00:00.000000000 -0500
54509 +++ linux-3.0.8/grsecurity/grsec_log.c 2011-09-26 10:46:21.000000000 -0400
54510 @@ -0,0 +1,315 @@
54511 +#include <linux/kernel.h>
54512 +#include <linux/sched.h>
54513 +#include <linux/file.h>
54514 +#include <linux/tty.h>
54515 +#include <linux/fs.h>
54516 +#include <linux/grinternal.h>
54517 +
54518 +#ifdef CONFIG_TREE_PREEMPT_RCU
54519 +#define DISABLE_PREEMPT() preempt_disable()
54520 +#define ENABLE_PREEMPT() preempt_enable()
54521 +#else
54522 +#define DISABLE_PREEMPT()
54523 +#define ENABLE_PREEMPT()
54524 +#endif
54525 +
54526 +#define BEGIN_LOCKS(x) \
54527 + DISABLE_PREEMPT(); \
54528 + rcu_read_lock(); \
54529 + read_lock(&tasklist_lock); \
54530 + read_lock(&grsec_exec_file_lock); \
54531 + if (x != GR_DO_AUDIT) \
54532 + spin_lock(&grsec_alert_lock); \
54533 + else \
54534 + spin_lock(&grsec_audit_lock)
54535 +
54536 +#define END_LOCKS(x) \
54537 + if (x != GR_DO_AUDIT) \
54538 + spin_unlock(&grsec_alert_lock); \
54539 + else \
54540 + spin_unlock(&grsec_audit_lock); \
54541 + read_unlock(&grsec_exec_file_lock); \
54542 + read_unlock(&tasklist_lock); \
54543 + rcu_read_unlock(); \
54544 + ENABLE_PREEMPT(); \
54545 + if (x == GR_DONT_AUDIT) \
54546 + gr_handle_alertkill(current)
54547 +
54548 +enum {
54549 + FLOODING,
54550 + NO_FLOODING
54551 +};
54552 +
54553 +extern char *gr_alert_log_fmt;
54554 +extern char *gr_audit_log_fmt;
54555 +extern char *gr_alert_log_buf;
54556 +extern char *gr_audit_log_buf;
54557 +
54558 +static int gr_log_start(int audit)
54559 +{
54560 + char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
54561 + char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
54562 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
54563 +#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
54564 + unsigned long curr_secs = get_seconds();
54565 +
54566 + if (audit == GR_DO_AUDIT)
54567 + goto set_fmt;
54568 +
54569 + if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
54570 + grsec_alert_wtime = curr_secs;
54571 + grsec_alert_fyet = 0;
54572 + } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
54573 + && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
54574 + grsec_alert_fyet++;
54575 + } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
54576 + grsec_alert_wtime = curr_secs;
54577 + grsec_alert_fyet++;
54578 + printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
54579 + return FLOODING;
54580 + }
54581 + else return FLOODING;
54582 +
54583 +set_fmt:
54584 +#endif
54585 + memset(buf, 0, PAGE_SIZE);
54586 + if (current->signal->curr_ip && gr_acl_is_enabled()) {
54587 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
54588 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
54589 + } else if (current->signal->curr_ip) {
54590 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
54591 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
54592 + } else if (gr_acl_is_enabled()) {
54593 + sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
54594 + snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
54595 + } else {
54596 + sprintf(fmt, "%s%s", loglevel, "grsec: ");
54597 + strcpy(buf, fmt);
54598 + }
54599 +
54600 + return NO_FLOODING;
54601 +}
54602 +
54603 +static void gr_log_middle(int audit, const char *msg, va_list ap)
54604 + __attribute__ ((format (printf, 2, 0)));
54605 +
54606 +static void gr_log_middle(int audit, const char *msg, va_list ap)
54607 +{
54608 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
54609 + unsigned int len = strlen(buf);
54610 +
54611 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
54612 +
54613 + return;
54614 +}
54615 +
54616 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
54617 + __attribute__ ((format (printf, 2, 3)));
54618 +
54619 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
54620 +{
54621 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
54622 + unsigned int len = strlen(buf);
54623 + va_list ap;
54624 +
54625 + va_start(ap, msg);
54626 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
54627 + va_end(ap);
54628 +
54629 + return;
54630 +}
54631 +
54632 +static void gr_log_end(int audit)
54633 +{
54634 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
54635 + unsigned int len = strlen(buf);
54636 +
54637 + snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
54638 + printk("%s\n", buf);
54639 +
54640 + return;
54641 +}
54642 +
54643 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
54644 +{
54645 + int logtype;
54646 + char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
54647 + char *str1 = NULL, *str2 = NULL, *str3 = NULL;
54648 + void *voidptr = NULL;
54649 + int num1 = 0, num2 = 0;
54650 + unsigned long ulong1 = 0, ulong2 = 0;
54651 + struct dentry *dentry = NULL;
54652 + struct vfsmount *mnt = NULL;
54653 + struct file *file = NULL;
54654 + struct task_struct *task = NULL;
54655 + const struct cred *cred, *pcred;
54656 + va_list ap;
54657 +
54658 + BEGIN_LOCKS(audit);
54659 + logtype = gr_log_start(audit);
54660 + if (logtype == FLOODING) {
54661 + END_LOCKS(audit);
54662 + return;
54663 + }
54664 + va_start(ap, argtypes);
54665 + switch (argtypes) {
54666 + case GR_TTYSNIFF:
54667 + task = va_arg(ap, struct task_struct *);
54668 + gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
54669 + break;
54670 + case GR_SYSCTL_HIDDEN:
54671 + str1 = va_arg(ap, char *);
54672 + gr_log_middle_varargs(audit, msg, result, str1);
54673 + break;
54674 + case GR_RBAC:
54675 + dentry = va_arg(ap, struct dentry *);
54676 + mnt = va_arg(ap, struct vfsmount *);
54677 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
54678 + break;
54679 + case GR_RBAC_STR:
54680 + dentry = va_arg(ap, struct dentry *);
54681 + mnt = va_arg(ap, struct vfsmount *);
54682 + str1 = va_arg(ap, char *);
54683 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
54684 + break;
54685 + case GR_STR_RBAC:
54686 + str1 = va_arg(ap, char *);
54687 + dentry = va_arg(ap, struct dentry *);
54688 + mnt = va_arg(ap, struct vfsmount *);
54689 + gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
54690 + break;
54691 + case GR_RBAC_MODE2:
54692 + dentry = va_arg(ap, struct dentry *);
54693 + mnt = va_arg(ap, struct vfsmount *);
54694 + str1 = va_arg(ap, char *);
54695 + str2 = va_arg(ap, char *);
54696 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
54697 + break;
54698 + case GR_RBAC_MODE3:
54699 + dentry = va_arg(ap, struct dentry *);
54700 + mnt = va_arg(ap, struct vfsmount *);
54701 + str1 = va_arg(ap, char *);
54702 + str2 = va_arg(ap, char *);
54703 + str3 = va_arg(ap, char *);
54704 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
54705 + break;
54706 + case GR_FILENAME:
54707 + dentry = va_arg(ap, struct dentry *);
54708 + mnt = va_arg(ap, struct vfsmount *);
54709 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
54710 + break;
54711 + case GR_STR_FILENAME:
54712 + str1 = va_arg(ap, char *);
54713 + dentry = va_arg(ap, struct dentry *);
54714 + mnt = va_arg(ap, struct vfsmount *);
54715 + gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
54716 + break;
54717 + case GR_FILENAME_STR:
54718 + dentry = va_arg(ap, struct dentry *);
54719 + mnt = va_arg(ap, struct vfsmount *);
54720 + str1 = va_arg(ap, char *);
54721 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
54722 + break;
54723 + case GR_FILENAME_TWO_INT:
54724 + dentry = va_arg(ap, struct dentry *);
54725 + mnt = va_arg(ap, struct vfsmount *);
54726 + num1 = va_arg(ap, int);
54727 + num2 = va_arg(ap, int);
54728 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
54729 + break;
54730 + case GR_FILENAME_TWO_INT_STR:
54731 + dentry = va_arg(ap, struct dentry *);
54732 + mnt = va_arg(ap, struct vfsmount *);
54733 + num1 = va_arg(ap, int);
54734 + num2 = va_arg(ap, int);
54735 + str1 = va_arg(ap, char *);
54736 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
54737 + break;
54738 + case GR_TEXTREL:
54739 + file = va_arg(ap, struct file *);
54740 + ulong1 = va_arg(ap, unsigned long);
54741 + ulong2 = va_arg(ap, unsigned long);
54742 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
54743 + break;
54744 + case GR_PTRACE:
54745 + task = va_arg(ap, struct task_struct *);
54746 + gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
54747 + break;
54748 + case GR_RESOURCE:
54749 + task = va_arg(ap, struct task_struct *);
54750 + cred = __task_cred(task);
54751 + pcred = __task_cred(task->real_parent);
54752 + ulong1 = va_arg(ap, unsigned long);
54753 + str1 = va_arg(ap, char *);
54754 + ulong2 = va_arg(ap, unsigned long);
54755 + gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
54756 + break;
54757 + case GR_CAP:
54758 + task = va_arg(ap, struct task_struct *);
54759 + cred = __task_cred(task);
54760 + pcred = __task_cred(task->real_parent);
54761 + str1 = va_arg(ap, char *);
54762 + gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
54763 + break;
54764 + case GR_SIG:
54765 + str1 = va_arg(ap, char *);
54766 + voidptr = va_arg(ap, void *);
54767 + gr_log_middle_varargs(audit, msg, str1, voidptr);
54768 + break;
54769 + case GR_SIG2:
54770 + task = va_arg(ap, struct task_struct *);
54771 + cred = __task_cred(task);
54772 + pcred = __task_cred(task->real_parent);
54773 + num1 = va_arg(ap, int);
54774 + gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
54775 + break;
54776 + case GR_CRASH1:
54777 + task = va_arg(ap, struct task_struct *);
54778 + cred = __task_cred(task);
54779 + pcred = __task_cred(task->real_parent);
54780 + ulong1 = va_arg(ap, unsigned long);
54781 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
54782 + break;
54783 + case GR_CRASH2:
54784 + task = va_arg(ap, struct task_struct *);
54785 + cred = __task_cred(task);
54786 + pcred = __task_cred(task->real_parent);
54787 + ulong1 = va_arg(ap, unsigned long);
54788 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
54789 + break;
54790 + case GR_RWXMAP:
54791 + file = va_arg(ap, struct file *);
54792 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
54793 + break;
54794 + case GR_PSACCT:
54795 + {
54796 + unsigned int wday, cday;
54797 + __u8 whr, chr;
54798 + __u8 wmin, cmin;
54799 + __u8 wsec, csec;
54800 + char cur_tty[64] = { 0 };
54801 + char parent_tty[64] = { 0 };
54802 +
54803 + task = va_arg(ap, struct task_struct *);
54804 + wday = va_arg(ap, unsigned int);
54805 + cday = va_arg(ap, unsigned int);
54806 + whr = va_arg(ap, int);
54807 + chr = va_arg(ap, int);
54808 + wmin = va_arg(ap, int);
54809 + cmin = va_arg(ap, int);
54810 + wsec = va_arg(ap, int);
54811 + csec = va_arg(ap, int);
54812 + ulong1 = va_arg(ap, unsigned long);
54813 + cred = __task_cred(task);
54814 + pcred = __task_cred(task->real_parent);
54815 +
54816 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
54817 + }
54818 + break;
54819 + default:
54820 + gr_log_middle(audit, msg, ap);
54821 + }
54822 + va_end(ap);
54823 + gr_log_end(audit);
54824 + END_LOCKS(audit);
54825 +}
54826 diff -urNp linux-3.0.8/grsecurity/grsec_mem.c linux-3.0.8/grsecurity/grsec_mem.c
54827 --- linux-3.0.8/grsecurity/grsec_mem.c 1969-12-31 19:00:00.000000000 -0500
54828 +++ linux-3.0.8/grsecurity/grsec_mem.c 2011-08-23 21:48:14.000000000 -0400
54829 @@ -0,0 +1,33 @@
54830 +#include <linux/kernel.h>
54831 +#include <linux/sched.h>
54832 +#include <linux/mm.h>
54833 +#include <linux/mman.h>
54834 +#include <linux/grinternal.h>
54835 +
54836 +void
54837 +gr_handle_ioperm(void)
54838 +{
54839 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
54840 + return;
54841 +}
54842 +
54843 +void
54844 +gr_handle_iopl(void)
54845 +{
54846 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
54847 + return;
54848 +}
54849 +
54850 +void
54851 +gr_handle_mem_readwrite(u64 from, u64 to)
54852 +{
54853 + gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
54854 + return;
54855 +}
54856 +
54857 +void
54858 +gr_handle_vm86(void)
54859 +{
54860 + gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
54861 + return;
54862 +}
54863 diff -urNp linux-3.0.8/grsecurity/grsec_mount.c linux-3.0.8/grsecurity/grsec_mount.c
54864 --- linux-3.0.8/grsecurity/grsec_mount.c 1969-12-31 19:00:00.000000000 -0500
54865 +++ linux-3.0.8/grsecurity/grsec_mount.c 2011-08-23 21:48:14.000000000 -0400
54866 @@ -0,0 +1,62 @@
54867 +#include <linux/kernel.h>
54868 +#include <linux/sched.h>
54869 +#include <linux/mount.h>
54870 +#include <linux/grsecurity.h>
54871 +#include <linux/grinternal.h>
54872 +
54873 +void
54874 +gr_log_remount(const char *devname, const int retval)
54875 +{
54876 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
54877 + if (grsec_enable_mount && (retval >= 0))
54878 + gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
54879 +#endif
54880 + return;
54881 +}
54882 +
54883 +void
54884 +gr_log_unmount(const char *devname, const int retval)
54885 +{
54886 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
54887 + if (grsec_enable_mount && (retval >= 0))
54888 + gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
54889 +#endif
54890 + return;
54891 +}
54892 +
54893 +void
54894 +gr_log_mount(const char *from, const char *to, const int retval)
54895 +{
54896 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
54897 + if (grsec_enable_mount && (retval >= 0))
54898 + gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
54899 +#endif
54900 + return;
54901 +}
54902 +
54903 +int
54904 +gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
54905 +{
54906 +#ifdef CONFIG_GRKERNSEC_ROFS
54907 + if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
54908 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
54909 + return -EPERM;
54910 + } else
54911 + return 0;
54912 +#endif
54913 + return 0;
54914 +}
54915 +
54916 +int
54917 +gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
54918 +{
54919 +#ifdef CONFIG_GRKERNSEC_ROFS
54920 + if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
54921 + dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
54922 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
54923 + return -EPERM;
54924 + } else
54925 + return 0;
54926 +#endif
54927 + return 0;
54928 +}
54929 diff -urNp linux-3.0.8/grsecurity/grsec_pax.c linux-3.0.8/grsecurity/grsec_pax.c
54930 --- linux-3.0.8/grsecurity/grsec_pax.c 1969-12-31 19:00:00.000000000 -0500
54931 +++ linux-3.0.8/grsecurity/grsec_pax.c 2011-08-23 21:48:14.000000000 -0400
54932 @@ -0,0 +1,36 @@
54933 +#include <linux/kernel.h>
54934 +#include <linux/sched.h>
54935 +#include <linux/mm.h>
54936 +#include <linux/file.h>
54937 +#include <linux/grinternal.h>
54938 +#include <linux/grsecurity.h>
54939 +
54940 +void
54941 +gr_log_textrel(struct vm_area_struct * vma)
54942 +{
54943 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
54944 + if (grsec_enable_audit_textrel)
54945 + gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
54946 +#endif
54947 + return;
54948 +}
54949 +
54950 +void
54951 +gr_log_rwxmmap(struct file *file)
54952 +{
54953 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
54954 + if (grsec_enable_log_rwxmaps)
54955 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
54956 +#endif
54957 + return;
54958 +}
54959 +
54960 +void
54961 +gr_log_rwxmprotect(struct file *file)
54962 +{
54963 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
54964 + if (grsec_enable_log_rwxmaps)
54965 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
54966 +#endif
54967 + return;
54968 +}
54969 diff -urNp linux-3.0.8/grsecurity/grsec_ptrace.c linux-3.0.8/grsecurity/grsec_ptrace.c
54970 --- linux-3.0.8/grsecurity/grsec_ptrace.c 1969-12-31 19:00:00.000000000 -0500
54971 +++ linux-3.0.8/grsecurity/grsec_ptrace.c 2011-08-23 21:48:14.000000000 -0400
54972 @@ -0,0 +1,14 @@
54973 +#include <linux/kernel.h>
54974 +#include <linux/sched.h>
54975 +#include <linux/grinternal.h>
54976 +#include <linux/grsecurity.h>
54977 +
54978 +void
54979 +gr_audit_ptrace(struct task_struct *task)
54980 +{
54981 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
54982 + if (grsec_enable_audit_ptrace)
54983 + gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
54984 +#endif
54985 + return;
54986 +}
54987 diff -urNp linux-3.0.8/grsecurity/grsec_sig.c linux-3.0.8/grsecurity/grsec_sig.c
54988 --- linux-3.0.8/grsecurity/grsec_sig.c 1969-12-31 19:00:00.000000000 -0500
54989 +++ linux-3.0.8/grsecurity/grsec_sig.c 2011-08-23 21:48:14.000000000 -0400
54990 @@ -0,0 +1,206 @@
54991 +#include <linux/kernel.h>
54992 +#include <linux/sched.h>
54993 +#include <linux/delay.h>
54994 +#include <linux/grsecurity.h>
54995 +#include <linux/grinternal.h>
54996 +#include <linux/hardirq.h>
54997 +
54998 +char *signames[] = {
54999 + [SIGSEGV] = "Segmentation fault",
55000 + [SIGILL] = "Illegal instruction",
55001 + [SIGABRT] = "Abort",
55002 + [SIGBUS] = "Invalid alignment/Bus error"
55003 +};
55004 +
55005 +void
55006 +gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
55007 +{
55008 +#ifdef CONFIG_GRKERNSEC_SIGNAL
55009 + if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
55010 + (sig == SIGABRT) || (sig == SIGBUS))) {
55011 + if (t->pid == current->pid) {
55012 + gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
55013 + } else {
55014 + gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
55015 + }
55016 + }
55017 +#endif
55018 + return;
55019 +}
55020 +
55021 +int
55022 +gr_handle_signal(const struct task_struct *p, const int sig)
55023 +{
55024 +#ifdef CONFIG_GRKERNSEC
55025 + if (current->pid > 1 && gr_check_protected_task(p)) {
55026 + gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
55027 + return -EPERM;
55028 + } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
55029 + return -EPERM;
55030 + }
55031 +#endif
55032 + return 0;
55033 +}
55034 +
55035 +#ifdef CONFIG_GRKERNSEC
55036 +extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
55037 +
55038 +int gr_fake_force_sig(int sig, struct task_struct *t)
55039 +{
55040 + unsigned long int flags;
55041 + int ret, blocked, ignored;
55042 + struct k_sigaction *action;
55043 +
55044 + spin_lock_irqsave(&t->sighand->siglock, flags);
55045 + action = &t->sighand->action[sig-1];
55046 + ignored = action->sa.sa_handler == SIG_IGN;
55047 + blocked = sigismember(&t->blocked, sig);
55048 + if (blocked || ignored) {
55049 + action->sa.sa_handler = SIG_DFL;
55050 + if (blocked) {
55051 + sigdelset(&t->blocked, sig);
55052 + recalc_sigpending_and_wake(t);
55053 + }
55054 + }
55055 + if (action->sa.sa_handler == SIG_DFL)
55056 + t->signal->flags &= ~SIGNAL_UNKILLABLE;
55057 + ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
55058 +
55059 + spin_unlock_irqrestore(&t->sighand->siglock, flags);
55060 +
55061 + return ret;
55062 +}
55063 +#endif
55064 +
55065 +#ifdef CONFIG_GRKERNSEC_BRUTE
55066 +#define GR_USER_BAN_TIME (15 * 60)
55067 +
55068 +static int __get_dumpable(unsigned long mm_flags)
55069 +{
55070 + int ret;
55071 +
55072 + ret = mm_flags & MMF_DUMPABLE_MASK;
55073 + return (ret >= 2) ? 2 : ret;
55074 +}
55075 +#endif
55076 +
55077 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
55078 +{
55079 +#ifdef CONFIG_GRKERNSEC_BRUTE
55080 + uid_t uid = 0;
55081 +
55082 + if (!grsec_enable_brute)
55083 + return;
55084 +
55085 + rcu_read_lock();
55086 + read_lock(&tasklist_lock);
55087 + read_lock(&grsec_exec_file_lock);
55088 + if (p->real_parent && p->real_parent->exec_file == p->exec_file)
55089 + p->real_parent->brute = 1;
55090 + else {
55091 + const struct cred *cred = __task_cred(p), *cred2;
55092 + struct task_struct *tsk, *tsk2;
55093 +
55094 + if (!__get_dumpable(mm_flags) && cred->uid) {
55095 + struct user_struct *user;
55096 +
55097 + uid = cred->uid;
55098 +
55099 + /* this is put upon execution past expiration */
55100 + user = find_user(uid);
55101 + if (user == NULL)
55102 + goto unlock;
55103 + user->banned = 1;
55104 + user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
55105 + if (user->ban_expires == ~0UL)
55106 + user->ban_expires--;
55107 +
55108 + do_each_thread(tsk2, tsk) {
55109 + cred2 = __task_cred(tsk);
55110 + if (tsk != p && cred2->uid == uid)
55111 + gr_fake_force_sig(SIGKILL, tsk);
55112 + } while_each_thread(tsk2, tsk);
55113 + }
55114 + }
55115 +unlock:
55116 + read_unlock(&grsec_exec_file_lock);
55117 + read_unlock(&tasklist_lock);
55118 + rcu_read_unlock();
55119 +
55120 + if (uid)
55121 + printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
55122 +
55123 +#endif
55124 + return;
55125 +}
55126 +
55127 +void gr_handle_brute_check(void)
55128 +{
55129 +#ifdef CONFIG_GRKERNSEC_BRUTE
55130 + if (current->brute)
55131 + msleep(30 * 1000);
55132 +#endif
55133 + return;
55134 +}
55135 +
55136 +void gr_handle_kernel_exploit(void)
55137 +{
55138 +#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
55139 + const struct cred *cred;
55140 + struct task_struct *tsk, *tsk2;
55141 + struct user_struct *user;
55142 + uid_t uid;
55143 +
55144 + if (in_irq() || in_serving_softirq() || in_nmi())
55145 + panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
55146 +
55147 + uid = current_uid();
55148 +
55149 + if (uid == 0)
55150 + panic("grsec: halting the system due to suspicious kernel crash caused by root");
55151 + else {
55152 + /* kill all the processes of this user, hold a reference
55153 + to their creds struct, and prevent them from creating
55154 + another process until system reset
55155 + */
55156 + printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
55157 + /* we intentionally leak this ref */
55158 + user = get_uid(current->cred->user);
55159 + if (user) {
55160 + user->banned = 1;
55161 + user->ban_expires = ~0UL;
55162 + }
55163 +
55164 + read_lock(&tasklist_lock);
55165 + do_each_thread(tsk2, tsk) {
55166 + cred = __task_cred(tsk);
55167 + if (cred->uid == uid)
55168 + gr_fake_force_sig(SIGKILL, tsk);
55169 + } while_each_thread(tsk2, tsk);
55170 + read_unlock(&tasklist_lock);
55171 + }
55172 +#endif
55173 +}
55174 +
55175 +int __gr_process_user_ban(struct user_struct *user)
55176 +{
55177 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
55178 + if (unlikely(user->banned)) {
55179 + if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
55180 + user->banned = 0;
55181 + user->ban_expires = 0;
55182 + free_uid(user);
55183 + } else
55184 + return -EPERM;
55185 + }
55186 +#endif
55187 + return 0;
55188 +}
55189 +
55190 +int gr_process_user_ban(void)
55191 +{
55192 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
55193 + return __gr_process_user_ban(current->cred->user);
55194 +#endif
55195 + return 0;
55196 +}
55197 diff -urNp linux-3.0.8/grsecurity/grsec_sock.c linux-3.0.8/grsecurity/grsec_sock.c
55198 --- linux-3.0.8/grsecurity/grsec_sock.c 1969-12-31 19:00:00.000000000 -0500
55199 +++ linux-3.0.8/grsecurity/grsec_sock.c 2011-08-23 21:48:14.000000000 -0400
55200 @@ -0,0 +1,244 @@
55201 +#include <linux/kernel.h>
55202 +#include <linux/module.h>
55203 +#include <linux/sched.h>
55204 +#include <linux/file.h>
55205 +#include <linux/net.h>
55206 +#include <linux/in.h>
55207 +#include <linux/ip.h>
55208 +#include <net/sock.h>
55209 +#include <net/inet_sock.h>
55210 +#include <linux/grsecurity.h>
55211 +#include <linux/grinternal.h>
55212 +#include <linux/gracl.h>
55213 +
55214 +extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
55215 +extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
55216 +
55217 +EXPORT_SYMBOL(gr_search_udp_recvmsg);
55218 +EXPORT_SYMBOL(gr_search_udp_sendmsg);
55219 +
55220 +#ifdef CONFIG_UNIX_MODULE
55221 +EXPORT_SYMBOL(gr_acl_handle_unix);
55222 +EXPORT_SYMBOL(gr_acl_handle_mknod);
55223 +EXPORT_SYMBOL(gr_handle_chroot_unix);
55224 +EXPORT_SYMBOL(gr_handle_create);
55225 +#endif
55226 +
55227 +#ifdef CONFIG_GRKERNSEC
55228 +#define gr_conn_table_size 32749
55229 +struct conn_table_entry {
55230 + struct conn_table_entry *next;
55231 + struct signal_struct *sig;
55232 +};
55233 +
55234 +struct conn_table_entry *gr_conn_table[gr_conn_table_size];
55235 +DEFINE_SPINLOCK(gr_conn_table_lock);
55236 +
55237 +extern const char * gr_socktype_to_name(unsigned char type);
55238 +extern const char * gr_proto_to_name(unsigned char proto);
55239 +extern const char * gr_sockfamily_to_name(unsigned char family);
55240 +
55241 +static __inline__ int
55242 +conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
55243 +{
55244 + return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
55245 +}
55246 +
55247 +static __inline__ int
55248 +conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
55249 + __u16 sport, __u16 dport)
55250 +{
55251 + if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
55252 + sig->gr_sport == sport && sig->gr_dport == dport))
55253 + return 1;
55254 + else
55255 + return 0;
55256 +}
55257 +
55258 +static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
55259 +{
55260 + struct conn_table_entry **match;
55261 + unsigned int index;
55262 +
55263 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
55264 + sig->gr_sport, sig->gr_dport,
55265 + gr_conn_table_size);
55266 +
55267 + newent->sig = sig;
55268 +
55269 + match = &gr_conn_table[index];
55270 + newent->next = *match;
55271 + *match = newent;
55272 +
55273 + return;
55274 +}
55275 +
55276 +static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
55277 +{
55278 + struct conn_table_entry *match, *last = NULL;
55279 + unsigned int index;
55280 +
55281 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
55282 + sig->gr_sport, sig->gr_dport,
55283 + gr_conn_table_size);
55284 +
55285 + match = gr_conn_table[index];
55286 + while (match && !conn_match(match->sig,
55287 + sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
55288 + sig->gr_dport)) {
55289 + last = match;
55290 + match = match->next;
55291 + }
55292 +
55293 + if (match) {
55294 + if (last)
55295 + last->next = match->next;
55296 + else
55297 +			gr_conn_table[index] = match->next;
55298 + kfree(match);
55299 + }
55300 +
55301 + return;
55302 +}
55303 +
55304 +static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
55305 + __u16 sport, __u16 dport)
55306 +{
55307 + struct conn_table_entry *match;
55308 + unsigned int index;
55309 +
55310 + index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
55311 +
55312 + match = gr_conn_table[index];
55313 + while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
55314 + match = match->next;
55315 +
55316 + if (match)
55317 + return match->sig;
55318 + else
55319 + return NULL;
55320 +}
55321 +
55322 +#endif
55323 +
55324 +void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
55325 +{
55326 +#ifdef CONFIG_GRKERNSEC
55327 + struct signal_struct *sig = task->signal;
55328 + struct conn_table_entry *newent;
55329 +
55330 + newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
55331 + if (newent == NULL)
55332 + return;
55333 + /* no bh lock needed since we are called with bh disabled */
55334 + spin_lock(&gr_conn_table_lock);
55335 + gr_del_task_from_ip_table_nolock(sig);
55336 + sig->gr_saddr = inet->inet_rcv_saddr;
55337 + sig->gr_daddr = inet->inet_daddr;
55338 + sig->gr_sport = inet->inet_sport;
55339 + sig->gr_dport = inet->inet_dport;
55340 + gr_add_to_task_ip_table_nolock(sig, newent);
55341 + spin_unlock(&gr_conn_table_lock);
55342 +#endif
55343 + return;
55344 +}
55345 +
55346 +void gr_del_task_from_ip_table(struct task_struct *task)
55347 +{
55348 +#ifdef CONFIG_GRKERNSEC
55349 + spin_lock_bh(&gr_conn_table_lock);
55350 + gr_del_task_from_ip_table_nolock(task->signal);
55351 + spin_unlock_bh(&gr_conn_table_lock);
55352 +#endif
55353 + return;
55354 +}
55355 +
55356 +void
55357 +gr_attach_curr_ip(const struct sock *sk)
55358 +{
55359 +#ifdef CONFIG_GRKERNSEC
55360 + struct signal_struct *p, *set;
55361 + const struct inet_sock *inet = inet_sk(sk);
55362 +
55363 + if (unlikely(sk->sk_protocol != IPPROTO_TCP))
55364 + return;
55365 +
55366 + set = current->signal;
55367 +
55368 + spin_lock_bh(&gr_conn_table_lock);
55369 + p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
55370 + inet->inet_dport, inet->inet_sport);
55371 + if (unlikely(p != NULL)) {
55372 + set->curr_ip = p->curr_ip;
55373 + set->used_accept = 1;
55374 + gr_del_task_from_ip_table_nolock(p);
55375 + spin_unlock_bh(&gr_conn_table_lock);
55376 + return;
55377 + }
55378 + spin_unlock_bh(&gr_conn_table_lock);
55379 +
55380 + set->curr_ip = inet->inet_daddr;
55381 + set->used_accept = 1;
55382 +#endif
55383 + return;
55384 +}
55385 +
55386 +int
55387 +gr_handle_sock_all(const int family, const int type, const int protocol)
55388 +{
55389 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
55390 + if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
55391 + (family != AF_UNIX)) {
55392 + if (family == AF_INET)
55393 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
55394 + else
55395 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
55396 + return -EACCES;
55397 + }
55398 +#endif
55399 + return 0;
55400 +}
55401 +
55402 +int
55403 +gr_handle_sock_server(const struct sockaddr *sck)
55404 +{
55405 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
55406 + if (grsec_enable_socket_server &&
55407 + in_group_p(grsec_socket_server_gid) &&
55408 + sck && (sck->sa_family != AF_UNIX) &&
55409 + (sck->sa_family != AF_LOCAL)) {
55410 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
55411 + return -EACCES;
55412 + }
55413 +#endif
55414 + return 0;
55415 +}
55416 +
55417 +int
55418 +gr_handle_sock_server_other(const struct sock *sck)
55419 +{
55420 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
55421 + if (grsec_enable_socket_server &&
55422 + in_group_p(grsec_socket_server_gid) &&
55423 + sck && (sck->sk_family != AF_UNIX) &&
55424 + (sck->sk_family != AF_LOCAL)) {
55425 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
55426 + return -EACCES;
55427 + }
55428 +#endif
55429 + return 0;
55430 +}
55431 +
55432 +int
55433 +gr_handle_sock_client(const struct sockaddr *sck)
55434 +{
55435 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
55436 + if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
55437 + sck && (sck->sa_family != AF_UNIX) &&
55438 + (sck->sa_family != AF_LOCAL)) {
55439 + gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
55440 + return -EACCES;
55441 + }
55442 +#endif
55443 + return 0;
55444 +}
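gr_attach_curr_ip() and gr_update_task_in_ip_table() above rely on a simple chained hash table keyed on the TCP 4-tuple, so the address a connection arrived from can be handed to the task that accepts it. A standalone approximation of that table (not part of the patch; hypothetical names, no kernel locking, which in the real code is provided by gr_conn_table_lock):

#include <stdio.h>
#include <stdlib.h>

#define TABLE_SIZE 32749	/* same prime as gr_conn_table_size above */

struct entry {
	struct entry *next;
	unsigned int saddr, daddr;
	unsigned short sport, dport;
	int task_id;		/* stands in for the signal_struct pointer */
};

static struct entry *table[TABLE_SIZE];

static unsigned int conn_hash(unsigned int saddr, unsigned int daddr,
			      unsigned short sport, unsigned short dport)
{
	/* same mixing as the kernel helper: fold the 4-tuple, then reduce */
	return (daddr + saddr + (sport << 8) + (dport << 16)) % TABLE_SIZE;
}

static void add_conn(struct entry *e)
{
	unsigned int i = conn_hash(e->saddr, e->daddr, e->sport, e->dport);

	e->next = table[i];	/* push onto the head of the bucket's chain */
	table[i] = e;
}

static struct entry *lookup_conn(unsigned int saddr, unsigned int daddr,
				 unsigned short sport, unsigned short dport)
{
	unsigned int i = conn_hash(saddr, daddr, sport, dport);
	struct entry *e;

	for (e = table[i]; e; e = e->next)
		if (e->saddr == saddr && e->daddr == daddr &&
		    e->sport == sport && e->dport == dport)
			return e;
	return NULL;
}

int main(void)
{
	struct entry *e = calloc(1, sizeof(*e));

	if (!e)
		return 1;
	e->saddr = 0x0a000001; e->daddr = 0x0a000002;
	e->sport = 40000; e->dport = 22; e->task_id = 1234;
	add_conn(e);
	printf("lookup -> task %d\n",
	       lookup_conn(0x0a000001, 0x0a000002, 40000, 22)->task_id);
	free(e);
	return 0;
}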
55445 diff -urNp linux-3.0.8/grsecurity/grsec_sysctl.c linux-3.0.8/grsecurity/grsec_sysctl.c
55446 --- linux-3.0.8/grsecurity/grsec_sysctl.c 1969-12-31 19:00:00.000000000 -0500
55447 +++ linux-3.0.8/grsecurity/grsec_sysctl.c 2011-08-25 17:26:15.000000000 -0400
55448 @@ -0,0 +1,433 @@
55449 +#include <linux/kernel.h>
55450 +#include <linux/sched.h>
55451 +#include <linux/sysctl.h>
55452 +#include <linux/grsecurity.h>
55453 +#include <linux/grinternal.h>
55454 +
55455 +int
55456 +gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
55457 +{
55458 +#ifdef CONFIG_GRKERNSEC_SYSCTL
55459 + if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
55460 + gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
55461 + return -EACCES;
55462 + }
55463 +#endif
55464 + return 0;
55465 +}
55466 +
55467 +#ifdef CONFIG_GRKERNSEC_ROFS
55468 +static int __maybe_unused one = 1;
55469 +#endif
55470 +
55471 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
55472 +struct ctl_table grsecurity_table[] = {
55473 +#ifdef CONFIG_GRKERNSEC_SYSCTL
55474 +#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
55475 +#ifdef CONFIG_GRKERNSEC_IO
55476 + {
55477 + .procname = "disable_priv_io",
55478 + .data = &grsec_disable_privio,
55479 + .maxlen = sizeof(int),
55480 + .mode = 0600,
55481 + .proc_handler = &proc_dointvec,
55482 + },
55483 +#endif
55484 +#endif
55485 +#ifdef CONFIG_GRKERNSEC_LINK
55486 + {
55487 + .procname = "linking_restrictions",
55488 + .data = &grsec_enable_link,
55489 + .maxlen = sizeof(int),
55490 + .mode = 0600,
55491 + .proc_handler = &proc_dointvec,
55492 + },
55493 +#endif
55494 +#ifdef CONFIG_GRKERNSEC_BRUTE
55495 + {
55496 + .procname = "deter_bruteforce",
55497 + .data = &grsec_enable_brute,
55498 + .maxlen = sizeof(int),
55499 + .mode = 0600,
55500 + .proc_handler = &proc_dointvec,
55501 + },
55502 +#endif
55503 +#ifdef CONFIG_GRKERNSEC_FIFO
55504 + {
55505 + .procname = "fifo_restrictions",
55506 + .data = &grsec_enable_fifo,
55507 + .maxlen = sizeof(int),
55508 + .mode = 0600,
55509 + .proc_handler = &proc_dointvec,
55510 + },
55511 +#endif
55512 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
55513 + {
55514 + .procname = "ip_blackhole",
55515 + .data = &grsec_enable_blackhole,
55516 + .maxlen = sizeof(int),
55517 + .mode = 0600,
55518 + .proc_handler = &proc_dointvec,
55519 + },
55520 + {
55521 + .procname = "lastack_retries",
55522 + .data = &grsec_lastack_retries,
55523 + .maxlen = sizeof(int),
55524 + .mode = 0600,
55525 + .proc_handler = &proc_dointvec,
55526 + },
55527 +#endif
55528 +#ifdef CONFIG_GRKERNSEC_EXECLOG
55529 + {
55530 + .procname = "exec_logging",
55531 + .data = &grsec_enable_execlog,
55532 + .maxlen = sizeof(int),
55533 + .mode = 0600,
55534 + .proc_handler = &proc_dointvec,
55535 + },
55536 +#endif
55537 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
55538 + {
55539 + .procname = "rwxmap_logging",
55540 + .data = &grsec_enable_log_rwxmaps,
55541 + .maxlen = sizeof(int),
55542 + .mode = 0600,
55543 + .proc_handler = &proc_dointvec,
55544 + },
55545 +#endif
55546 +#ifdef CONFIG_GRKERNSEC_SIGNAL
55547 + {
55548 + .procname = "signal_logging",
55549 + .data = &grsec_enable_signal,
55550 + .maxlen = sizeof(int),
55551 + .mode = 0600,
55552 + .proc_handler = &proc_dointvec,
55553 + },
55554 +#endif
55555 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
55556 + {
55557 + .procname = "forkfail_logging",
55558 + .data = &grsec_enable_forkfail,
55559 + .maxlen = sizeof(int),
55560 + .mode = 0600,
55561 + .proc_handler = &proc_dointvec,
55562 + },
55563 +#endif
55564 +#ifdef CONFIG_GRKERNSEC_TIME
55565 + {
55566 + .procname = "timechange_logging",
55567 + .data = &grsec_enable_time,
55568 + .maxlen = sizeof(int),
55569 + .mode = 0600,
55570 + .proc_handler = &proc_dointvec,
55571 + },
55572 +#endif
55573 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
55574 + {
55575 + .procname = "chroot_deny_shmat",
55576 + .data = &grsec_enable_chroot_shmat,
55577 + .maxlen = sizeof(int),
55578 + .mode = 0600,
55579 + .proc_handler = &proc_dointvec,
55580 + },
55581 +#endif
55582 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
55583 + {
55584 + .procname = "chroot_deny_unix",
55585 + .data = &grsec_enable_chroot_unix,
55586 + .maxlen = sizeof(int),
55587 + .mode = 0600,
55588 + .proc_handler = &proc_dointvec,
55589 + },
55590 +#endif
55591 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
55592 + {
55593 + .procname = "chroot_deny_mount",
55594 + .data = &grsec_enable_chroot_mount,
55595 + .maxlen = sizeof(int),
55596 + .mode = 0600,
55597 + .proc_handler = &proc_dointvec,
55598 + },
55599 +#endif
55600 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
55601 + {
55602 + .procname = "chroot_deny_fchdir",
55603 + .data = &grsec_enable_chroot_fchdir,
55604 + .maxlen = sizeof(int),
55605 + .mode = 0600,
55606 + .proc_handler = &proc_dointvec,
55607 + },
55608 +#endif
55609 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
55610 + {
55611 + .procname = "chroot_deny_chroot",
55612 + .data = &grsec_enable_chroot_double,
55613 + .maxlen = sizeof(int),
55614 + .mode = 0600,
55615 + .proc_handler = &proc_dointvec,
55616 + },
55617 +#endif
55618 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
55619 + {
55620 + .procname = "chroot_deny_pivot",
55621 + .data = &grsec_enable_chroot_pivot,
55622 + .maxlen = sizeof(int),
55623 + .mode = 0600,
55624 + .proc_handler = &proc_dointvec,
55625 + },
55626 +#endif
55627 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
55628 + {
55629 + .procname = "chroot_enforce_chdir",
55630 + .data = &grsec_enable_chroot_chdir,
55631 + .maxlen = sizeof(int),
55632 + .mode = 0600,
55633 + .proc_handler = &proc_dointvec,
55634 + },
55635 +#endif
55636 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
55637 + {
55638 + .procname = "chroot_deny_chmod",
55639 + .data = &grsec_enable_chroot_chmod,
55640 + .maxlen = sizeof(int),
55641 + .mode = 0600,
55642 + .proc_handler = &proc_dointvec,
55643 + },
55644 +#endif
55645 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
55646 + {
55647 + .procname = "chroot_deny_mknod",
55648 + .data = &grsec_enable_chroot_mknod,
55649 + .maxlen = sizeof(int),
55650 + .mode = 0600,
55651 + .proc_handler = &proc_dointvec,
55652 + },
55653 +#endif
55654 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
55655 + {
55656 + .procname = "chroot_restrict_nice",
55657 + .data = &grsec_enable_chroot_nice,
55658 + .maxlen = sizeof(int),
55659 + .mode = 0600,
55660 + .proc_handler = &proc_dointvec,
55661 + },
55662 +#endif
55663 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
55664 + {
55665 + .procname = "chroot_execlog",
55666 + .data = &grsec_enable_chroot_execlog,
55667 + .maxlen = sizeof(int),
55668 + .mode = 0600,
55669 + .proc_handler = &proc_dointvec,
55670 + },
55671 +#endif
55672 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
55673 + {
55674 + .procname = "chroot_caps",
55675 + .data = &grsec_enable_chroot_caps,
55676 + .maxlen = sizeof(int),
55677 + .mode = 0600,
55678 + .proc_handler = &proc_dointvec,
55679 + },
55680 +#endif
55681 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
55682 + {
55683 + .procname = "chroot_deny_sysctl",
55684 + .data = &grsec_enable_chroot_sysctl,
55685 + .maxlen = sizeof(int),
55686 + .mode = 0600,
55687 + .proc_handler = &proc_dointvec,
55688 + },
55689 +#endif
55690 +#ifdef CONFIG_GRKERNSEC_TPE
55691 + {
55692 + .procname = "tpe",
55693 + .data = &grsec_enable_tpe,
55694 + .maxlen = sizeof(int),
55695 + .mode = 0600,
55696 + .proc_handler = &proc_dointvec,
55697 + },
55698 + {
55699 + .procname = "tpe_gid",
55700 + .data = &grsec_tpe_gid,
55701 + .maxlen = sizeof(int),
55702 + .mode = 0600,
55703 + .proc_handler = &proc_dointvec,
55704 + },
55705 +#endif
55706 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
55707 + {
55708 + .procname = "tpe_invert",
55709 + .data = &grsec_enable_tpe_invert,
55710 + .maxlen = sizeof(int),
55711 + .mode = 0600,
55712 + .proc_handler = &proc_dointvec,
55713 + },
55714 +#endif
55715 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
55716 + {
55717 + .procname = "tpe_restrict_all",
55718 + .data = &grsec_enable_tpe_all,
55719 + .maxlen = sizeof(int),
55720 + .mode = 0600,
55721 + .proc_handler = &proc_dointvec,
55722 + },
55723 +#endif
55724 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
55725 + {
55726 + .procname = "socket_all",
55727 + .data = &grsec_enable_socket_all,
55728 + .maxlen = sizeof(int),
55729 + .mode = 0600,
55730 + .proc_handler = &proc_dointvec,
55731 + },
55732 + {
55733 + .procname = "socket_all_gid",
55734 + .data = &grsec_socket_all_gid,
55735 + .maxlen = sizeof(int),
55736 + .mode = 0600,
55737 + .proc_handler = &proc_dointvec,
55738 + },
55739 +#endif
55740 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
55741 + {
55742 + .procname = "socket_client",
55743 + .data = &grsec_enable_socket_client,
55744 + .maxlen = sizeof(int),
55745 + .mode = 0600,
55746 + .proc_handler = &proc_dointvec,
55747 + },
55748 + {
55749 + .procname = "socket_client_gid",
55750 + .data = &grsec_socket_client_gid,
55751 + .maxlen = sizeof(int),
55752 + .mode = 0600,
55753 + .proc_handler = &proc_dointvec,
55754 + },
55755 +#endif
55756 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
55757 + {
55758 + .procname = "socket_server",
55759 + .data = &grsec_enable_socket_server,
55760 + .maxlen = sizeof(int),
55761 + .mode = 0600,
55762 + .proc_handler = &proc_dointvec,
55763 + },
55764 + {
55765 + .procname = "socket_server_gid",
55766 + .data = &grsec_socket_server_gid,
55767 + .maxlen = sizeof(int),
55768 + .mode = 0600,
55769 + .proc_handler = &proc_dointvec,
55770 + },
55771 +#endif
55772 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
55773 + {
55774 + .procname = "audit_group",
55775 + .data = &grsec_enable_group,
55776 + .maxlen = sizeof(int),
55777 + .mode = 0600,
55778 + .proc_handler = &proc_dointvec,
55779 + },
55780 + {
55781 + .procname = "audit_gid",
55782 + .data = &grsec_audit_gid,
55783 + .maxlen = sizeof(int),
55784 + .mode = 0600,
55785 + .proc_handler = &proc_dointvec,
55786 + },
55787 +#endif
55788 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
55789 + {
55790 + .procname = "audit_chdir",
55791 + .data = &grsec_enable_chdir,
55792 + .maxlen = sizeof(int),
55793 + .mode = 0600,
55794 + .proc_handler = &proc_dointvec,
55795 + },
55796 +#endif
55797 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
55798 + {
55799 + .procname = "audit_mount",
55800 + .data = &grsec_enable_mount,
55801 + .maxlen = sizeof(int),
55802 + .mode = 0600,
55803 + .proc_handler = &proc_dointvec,
55804 + },
55805 +#endif
55806 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
55807 + {
55808 + .procname = "audit_textrel",
55809 + .data = &grsec_enable_audit_textrel,
55810 + .maxlen = sizeof(int),
55811 + .mode = 0600,
55812 + .proc_handler = &proc_dointvec,
55813 + },
55814 +#endif
55815 +#ifdef CONFIG_GRKERNSEC_DMESG
55816 + {
55817 + .procname = "dmesg",
55818 + .data = &grsec_enable_dmesg,
55819 + .maxlen = sizeof(int),
55820 + .mode = 0600,
55821 + .proc_handler = &proc_dointvec,
55822 + },
55823 +#endif
55824 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
55825 + {
55826 + .procname = "chroot_findtask",
55827 + .data = &grsec_enable_chroot_findtask,
55828 + .maxlen = sizeof(int),
55829 + .mode = 0600,
55830 + .proc_handler = &proc_dointvec,
55831 + },
55832 +#endif
55833 +#ifdef CONFIG_GRKERNSEC_RESLOG
55834 + {
55835 + .procname = "resource_logging",
55836 + .data = &grsec_resource_logging,
55837 + .maxlen = sizeof(int),
55838 + .mode = 0600,
55839 + .proc_handler = &proc_dointvec,
55840 + },
55841 +#endif
55842 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
55843 + {
55844 + .procname = "audit_ptrace",
55845 + .data = &grsec_enable_audit_ptrace,
55846 + .maxlen = sizeof(int),
55847 + .mode = 0600,
55848 + .proc_handler = &proc_dointvec,
55849 + },
55850 +#endif
55851 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
55852 + {
55853 + .procname = "harden_ptrace",
55854 + .data = &grsec_enable_harden_ptrace,
55855 + .maxlen = sizeof(int),
55856 + .mode = 0600,
55857 + .proc_handler = &proc_dointvec,
55858 + },
55859 +#endif
55860 + {
55861 + .procname = "grsec_lock",
55862 + .data = &grsec_lock,
55863 + .maxlen = sizeof(int),
55864 + .mode = 0600,
55865 + .proc_handler = &proc_dointvec,
55866 + },
55867 +#endif
55868 +#ifdef CONFIG_GRKERNSEC_ROFS
55869 + {
55870 + .procname = "romount_protect",
55871 + .data = &grsec_enable_rofs,
55872 + .maxlen = sizeof(int),
55873 + .mode = 0600,
55874 + .proc_handler = &proc_dointvec_minmax,
55875 + .extra1 = &one,
55876 + .extra2 = &one,
55877 + },
55878 +#endif
55879 + { }
55880 +};
55881 +#endif
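Each enabled entry in grsecurity_table above surfaces as a writable sysctl file for root. Assuming the usual placement under the "grsecurity" directory of the kernel sysctl tree (the dirname gr_handle_sysctl_mod() checks for, i.e. /proc/sys/kernel/grsecurity/), an option could be toggled and the whole set then locked roughly as in this illustrative sketch (not part of the patch):

#include <stdio.h>

static int write_grsec_sysctl(const char *name, const char *value)
{
	char path[256];
	FILE *f;

	/* path assumed from the table registration; adjust if different */
	snprintf(path, sizeof(path), "/proc/sys/kernel/grsecurity/%s", name);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fputs(value, f);
	fclose(f);
	return 0;
}

int main(void)
{
	/* enable chroot mount denial, then lock the settings for good */
	if (write_grsec_sysctl("chroot_deny_mount", "1") ||
	    write_grsec_sysctl("grsec_lock", "1"))
		perror("grsecurity sysctl");
	return 0;
}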
55882 diff -urNp linux-3.0.8/grsecurity/grsec_time.c linux-3.0.8/grsecurity/grsec_time.c
55883 --- linux-3.0.8/grsecurity/grsec_time.c 1969-12-31 19:00:00.000000000 -0500
55884 +++ linux-3.0.8/grsecurity/grsec_time.c 2011-08-23 21:48:14.000000000 -0400
55885 @@ -0,0 +1,16 @@
55886 +#include <linux/kernel.h>
55887 +#include <linux/sched.h>
55888 +#include <linux/grinternal.h>
55889 +#include <linux/module.h>
55890 +
55891 +void
55892 +gr_log_timechange(void)
55893 +{
55894 +#ifdef CONFIG_GRKERNSEC_TIME
55895 + if (grsec_enable_time)
55896 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
55897 +#endif
55898 + return;
55899 +}
55900 +
55901 +EXPORT_SYMBOL(gr_log_timechange);
55902 diff -urNp linux-3.0.8/grsecurity/grsec_tpe.c linux-3.0.8/grsecurity/grsec_tpe.c
55903 --- linux-3.0.8/grsecurity/grsec_tpe.c 1969-12-31 19:00:00.000000000 -0500
55904 +++ linux-3.0.8/grsecurity/grsec_tpe.c 2011-08-23 21:48:14.000000000 -0400
55905 @@ -0,0 +1,39 @@
55906 +#include <linux/kernel.h>
55907 +#include <linux/sched.h>
55908 +#include <linux/file.h>
55909 +#include <linux/fs.h>
55910 +#include <linux/grinternal.h>
55911 +
55912 +extern int gr_acl_tpe_check(void);
55913 +
55914 +int
55915 +gr_tpe_allow(const struct file *file)
55916 +{
55917 +#ifdef CONFIG_GRKERNSEC
55918 + struct inode *inode = file->f_path.dentry->d_parent->d_inode;
55919 + const struct cred *cred = current_cred();
55920 +
55921 + if (cred->uid && ((grsec_enable_tpe &&
55922 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
55923 + ((grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid)) ||
55924 + (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid)))
55925 +#else
55926 + in_group_p(grsec_tpe_gid)
55927 +#endif
55928 + ) || gr_acl_tpe_check()) &&
55929 + (inode->i_uid || (!inode->i_uid && ((inode->i_mode & S_IWGRP) ||
55930 + (inode->i_mode & S_IWOTH))))) {
55931 + gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
55932 + return 0;
55933 + }
55934 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
55935 + if (cred->uid && grsec_enable_tpe && grsec_enable_tpe_all &&
55936 + ((inode->i_uid && (inode->i_uid != cred->uid)) ||
55937 + (inode->i_mode & S_IWGRP) || (inode->i_mode & S_IWOTH))) {
55938 + gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
55939 + return 0;
55940 + }
55941 +#endif
55942 +#endif
55943 + return 1;
55944 +}
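The heart of gr_tpe_allow() above is a trust test on the directory holding the binary: a directory that is not root-owned, or that is group/world writable, is untrusted (group membership and the TPE_ALL refinement add further conditions not shown here). A rough userspace rendering of just that directory test (not part of the patch):

#include <stdio.h>
#include <sys/stat.h>

static int dir_is_trusted(const struct stat *st)
{
	if (st->st_uid != 0)			/* not owned by root */
		return 0;
	if (st->st_mode & (S_IWGRP | S_IWOTH))	/* writable by group or others */
		return 0;
	return 1;
}

int main(int argc, char **argv)
{
	struct stat st;

	if (argc < 2 || stat(argv[1], &st) != 0) {
		fprintf(stderr, "usage: %s <directory>\n", argv[0]);
		return 1;
	}
	printf("%s is %s for TPE purposes\n", argv[1],
	       dir_is_trusted(&st) ? "trusted" : "untrusted");
	return 0;
}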
55945 diff -urNp linux-3.0.8/grsecurity/grsum.c linux-3.0.8/grsecurity/grsum.c
55946 --- linux-3.0.8/grsecurity/grsum.c 1969-12-31 19:00:00.000000000 -0500
55947 +++ linux-3.0.8/grsecurity/grsum.c 2011-08-23 21:48:14.000000000 -0400
55948 @@ -0,0 +1,61 @@
55949 +#include <linux/err.h>
55950 +#include <linux/kernel.h>
55951 +#include <linux/sched.h>
55952 +#include <linux/mm.h>
55953 +#include <linux/scatterlist.h>
55954 +#include <linux/crypto.h>
55955 +#include <linux/gracl.h>
55956 +
55957 +
55958 +#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
55959 +#error "crypto and sha256 must be built into the kernel"
55960 +#endif
55961 +
55962 +int
55963 +chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
55964 +{
55965 + char *p;
55966 + struct crypto_hash *tfm;
55967 + struct hash_desc desc;
55968 + struct scatterlist sg;
55969 + unsigned char temp_sum[GR_SHA_LEN];
55970 + volatile int retval = 0;
55971 + volatile int dummy = 0;
55972 + unsigned int i;
55973 +
55974 + sg_init_table(&sg, 1);
55975 +
55976 + tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
55977 + if (IS_ERR(tfm)) {
55978 + /* should never happen, since sha256 should be built in */
55979 + return 1;
55980 + }
55981 +
55982 + desc.tfm = tfm;
55983 + desc.flags = 0;
55984 +
55985 + crypto_hash_init(&desc);
55986 +
55987 + p = salt;
55988 + sg_set_buf(&sg, p, GR_SALT_LEN);
55989 + crypto_hash_update(&desc, &sg, sg.length);
55990 +
55991 + p = entry->pw;
55992 + sg_set_buf(&sg, p, strlen(p));
55993 +
55994 + crypto_hash_update(&desc, &sg, sg.length);
55995 +
55996 + crypto_hash_final(&desc, temp_sum);
55997 +
55998 + memset(entry->pw, 0, GR_PW_LEN);
55999 +
56000 + for (i = 0; i < GR_SHA_LEN; i++)
56001 + if (sum[i] != temp_sum[i])
56002 + retval = 1;
56003 + else
56004 + dummy = 1; // waste a cycle
56005 +
56006 + crypto_free_hash(tfm);
56007 +
56008 + return retval;
56009 +}
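chkpw() above deliberately walks every byte of the digest instead of bailing out at the first mismatch, so the comparison's running time does not reveal how long a matching prefix was. The same pattern in a standalone userspace sketch (not part of the patch):

#include <stdio.h>
#include <string.h>

#define SUM_LEN 32	/* SHA-256 digest length, as GR_SHA_LEN above */

static int compare_sums(const unsigned char *a, const unsigned char *b)
{
	volatile int retval = 0;
	volatile int dummy = 0;
	unsigned int i;

	for (i = 0; i < SUM_LEN; i++)
		if (a[i] != b[i])
			retval = 1;
		else
			dummy = 1;	/* keep both branches doing work */
	(void)dummy;
	return retval;		/* 0 on match, 1 on mismatch */
}

int main(void)
{
	unsigned char x[SUM_LEN], y[SUM_LEN];

	memset(x, 0xab, sizeof(x));
	memcpy(y, x, sizeof(y));
	printf("equal   -> %d\n", compare_sums(x, y));
	y[SUM_LEN - 1] ^= 1;
	printf("unequal -> %d\n", compare_sums(x, y));
	return 0;
}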
56010 diff -urNp linux-3.0.8/grsecurity/Kconfig linux-3.0.8/grsecurity/Kconfig
56011 --- linux-3.0.8/grsecurity/Kconfig 1969-12-31 19:00:00.000000000 -0500
56012 +++ linux-3.0.8/grsecurity/Kconfig 2011-09-15 00:00:57.000000000 -0400
56013 @@ -0,0 +1,1038 @@
56014 +#
56015 +# grsecurity configuration
56016 +#
56017 +
56018 +menu "Grsecurity"
56019 +
56020 +config GRKERNSEC
56021 + bool "Grsecurity"
56022 + select CRYPTO
56023 + select CRYPTO_SHA256
56024 + help
56025 + If you say Y here, you will be able to configure many features
56026 + that will enhance the security of your system. It is highly
56027 + recommended that you say Y here and read through the help
56028 + for each option so that you fully understand the features and
56029 + can evaluate their usefulness for your machine.
56030 +
56031 +choice
56032 + prompt "Security Level"
56033 + depends on GRKERNSEC
56034 + default GRKERNSEC_CUSTOM
56035 +
56036 +config GRKERNSEC_LOW
56037 + bool "Low"
56038 + select GRKERNSEC_LINK
56039 + select GRKERNSEC_FIFO
56040 + select GRKERNSEC_RANDNET
56041 + select GRKERNSEC_DMESG
56042 + select GRKERNSEC_CHROOT
56043 + select GRKERNSEC_CHROOT_CHDIR
56044 +
56045 + help
56046 + If you choose this option, several of the grsecurity options will
56047 + be enabled that will give you greater protection against a number
56048 +	  of attacks, while ensuring that none of your software will have any
56049 + conflicts with the additional security measures. If you run a lot
56050 + of unusual software, or you are having problems with the higher
56051 + security levels, you should say Y here. With this option, the
56052 + following features are enabled:
56053 +
56054 + - Linking restrictions
56055 + - FIFO restrictions
56056 + - Restricted dmesg
56057 + - Enforced chdir("/") on chroot
56058 + - Runtime module disabling
56059 +
56060 +config GRKERNSEC_MEDIUM
56061 + bool "Medium"
56062 + select PAX
56063 + select PAX_EI_PAX
56064 + select PAX_PT_PAX_FLAGS
56065 + select PAX_HAVE_ACL_FLAGS
56066 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
56067 + select GRKERNSEC_CHROOT
56068 + select GRKERNSEC_CHROOT_SYSCTL
56069 + select GRKERNSEC_LINK
56070 + select GRKERNSEC_FIFO
56071 + select GRKERNSEC_DMESG
56072 + select GRKERNSEC_RANDNET
56073 + select GRKERNSEC_FORKFAIL
56074 + select GRKERNSEC_TIME
56075 + select GRKERNSEC_SIGNAL
56076 + select GRKERNSEC_CHROOT
56077 + select GRKERNSEC_CHROOT_UNIX
56078 + select GRKERNSEC_CHROOT_MOUNT
56079 + select GRKERNSEC_CHROOT_PIVOT
56080 + select GRKERNSEC_CHROOT_DOUBLE
56081 + select GRKERNSEC_CHROOT_CHDIR
56082 + select GRKERNSEC_CHROOT_MKNOD
56083 + select GRKERNSEC_PROC
56084 + select GRKERNSEC_PROC_USERGROUP
56085 + select PAX_RANDUSTACK
56086 + select PAX_ASLR
56087 + select PAX_RANDMMAP
56088 + select PAX_REFCOUNT if (X86 || SPARC64)
56089 + select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
56090 +
56091 + help
56092 + If you say Y here, several features in addition to those included
56093 + in the low additional security level will be enabled. These
56094 + features provide even more security to your system, though in rare
56095 + cases they may be incompatible with very old or poorly written
56096 + software. If you enable this option, make sure that your auth
56097 + service (identd) is running as gid 1001. With this option,
56098 + the following features (in addition to those provided in the
56099 + low additional security level) will be enabled:
56100 +
56101 + - Failed fork logging
56102 + - Time change logging
56103 + - Signal logging
56104 + - Deny mounts in chroot
56105 + - Deny double chrooting
56106 + - Deny sysctl writes in chroot
56107 + - Deny mknod in chroot
56108 + - Deny access to abstract AF_UNIX sockets out of chroot
56109 + - Deny pivot_root in chroot
56110 + - Denied writes of /dev/kmem, /dev/mem, and /dev/port
56111 + - /proc restrictions with special GID set to 10 (usually wheel)
56112 + - Address Space Layout Randomization (ASLR)
56113 + - Prevent exploitation of most refcount overflows
56114 + - Bounds checking of copying between the kernel and userland
56115 +
56116 +config GRKERNSEC_HIGH
56117 + bool "High"
56118 + select GRKERNSEC_LINK
56119 + select GRKERNSEC_FIFO
56120 + select GRKERNSEC_DMESG
56121 + select GRKERNSEC_FORKFAIL
56122 + select GRKERNSEC_TIME
56123 + select GRKERNSEC_SIGNAL
56124 + select GRKERNSEC_CHROOT
56125 + select GRKERNSEC_CHROOT_SHMAT
56126 + select GRKERNSEC_CHROOT_UNIX
56127 + select GRKERNSEC_CHROOT_MOUNT
56128 + select GRKERNSEC_CHROOT_FCHDIR
56129 + select GRKERNSEC_CHROOT_PIVOT
56130 + select GRKERNSEC_CHROOT_DOUBLE
56131 + select GRKERNSEC_CHROOT_CHDIR
56132 + select GRKERNSEC_CHROOT_MKNOD
56133 + select GRKERNSEC_CHROOT_CAPS
56134 + select GRKERNSEC_CHROOT_SYSCTL
56135 + select GRKERNSEC_CHROOT_FINDTASK
56136 + select GRKERNSEC_SYSFS_RESTRICT
56137 + select GRKERNSEC_PROC
56138 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
56139 + select GRKERNSEC_HIDESYM
56140 + select GRKERNSEC_BRUTE
56141 + select GRKERNSEC_PROC_USERGROUP
56142 + select GRKERNSEC_KMEM
56143 + select GRKERNSEC_RESLOG
56144 + select GRKERNSEC_RANDNET
56145 + select GRKERNSEC_PROC_ADD
56146 + select GRKERNSEC_CHROOT_CHMOD
56147 + select GRKERNSEC_CHROOT_NICE
56148 + select GRKERNSEC_AUDIT_MOUNT
56149 + select GRKERNSEC_MODHARDEN if (MODULES)
56150 + select GRKERNSEC_HARDEN_PTRACE
56151 + select GRKERNSEC_VM86 if (X86_32)
56152 + select GRKERNSEC_KERN_LOCKOUT if (X86 || ARM || PPC || SPARC)
56153 + select PAX
56154 + select PAX_RANDUSTACK
56155 + select PAX_ASLR
56156 + select PAX_RANDMMAP
56157 + select PAX_NOEXEC
56158 + select PAX_MPROTECT
56159 + select PAX_EI_PAX
56160 + select PAX_PT_PAX_FLAGS
56161 + select PAX_HAVE_ACL_FLAGS
56162 + select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
56163 + select PAX_MEMORY_UDEREF if (X86 && !XEN)
56164 + select PAX_RANDKSTACK if (X86_TSC && X86)
56165 + select PAX_SEGMEXEC if (X86_32)
56166 + select PAX_PAGEEXEC
56167 + select PAX_EMUPLT if (ALPHA || PARISC || SPARC)
56168 + select PAX_EMUTRAMP if (PARISC)
56169 + select PAX_EMUSIGRT if (PARISC)
56170 + select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
56171 + select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86))
56172 + select PAX_REFCOUNT if (X86 || SPARC64)
56173 + select PAX_USERCOPY if ((X86 || PPC || SPARC || ARM) && (SLAB || SLUB || SLOB))
56174 + help
56175 + If you say Y here, many of the features of grsecurity will be
56176 + enabled, which will protect you against many kinds of attacks
56177 + against your system. The heightened security comes at a cost
56178 + of an increased chance of incompatibilities with rare software
56179 + on your machine. Since this security level enables PaX, you should
56180 + view <http://pax.grsecurity.net> and read about the PaX
56181 + project. While you are there, download chpax and run it on
56182 + binaries that cause problems with PaX. Also remember that
56183 + since the /proc restrictions are enabled, you must run your
56184 + identd as gid 1001. This security level enables the following
56185 + features in addition to those listed in the low and medium
56186 + security levels:
56187 +
56188 + - Additional /proc restrictions
56189 + - Chmod restrictions in chroot
56190 + - No signals, ptrace, or viewing of processes outside of chroot
56191 + - Capability restrictions in chroot
56192 + - Deny fchdir out of chroot
56193 + - Priority restrictions in chroot
56194 + - Segmentation-based implementation of PaX
56195 + - Mprotect restrictions
56196 + - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
56197 + - Kernel stack randomization
56198 + - Mount/unmount/remount logging
56199 + - Kernel symbol hiding
56200 + - Prevention of memory exhaustion-based exploits
56201 + - Hardening of module auto-loading
56202 + - Ptrace restrictions
56203 + - Restricted vm86 mode
56204 + - Restricted sysfs/debugfs
56205 + - Active kernel exploit response
56206 +
56207 +config GRKERNSEC_CUSTOM
56208 + bool "Custom"
56209 + help
56210 + If you say Y here, you will be able to configure every grsecurity
56211 + option, which allows you to enable many more features that aren't
56212 + covered in the basic security levels. These additional features
56213 + include TPE, socket restrictions, and the sysctl system for
56214 + grsecurity. It is advised that you read through the help for
56215 + each option to determine its usefulness in your situation.
56216 +
56217 +endchoice
56218 +
56219 +menu "Address Space Protection"
56220 +depends on GRKERNSEC
56221 +
56222 +config GRKERNSEC_KMEM
56223 + bool "Deny writing to /dev/kmem, /dev/mem, and /dev/port"
56224 + select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
56225 + help
56226 + If you say Y here, /dev/kmem and /dev/mem won't be allowed to
56227 + be written to via mmap or otherwise to modify the running kernel.
56228 + /dev/port will also not be allowed to be opened. If you have module
56229 + support disabled, enabling this will close up four ways that are
56230 + currently used to insert malicious code into the running kernel.
56231 + Even with all these features enabled, we still highly recommend that
56232 + you use the RBAC system, as it is still possible for an attacker to
56233 + modify the running kernel through privileged I/O granted by ioperm/iopl.
56234 + If you are not using XFree86, you may be able to stop this additional
56235 + case by enabling the 'Disable privileged I/O' option. Though nothing
56236 + legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
56237 + but only to video memory, which is the only writing we allow in this
56238 +	  case. If /dev/kmem or /dev/mem is mmapped without PROT_WRITE, the mapping
56239 +	  will not be allowed to be mprotected with PROT_WRITE later.
56240 + It is highly recommended that you say Y here if you meet all the
56241 + conditions above.
56242 +
56243 +config GRKERNSEC_VM86
56244 + bool "Restrict VM86 mode"
56245 + depends on X86_32
56246 +
56247 + help
56248 + If you say Y here, only processes with CAP_SYS_RAWIO will be able to
56249 + make use of a special execution mode on 32bit x86 processors called
56250 + Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
56251 + video cards and will still work with this option enabled. The purpose
56252 + of the option is to prevent exploitation of emulation errors in
56253 + virtualization of vm86 mode like the one discovered in VMWare in 2009.
56254 + Nearly all users should be able to enable this option.
56255 +
56256 +config GRKERNSEC_IO
56257 + bool "Disable privileged I/O"
56258 + depends on X86
56259 + select RTC_CLASS
56260 + select RTC_INTF_DEV
56261 + select RTC_DRV_CMOS
56262 +
56263 + help
56264 + If you say Y here, all ioperm and iopl calls will return an error.
56265 + Ioperm and iopl can be used to modify the running kernel.
56266 + Unfortunately, some programs need this access to operate properly,
56267 + the most notable of which are XFree86 and hwclock. hwclock can be
56268 + remedied by having RTC support in the kernel, so real-time
56269 + clock support is enabled if this option is enabled, to ensure
56270 + that hwclock operates correctly. XFree86 still will not
56271 + operate correctly with this option enabled, so DO NOT CHOOSE Y
56272 + IF YOU USE XFree86. If you use XFree86 and you still want to
56273 + protect your kernel against modification, use the RBAC system.
56274 +
56275 +config GRKERNSEC_PROC_MEMMAP
56276 + bool "Remove addresses from /proc/<pid>/[smaps|maps|stat]"
56277 + default y if (PAX_NOEXEC || PAX_ASLR)
56278 + depends on PAX_NOEXEC || PAX_ASLR
56279 + help
56280 + If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
56281 +	  give no information about the addresses of a task's mappings if
56282 + PaX features that rely on random addresses are enabled on the task.
56283 + If you use PaX it is greatly recommended that you say Y here as it
56284 + closes up a hole that makes the full ASLR useless for suid
56285 + binaries.
56286 +
56287 +config GRKERNSEC_BRUTE
56288 + bool "Deter exploit bruteforcing"
56289 + help
56290 + If you say Y here, attempts to bruteforce exploits against forking
56291 + daemons such as apache or sshd, as well as against suid/sgid binaries
56292 + will be deterred. When a child of a forking daemon is killed by PaX
56293 + or crashes due to an illegal instruction or other suspicious signal,
56294 + the parent process will be delayed 30 seconds upon every subsequent
56295 + fork until the administrator is able to assess the situation and
56296 + restart the daemon.
56297 + In the suid/sgid case, the attempt is logged, the user has all their
56298 + processes terminated, and they are prevented from executing any further
56299 + processes for 15 minutes.
56300 + It is recommended that you also enable signal logging in the auditing
56301 + section so that logs are generated when a process triggers a suspicious
56302 + signal.
56303 + If the sysctl option is enabled, a sysctl option with name
56304 + "deter_bruteforce" is created.
56305 +
56306 +
56307 +config GRKERNSEC_MODHARDEN
56308 + bool "Harden module auto-loading"
56309 + depends on MODULES
56310 + help
56311 + If you say Y here, module auto-loading in response to use of some
56312 + feature implemented by an unloaded module will be restricted to
56313 + root users. Enabling this option helps defend against attacks
56314 + by unprivileged users who abuse the auto-loading behavior to
56315 + cause a vulnerable module to load that is then exploited.
56316 +
56317 + If this option prevents a legitimate use of auto-loading for a
56318 + non-root user, the administrator can execute modprobe manually
56319 + with the exact name of the module mentioned in the alert log.
56320 + Alternatively, the administrator can add the module to the list
56321 + of modules loaded at boot by modifying init scripts.
56322 +
56323 + Modification of init scripts will most likely be needed on
56324 + Ubuntu servers with encrypted home directory support enabled,
56325 + as the first non-root user logging in will cause the ecb(aes),
56326 + ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
56327 +
56328 +config GRKERNSEC_HIDESYM
56329 + bool "Hide kernel symbols"
56330 + help
56331 + If you say Y here, getting information on loaded modules, and
56332 + displaying all kernel symbols through a syscall will be restricted
56333 + to users with CAP_SYS_MODULE. For software compatibility reasons,
56334 + /proc/kallsyms will be restricted to the root user. The RBAC
56335 + system can hide that entry even from root.
56336 +
56337 + This option also prevents leaking of kernel addresses through
56338 + several /proc entries.
56339 +
56340 + Note that this option is only effective provided the following
56341 + conditions are met:
56342 + 1) The kernel using grsecurity is not precompiled by some distribution
56343 + 2) You have also enabled GRKERNSEC_DMESG
56344 + 3) You are using the RBAC system and hiding other files such as your
56345 + kernel image and System.map. Alternatively, enabling this option
56346 + causes the permissions on /boot, /lib/modules, and the kernel
56347 + source directory to change at compile time to prevent
56348 + reading by non-root users.
56349 + If the above conditions are met, this option will aid in providing a
56350 + useful protection against local kernel exploitation of overflows
56351 + and arbitrary read/write vulnerabilities.
56352 +
56353 +config GRKERNSEC_KERN_LOCKOUT
56354 + bool "Active kernel exploit response"
56355 + depends on X86 || ARM || PPC || SPARC
56356 + help
56357 + If you say Y here, when a PaX alert is triggered due to suspicious
56358 + activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
56359 +	  or an OOPS occurs due to bad memory accesses, instead of just
56360 + terminating the offending process (and potentially allowing
56361 + a subsequent exploit from the same user), we will take one of two
56362 + actions:
56363 + If the user was root, we will panic the system
56364 + If the user was non-root, we will log the attempt, terminate
56365 + all processes owned by the user, then prevent them from creating
56366 + any new processes until the system is restarted
56367 + This deters repeated kernel exploitation/bruteforcing attempts
56368 + and is useful for later forensics.
56369 +
56370 +endmenu
56371 +menu "Role Based Access Control Options"
56372 +depends on GRKERNSEC
56373 +
56374 +config GRKERNSEC_RBAC_DEBUG
56375 + bool
56376 +
56377 +config GRKERNSEC_NO_RBAC
56378 + bool "Disable RBAC system"
56379 + help
56380 + If you say Y here, the /dev/grsec device will be removed from the kernel,
56381 + preventing the RBAC system from being enabled. You should only say Y
56382 + here if you have no intention of using the RBAC system, so as to prevent
56383 + an attacker with root access from misusing the RBAC system to hide files
56384 + and processes when loadable module support and /dev/[k]mem have been
56385 + locked down.
56386 +
56387 +config GRKERNSEC_ACL_HIDEKERN
56388 + bool "Hide kernel processes"
56389 + help
56390 +	  If you say Y here, all kernel threads will be hidden from all
56391 + processes but those whose subject has the "view hidden processes"
56392 + flag.
56393 +
56394 +config GRKERNSEC_ACL_MAXTRIES
56395 + int "Maximum tries before password lockout"
56396 + default 3
56397 + help
56398 + This option enforces the maximum number of times a user can attempt
56399 + to authorize themselves with the grsecurity RBAC system before being
56400 + denied the ability to attempt authorization again for a specified time.
56401 + The lower the number, the harder it will be to brute-force a password.
56402 +
56403 +config GRKERNSEC_ACL_TIMEOUT
56404 + int "Time to wait after max password tries, in seconds"
56405 + default 30
56406 + help
56407 + This option specifies the time the user must wait after attempting to
56408 + authorize to the RBAC system with the maximum number of invalid
56409 + passwords. The higher the number, the harder it will be to brute-force
56410 + a password.
56411 +
56412 +endmenu
56413 +menu "Filesystem Protections"
56414 +depends on GRKERNSEC
56415 +
56416 +config GRKERNSEC_PROC
56417 + bool "Proc restrictions"
56418 + help
56419 + If you say Y here, the permissions of the /proc filesystem
56420 + will be altered to enhance system security and privacy. You MUST
56421 + choose either a user only restriction or a user and group restriction.
56422 + Depending upon the option you choose, you can either restrict users to
56423 +	  see only the processes they themselves run, or choose a special group
56424 +	  that can view all processes and files normally restricted to root.
56425 +	  NOTE: If you're running identd as
56426 + a non-root user, you will have to run it as the group you specify here.
56427 +
56428 +config GRKERNSEC_PROC_USER
56429 + bool "Restrict /proc to user only"
56430 + depends on GRKERNSEC_PROC
56431 + help
56432 + If you say Y here, non-root users will only be able to view their own
56433 +	  processes, and will be restricted from viewing network-related information
56434 +	  and kernel symbol and module information.
56435 +
56436 +config GRKERNSEC_PROC_USERGROUP
56437 + bool "Allow special group"
56438 + depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
56439 + help
56440 + If you say Y here, you will be able to select a group that will be
56441 + able to view all processes and network-related information. If you've
56442 + enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
56443 + remain hidden. This option is useful if you want to run identd as
56444 + a non-root user.
56445 +
56446 +config GRKERNSEC_PROC_GID
56447 + int "GID for special group"
56448 + depends on GRKERNSEC_PROC_USERGROUP
56449 + default 1001
56450 +
56451 +config GRKERNSEC_PROC_ADD
56452 + bool "Additional restrictions"
56453 + depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
56454 + help
56455 + If you say Y here, additional restrictions will be placed on
56456 + /proc that keep normal users from viewing device information and
56457 + slabinfo information that could be useful for exploits.
56458 +
56459 +config GRKERNSEC_LINK
56460 + bool "Linking restrictions"
56461 + help
56462 + If you say Y here, /tmp race exploits will be prevented, since users
56463 + will no longer be able to follow symlinks owned by other users in
56464 + world-writable +t directories (e.g. /tmp), unless the owner of the
56465 +	  symlink is the owner of the directory. Users will also not be
56466 + able to hardlink to files they do not own. If the sysctl option is
56467 + enabled, a sysctl option with name "linking_restrictions" is created.
56468 +
56469 +config GRKERNSEC_FIFO
56470 + bool "FIFO restrictions"
56471 + help
56472 + If you say Y here, users will not be able to write to FIFOs they don't
56473 + own in world-writable +t directories (e.g. /tmp), unless the owner of
56474 +	  the FIFO is the same as the owner of the directory it's held in. If the sysctl
56475 + option is enabled, a sysctl option with name "fifo_restrictions" is
56476 + created.
56477 +
56478 +config GRKERNSEC_SYSFS_RESTRICT
56479 + bool "Sysfs/debugfs restriction"
56480 + depends on SYSFS
56481 + help
56482 + If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
56483 + any filesystem normally mounted under it (e.g. debugfs) will only
56484 + be accessible by root. These filesystems generally provide access
56485 + to hardware and debug information that isn't appropriate for unprivileged
56486 + users of the system. Sysfs and debugfs have also become a large source
56487 + of new vulnerabilities, ranging from infoleaks to local compromise.
56488 + There has been very little oversight with an eye toward security involved
56489 + in adding new exporters of information to these filesystems, so their
56490 + use is discouraged.
56491 + This option is equivalent to a chmod 0700 of the mount paths.
56492 +
56493 +config GRKERNSEC_ROFS
56494 + bool "Runtime read-only mount protection"
56495 + help
56496 + If you say Y here, a sysctl option with name "romount_protect" will
56497 + be created. By setting this option to 1 at runtime, filesystems
56498 + will be protected in the following ways:
56499 + * No new writable mounts will be allowed
56500 + * Existing read-only mounts won't be able to be remounted read/write
56501 + * Write operations will be denied on all block devices
56502 + This option acts independently of grsec_lock: once it is set to 1,
56503 + it cannot be turned off. Therefore, please be mindful of the resulting
56504 + behavior if this option is enabled in an init script on a read-only
56505 + filesystem. This feature is mainly intended for secure embedded systems.
56506 +
56507 +config GRKERNSEC_CHROOT
56508 + bool "Chroot jail restrictions"
56509 + help
56510 + If you say Y here, you will be able to choose several options that will
56511 + make breaking out of a chrooted jail much more difficult. If you
56512 + encounter no software incompatibilities with the following options, it
56513 + is recommended that you enable each one.
56514 +
56515 +config GRKERNSEC_CHROOT_MOUNT
56516 + bool "Deny mounts"
56517 + depends on GRKERNSEC_CHROOT
56518 + help
56519 + If you say Y here, processes inside a chroot will not be able to
56520 + mount or remount filesystems. If the sysctl option is enabled, a
56521 + sysctl option with name "chroot_deny_mount" is created.
56522 +
56523 +config GRKERNSEC_CHROOT_DOUBLE
56524 + bool "Deny double-chroots"
56525 + depends on GRKERNSEC_CHROOT
56526 + help
56527 + If you say Y here, processes inside a chroot will not be able to chroot
56528 + again outside the chroot. This is a widely used method of breaking
56529 + out of a chroot jail and should not be allowed. If the sysctl
56530 + option is enabled, a sysctl option with name
56531 + "chroot_deny_chroot" is created.
56532 +
56533 +config GRKERNSEC_CHROOT_PIVOT
56534 + bool "Deny pivot_root in chroot"
56535 + depends on GRKERNSEC_CHROOT
56536 + help
56537 + If you say Y here, processes inside a chroot will not be able to use
56538 + a function called pivot_root() that was introduced in Linux 2.3.41. It
56539 +	  works similarly to chroot in that it changes the root filesystem. This
56540 + function could be misused in a chrooted process to attempt to break out
56541 + of the chroot, and therefore should not be allowed. If the sysctl
56542 + option is enabled, a sysctl option with name "chroot_deny_pivot" is
56543 + created.
56544 +
56545 +config GRKERNSEC_CHROOT_CHDIR
56546 + bool "Enforce chdir(\"/\") on all chroots"
56547 + depends on GRKERNSEC_CHROOT
56548 + help
56549 + If you say Y here, the current working directory of all newly-chrooted
56550 +	  applications will be set to the root directory of the chroot.
56551 + The man page on chroot(2) states:
56552 + Note that this call does not change the current working
56553 + directory, so that `.' can be outside the tree rooted at
56554 + `/'. In particular, the super-user can escape from a
56555 + `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
56556 +
56557 + It is recommended that you say Y here, since it's not known to break
56558 + any software. If the sysctl option is enabled, a sysctl option with
56559 + name "chroot_enforce_chdir" is created.
56560 +
56561 +config GRKERNSEC_CHROOT_CHMOD
56562 + bool "Deny (f)chmod +s"
56563 + depends on GRKERNSEC_CHROOT
56564 + help
56565 + If you say Y here, processes inside a chroot will not be able to chmod
56566 + or fchmod files to make them have suid or sgid bits. This protects
56567 + against another published method of breaking a chroot. If the sysctl
56568 + option is enabled, a sysctl option with name "chroot_deny_chmod" is
56569 + created.
56570 +
56571 +config GRKERNSEC_CHROOT_FCHDIR
56572 + bool "Deny fchdir out of chroot"
56573 + depends on GRKERNSEC_CHROOT
56574 + help
56575 + If you say Y here, a well-known method of breaking chroots by fchdir'ing
56576 + to a file descriptor of the chrooting process that points to a directory
56577 + outside the filesystem will be stopped. If the sysctl option
56578 + is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
56579 +
56580 +config GRKERNSEC_CHROOT_MKNOD
56581 + bool "Deny mknod"
56582 + depends on GRKERNSEC_CHROOT
56583 + help
56584 + If you say Y here, processes inside a chroot will not be allowed to
56585 + mknod. The problem with using mknod inside a chroot is that it
56586 + would allow an attacker to create a device entry that is the same
56587 +	  as one on the physical root of your system, which could be anything
56588 +	  from the console device to a device for your hard drive (which
56589 + they could then use to wipe the drive or steal data). It is recommended
56590 + that you say Y here, unless you run into software incompatibilities.
56591 + If the sysctl option is enabled, a sysctl option with name
56592 + "chroot_deny_mknod" is created.
56593 +
56594 +config GRKERNSEC_CHROOT_SHMAT
56595 + bool "Deny shmat() out of chroot"
56596 + depends on GRKERNSEC_CHROOT
56597 + help
56598 + If you say Y here, processes inside a chroot will not be able to attach
56599 + to shared memory segments that were created outside of the chroot jail.
56600 + It is recommended that you say Y here. If the sysctl option is enabled,
56601 + a sysctl option with name "chroot_deny_shmat" is created.
56602 +
56603 +config GRKERNSEC_CHROOT_UNIX
56604 + bool "Deny access to abstract AF_UNIX sockets out of chroot"
56605 + depends on GRKERNSEC_CHROOT
56606 + help
56607 + If you say Y here, processes inside a chroot will not be able to
56608 + connect to abstract (meaning not belonging to a filesystem) Unix
56609 + domain sockets that were bound outside of a chroot. It is recommended
56610 + that you say Y here. If the sysctl option is enabled, a sysctl option
56611 + with name "chroot_deny_unix" is created.
56612 +
56613 +config GRKERNSEC_CHROOT_FINDTASK
56614 + bool "Protect outside processes"
56615 + depends on GRKERNSEC_CHROOT
56616 + help
56617 + If you say Y here, processes inside a chroot will not be able to
56618 + kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
56619 + getsid, or view any process outside of the chroot. If the sysctl
56620 + option is enabled, a sysctl option with name "chroot_findtask" is
56621 + created.
56622 +
56623 +config GRKERNSEC_CHROOT_NICE
56624 + bool "Restrict priority changes"
56625 + depends on GRKERNSEC_CHROOT
56626 + help
56627 + If you say Y here, processes inside a chroot will not be able to raise
56628 + the priority of processes in the chroot, or alter the priority of
56629 + processes outside the chroot. This provides more security than simply
56630 + removing CAP_SYS_NICE from the process' capability set. If the
56631 + sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
56632 + is created.
56633 +
56634 +config GRKERNSEC_CHROOT_SYSCTL
56635 + bool "Deny sysctl writes"
56636 + depends on GRKERNSEC_CHROOT
56637 + help
56638 + If you say Y here, an attacker in a chroot will not be able to
56639 + write to sysctl entries, either by sysctl(2) or through a /proc
56640 + interface. It is strongly recommended that you say Y here. If the
56641 + sysctl option is enabled, a sysctl option with name
56642 + "chroot_deny_sysctl" is created.
56643 +
56644 +config GRKERNSEC_CHROOT_CAPS
56645 + bool "Capability restrictions"
56646 + depends on GRKERNSEC_CHROOT
56647 + help
56648 + If you say Y here, the capabilities on all processes within a
56649 + chroot jail will be lowered to stop module insertion, raw i/o,
56650 + system and net admin tasks, rebooting the system, modifying immutable
56651 + files, modifying IPC owned by another, and changing the system time.
56652 +	  This is left as an option because it can break some apps. Disable this
56653 + if your chrooted apps are having problems performing those kinds of
56654 + tasks. If the sysctl option is enabled, a sysctl option with
56655 + name "chroot_caps" is created.
56656 +
56657 +endmenu
56658 +menu "Kernel Auditing"
56659 +depends on GRKERNSEC
56660 +
56661 +config GRKERNSEC_AUDIT_GROUP
56662 + bool "Single group for auditing"
56663 + help
56664 + If you say Y here, the exec, chdir, and (un)mount logging features
56665 + will only operate on a group you specify. This option is recommended
56666 + if you only want to watch certain users instead of having a large
56667 + amount of logs from the entire system. If the sysctl option is enabled,
56668 + a sysctl option with name "audit_group" is created.
56669 +
56670 +config GRKERNSEC_AUDIT_GID
56671 + int "GID for auditing"
56672 + depends on GRKERNSEC_AUDIT_GROUP
56673 + default 1007
56674 +
56675 +config GRKERNSEC_EXECLOG
56676 + bool "Exec logging"
56677 + help
56678 + If you say Y here, all execve() calls will be logged (since the
56679 + other exec*() calls are frontends to execve(), all execution
56680 + will be logged). Useful for shell-servers that like to keep track
56681 + of their users. If the sysctl option is enabled, a sysctl option with
56682 + name "exec_logging" is created.
56683 + WARNING: This option when enabled will produce a LOT of logs, especially
56684 + on an active system.
56685 +
56686 +config GRKERNSEC_RESLOG
56687 + bool "Resource logging"
56688 + help
56689 + If you say Y here, all attempts to overstep resource limits will
56690 + be logged with the resource name, the requested size, and the current
56691 + limit. It is highly recommended that you say Y here. If the sysctl
56692 + option is enabled, a sysctl option with name "resource_logging" is
56693 + created. If the RBAC system is enabled, the sysctl value is ignored.
56694 +
56695 +config GRKERNSEC_CHROOT_EXECLOG
56696 + bool "Log execs within chroot"
56697 + help
56698 + If you say Y here, all executions inside a chroot jail will be logged
56699 + to syslog. This can cause a large amount of logs if certain
56700 + applications (eg. djb's daemontools) are installed on the system, and
56701 + is therefore left as an option. If the sysctl option is enabled, a
56702 + sysctl option with name "chroot_execlog" is created.
56703 +
56704 +config GRKERNSEC_AUDIT_PTRACE
56705 + bool "Ptrace logging"
56706 + help
56707 + If you say Y here, all attempts to attach to a process via ptrace
56708 + will be logged. If the sysctl option is enabled, a sysctl option
56709 + with name "audit_ptrace" is created.
56710 +
56711 +config GRKERNSEC_AUDIT_CHDIR
56712 + bool "Chdir logging"
56713 + help
56714 + If you say Y here, all chdir() calls will be logged. If the sysctl
56715 + option is enabled, a sysctl option with name "audit_chdir" is created.
56716 +
56717 +config GRKERNSEC_AUDIT_MOUNT
56718 + bool "(Un)Mount logging"
56719 + help
56720 + If you say Y here, all mounts and unmounts will be logged. If the
56721 + sysctl option is enabled, a sysctl option with name "audit_mount" is
56722 + created.
56723 +
56724 +config GRKERNSEC_SIGNAL
56725 + bool "Signal logging"
56726 + help
56727 + If you say Y here, certain important signals will be logged, such as
56728 +	  SIGSEGV, which will as a result inform you when an error in a program
56729 + occurred, which in some cases could mean a possible exploit attempt.
56730 + If the sysctl option is enabled, a sysctl option with name
56731 + "signal_logging" is created.
56732 +
56733 +config GRKERNSEC_FORKFAIL
56734 + bool "Fork failure logging"
56735 + help
56736 + If you say Y here, all failed fork() attempts will be logged.
56737 + This could suggest a fork bomb, or someone attempting to overstep
56738 + their process limit. If the sysctl option is enabled, a sysctl option
56739 + with name "forkfail_logging" is created.
56740 +
56741 +config GRKERNSEC_TIME
56742 + bool "Time change logging"
56743 + help
56744 + If you say Y here, any changes of the system clock will be logged.
56745 + If the sysctl option is enabled, a sysctl option with name
56746 + "timechange_logging" is created.
56747 +
56748 +config GRKERNSEC_PROC_IPADDR
56749 + bool "/proc/<pid>/ipaddr support"
56750 + help
56751 + If you say Y here, a new entry will be added to each /proc/<pid>
56752 + directory that contains the IP address of the person using the task.
56753 + The IP is carried across local TCP and AF_UNIX stream sockets.
56754 + This information can be useful for IDS/IPSes to perform remote response
56755 + to a local attack. The entry is readable by only the owner of the
56756 + process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
56757 + the RBAC system), and thus does not create privacy concerns.
56758 +
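
As a rough illustration of how the ipaddr entry can be consumed from userspace (a minimal sketch; the path layout follows the help text above, the entry only exists when the option is enabled, and the helper name is illustrative, not part of the patch):

#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>

/* Print the IP address recorded for a given PID.  /proc/<pid>/ipaddr only
 * exists when GRKERNSEC_PROC_IPADDR is enabled, and is readable only by
 * the task owner (and root). */
static int print_ipaddr(pid_t pid)
{
	char path[64], addr[64];
	FILE *f;

	snprintf(path, sizeof(path), "/proc/%d/ipaddr", (int)pid);
	f = fopen(path, "r");
	if (!f)
		return -1;
	if (fgets(addr, sizeof(addr), f))
		printf("pid %d last seen from %s", (int)pid, addr);
	fclose(f);
	return 0;
}

int main(void)
{
	return print_ipaddr(getpid()) ? 1 : 0;
}
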
56759 +config GRKERNSEC_RWXMAP_LOG
56760 + bool "Denied RWX mmap/mprotect logging"
56761 + depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
56762 + help
56763 + If you say Y here, calls to mmap() and mprotect() with explicit
56764 + usage of PROT_WRITE and PROT_EXEC together will be logged when
56765 + denied by the PAX_MPROTECT feature. If the sysctl option is
56766 + enabled, a sysctl option with name "rwxmap_logging" is created.
56767 +
56768 +config GRKERNSEC_AUDIT_TEXTREL
56769 + bool "ELF text relocations logging (READ HELP)"
56770 + depends on PAX_MPROTECT
56771 + help
56772 + If you say Y here, text relocations will be logged with the filename
56773 + of the offending library or binary. The purpose of the feature is
56774 + to help Linux distribution developers get rid of libraries and
56775 + binaries that need text relocations which hinder the future progress
56776 + of PaX. Only Linux distribution developers should say Y here, and
56777 + never on a production machine, as this option creates an information
56778 + leak that could aid an attacker in defeating the randomization of
56779 + a single memory region. If the sysctl option is enabled, a sysctl
56780 + option with name "audit_textrel" is created.
56781 +
56782 +endmenu
56783 +
56784 +menu "Executable Protections"
56785 +depends on GRKERNSEC
56786 +
56787 +config GRKERNSEC_DMESG
56788 + bool "Dmesg(8) restriction"
56789 + help
56790 + If you say Y here, non-root users will not be able to use dmesg(8)
56791 + to view up to the last 4kb of messages in the kernel's log buffer.
56792 + The kernel's log buffer often contains kernel addresses and other
56793 + identifying information useful to an attacker in fingerprinting a
56794 + system for a targeted exploit.
56795 + If the sysctl option is enabled, a sysctl option with name "dmesg" is
56796 + created.
56797 +
56798 +config GRKERNSEC_HARDEN_PTRACE
56799 + bool "Deter ptrace-based process snooping"
56800 + help
56801 + If you say Y here, TTY sniffers and other malicious monitoring
56802 + programs implemented through ptrace will be defeated. If you
56803 + have been using the RBAC system, this option has already been
56804 + enabled for several years for all users, with the ability to make
56805 + fine-grained exceptions.
56806 +
56807 + This option only affects the ability of non-root users to ptrace
56808 + processes that are not a descendant of the ptracing process.
56809 + This means that strace ./binary and gdb ./binary will still work,
56810 + but attaching to arbitrary processes will not. If the sysctl
56811 + option is enabled, a sysctl option with name "harden_ptrace" is
56812 + created.
56813 +
56814 +config GRKERNSEC_TPE
56815 + bool "Trusted Path Execution (TPE)"
56816 + help
56817 + If you say Y here, you will be able to choose a GID to add to the
56818 + supplementary groups of users you want to mark as "untrusted."
56819 + These users will not be able to execute any files that are not in
56820 + root-owned directories writable only by root. If the sysctl option
56821 + is enabled, a sysctl option with name "tpe" is created.
56822 +
56823 +config GRKERNSEC_TPE_ALL
56824 + bool "Partially restrict all non-root users"
56825 + depends on GRKERNSEC_TPE
56826 + help
56827 + If you say Y here, all non-root users will be covered under
56828 + a weaker TPE restriction. This is separate from, and in addition to,
56829 + the main TPE options that you have selected elsewhere. Thus, if a
56830 + "trusted" GID is chosen, this restriction applies to even that GID.
56831 + Under this restriction, all non-root users will only be allowed to
56832 + execute files in directories they own that are not group or
56833 + world-writable, or in directories owned by root and writable only by
56834 + root. If the sysctl option is enabled, a sysctl option with name
56835 + "tpe_restrict_all" is created.
56836 +
56837 +config GRKERNSEC_TPE_INVERT
56838 + bool "Invert GID option"
56839 + depends on GRKERNSEC_TPE
56840 + help
56841 + If you say Y here, the group you specify in the TPE configuration will
56842 + decide what group TPE restrictions will be *disabled* for. This
56843 + option is useful if you want TPE restrictions to be applied to most
56844 + users on the system. If the sysctl option is enabled, a sysctl option
56845 + with name "tpe_invert" is created. Unlike other sysctl options, this
56846 + entry will default to on for backward-compatibility.
56847 +
56848 +config GRKERNSEC_TPE_GID
56849 + int "GID for untrusted users"
56850 + depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
56851 + default 1005
56852 + help
56853 + Setting this GID determines what group TPE restrictions will be
56854 + *enabled* for. If the sysctl option is enabled, a sysctl option
56855 + with name "tpe_gid" is created.
56856 +
56857 +config GRKERNSEC_TPE_GID
56858 + int "GID for trusted users"
56859 + depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
56860 + default 1005
56861 + help
56862 + Setting this GID determines what group TPE restrictions will be
56863 + *disabled* for. If the sysctl option is enabled, a sysctl option
56864 + with name "tpe_gid" is created.
56865 +
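
For reference, a minimal userspace sketch of the directory test the TPE options describe above (a trusted directory is root-owned and writable only by root; the weaker TPE_ALL rule also accepts directories the user owns that are not group- or world-writable). The function names are illustrative; the actual in-kernel check lives in grsec_tpe.c and is not reproduced here:

#include <stdbool.h>
#include <stdio.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>

/* Basic TPE rule: directory owned by root and not group/world-writable. */
static bool tpe_dir_trusted(const char *dir)
{
	struct stat st;

	if (stat(dir, &st) != 0 || !S_ISDIR(st.st_mode))
		return false;
	return st.st_uid == 0 && !(st.st_mode & (S_IWGRP | S_IWOTH));
}

/* Weaker GRKERNSEC_TPE_ALL rule: additionally accept directories the user
 * owns, as long as they are not group- or world-writable. */
static bool tpe_all_dir_trusted(const char *dir, uid_t uid)
{
	struct stat st;

	if (stat(dir, &st) != 0 || !S_ISDIR(st.st_mode))
		return false;
	if (st.st_mode & (S_IWGRP | S_IWOTH))
		return false;
	return st.st_uid == 0 || st.st_uid == uid;
}

int main(int argc, char **argv)
{
	const char *dir = argc > 1 ? argv[1] : "/usr/bin";

	printf("%s: TPE %s, TPE_ALL %s\n", dir,
	       tpe_dir_trusted(dir) ? "trusted" : "untrusted",
	       tpe_all_dir_trusted(dir, getuid()) ? "trusted" : "untrusted");
	return 0;
}
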
56866 +endmenu
56867 +menu "Network Protections"
56868 +depends on GRKERNSEC
56869 +
56870 +config GRKERNSEC_RANDNET
56871 + bool "Larger entropy pools"
56872 + help
56873 + If you say Y here, the entropy pools used for many features of Linux
56874 + and grsecurity will be doubled in size. Since several grsecurity
56875 + features use additional randomness, it is recommended that you say Y
56876 + here. Saying Y here has a similar effect as modifying
56877 + /proc/sys/kernel/random/poolsize.
56878 +
56879 +config GRKERNSEC_BLACKHOLE
56880 + bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
56881 + depends on NET
56882 + help
56883 + If you say Y here, neither TCP resets nor ICMP
56884 + destination-unreachable packets will be sent in response to packets
56885 + sent to ports for which no associated listening process exists.
56886 + This feature supports both IPv4 and IPv6 and exempts the
56887 + loopback interface from blackholing. Enabling this feature
56888 + makes a host more resilient to DoS attacks and reduces network
56889 + visibility against scanners.
56890 +
56891 + The blackhole feature as-implemented is equivalent to the FreeBSD
56892 + blackhole feature, as it prevents RST responses to all packets, not
56893 + just SYNs. Under most application behavior this causes no
56894 + problems, but applications (like haproxy) may not close certain
56895 + connections in a way that cleanly terminates them on the remote
56896 + end, leaving the remote host in LAST_ACK state. Because of this
56897 + side-effect and to prevent intentional LAST_ACK DoSes, this
56898 + feature also adds automatic mitigation against such attacks.
56899 + The mitigation drastically reduces the amount of time a socket
56900 + can spend in LAST_ACK state. If you're using haproxy and not
56901 + all servers it connects to have this option enabled, consider
56902 + disabling this feature on the haproxy host.
56903 +
56904 + If the sysctl option is enabled, two sysctl options with names
56905 + "ip_blackhole" and "lastack_retries" will be created.
56906 + While "ip_blackhole" takes the standard zero/non-zero on/off
56907 + toggle, "lastack_retries" uses the same kinds of values as
56908 + "tcp_retries1" and "tcp_retries2". The default value of 4
56909 + prevents a socket from lasting more than 45 seconds in LAST_ACK
56910 + state.
56911 +
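
Where the 45 second figure comes from: with the default lastack_retries value of 4 and the exponential backoff of the TCP retransmission timer, and assuming the classic 3 second initial retransmission timeout of kernels of this generation (an assumption, not something the patch states), the retransmissions land at roughly 3 + 6 + 12 + 24 = 45 seconds before the socket is released.
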
56912 +config GRKERNSEC_SOCKET
56913 + bool "Socket restrictions"
56914 + depends on NET
56915 + help
56916 + If you say Y here, you will be able to choose from several options.
56917 + If you assign a GID on your system and add it to the supplementary
56918 + groups of users you want to restrict socket access to, this patch
56919 + will perform up to three things, based on the option(s) you choose.
56920 +
56921 +config GRKERNSEC_SOCKET_ALL
56922 + bool "Deny any sockets to group"
56923 + depends on GRKERNSEC_SOCKET
56924 + help
56925 + If you say Y here, you will be able to choose a GID whose users will
56926 + be unable to connect to other hosts from your machine or run server
56927 + applications from your machine. If the sysctl option is enabled, a
56928 + sysctl option with name "socket_all" is created.
56929 +
56930 +config GRKERNSEC_SOCKET_ALL_GID
56931 + int "GID to deny all sockets for"
56932 + depends on GRKERNSEC_SOCKET_ALL
56933 + default 1004
56934 + help
56935 + Here you can choose the GID to disable socket access for. Remember to
56936 + add the users you want socket access disabled for to the GID
56937 + specified here. If the sysctl option is enabled, a sysctl option
56938 + with name "socket_all_gid" is created.
56939 +
56940 +config GRKERNSEC_SOCKET_CLIENT
56941 + bool "Deny client sockets to group"
56942 + depends on GRKERNSEC_SOCKET
56943 + help
56944 + If you say Y here, you will be able to choose a GID whose users will
56945 + be unable to connect to other hosts from your machine, but will be
56946 + able to run servers. If this option is enabled, all users in the group
56947 + you specify will have to use passive mode when initiating FTP transfers
56948 + from the shell on your machine. If the sysctl option is enabled, a
56949 + sysctl option with name "socket_client" is created.
56950 +
56951 +config GRKERNSEC_SOCKET_CLIENT_GID
56952 + int "GID to deny client sockets for"
56953 + depends on GRKERNSEC_SOCKET_CLIENT
56954 + default 1003
56955 + help
56956 + Here you can choose the GID to disable client socket access for.
56957 + Remember to add the users you want client socket access disabled for to
56958 + the GID specified here. If the sysctl option is enabled, a sysctl
56959 + option with name "socket_client_gid" is created.
56960 +
56961 +config GRKERNSEC_SOCKET_SERVER
56962 + bool "Deny server sockets to group"
56963 + depends on GRKERNSEC_SOCKET
56964 + help
56965 + If you say Y here, you will be able to choose a GID whose users will
56966 + be unable to run server applications from your machine. If the sysctl
56967 + option is enabled, a sysctl option with name "socket_server" is created.
56968 +
56969 +config GRKERNSEC_SOCKET_SERVER_GID
56970 + int "GID to deny server sockets for"
56971 + depends on GRKERNSEC_SOCKET_SERVER
56972 + default 1002
56973 + help
56974 + Here you can choose the GID to disable server socket access for.
56975 + Remember to add the users you want server socket access disabled for to
56976 + the GID specified here. If the sysctl option is enabled, a sysctl
56977 + option with name "socket_server_gid" is created.
56978 +
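
All three socket options hinge on whether the calling process carries the configured GID among its supplementary groups. A rough userspace illustration of a test of that shape follows (the in-kernel check differs and is not shown; the 64-entry buffer and the GID value 1004 are just convenient defaults taken from the options above):

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>

/* Returns true if the calling process carries 'gid' among its
 * supplementary groups.  64 entries is enough for typical setups. */
static bool in_supplementary_group(gid_t gid)
{
	gid_t groups[64];
	int i, n = getgroups(64, groups);

	for (i = 0; i < n; i++)
		if (groups[i] == gid)
			return true;
	return false;
}

int main(void)
{
	/* 1004 is the default GRKERNSEC_SOCKET_ALL_GID from above. */
	printf("gid 1004: %s\n",
	       in_supplementary_group(1004) ? "member" : "not a member");
	return 0;
}
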
56979 +endmenu
56980 +menu "Sysctl support"
56981 +depends on GRKERNSEC && SYSCTL
56982 +
56983 +config GRKERNSEC_SYSCTL
56984 + bool "Sysctl support"
56985 + help
56986 + If you say Y here, you will be able to change the options that
56987 + grsecurity runs with at bootup, without having to recompile your
56988 + kernel. You can echo values to files in /proc/sys/kernel/grsecurity
56989 + to enable (1) or disable (0) various features. All the sysctl entries
56990 + are mutable until the "grsec_lock" entry is set to a non-zero value.
56991 + All features enabled in the kernel configuration are disabled at boot
56992 + if you do not say Y to the "Turn on features by default" option.
56993 + All options should be set at startup, and the grsec_lock entry should
56994 + be set to a non-zero value after all the options are set.
56995 + *THIS IS EXTREMELY IMPORTANT*
56996 +
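
In practice this means echoing 0 or 1 into the files under /proc/sys/kernel/grsecurity from an init script and then setting grsec_lock last, exactly as the help text above describes. A minimal C sketch of that boot-time sequence (the exec_logging name comes from the EXECLOG option above; run as root, and adjust the toggles to taste):

#include <stdio.h>

/* Write an integer to a grsecurity sysctl under the layout described in
 * the help text above. */
static int grsec_sysctl_set(const char *name, int val)
{
	char path[128];
	FILE *f;

	snprintf(path, sizeof(path), "/proc/sys/kernel/grsecurity/%s", name);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fprintf(f, "%d\n", val);
	return fclose(f);
}

int main(void)
{
	grsec_sysctl_set("exec_logging", 1);	/* example toggle */
	grsec_sysctl_set("grsec_lock", 1);	/* must come last */
	return 0;
}
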
56997 +config GRKERNSEC_SYSCTL_DISTRO
56998 + bool "Extra sysctl support for distro makers (READ HELP)"
56999 + depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
57000 + help
57001 + If you say Y here, additional sysctl options will be created
57002 + for features that affect processes running as root. Therefore,
57003 + it is critical when using this option that the grsec_lock entry be
57004 + enabled after boot. Only distros that ship prebuilt kernel packages
57005 + with this option enabled and that can ensure grsec_lock is enabled
57006 + after boot should use this option.
57007 + *Failure to set grsec_lock after boot makes all grsec features
57008 + this option covers useless*
57009 +
57010 + Currently this option creates the following sysctl entries:
57011 + "Disable Privileged I/O": "disable_priv_io"
57012 +
57013 +config GRKERNSEC_SYSCTL_ON
57014 + bool "Turn on features by default"
57015 + depends on GRKERNSEC_SYSCTL
57016 + help
57017 + If you say Y here, the features enabled in the kernel configuration
57018 + will be turned on at boot time rather than starting out disabled.
57019 + It is recommended you say Y here unless
57020 + there is some reason you would want all sysctl-tunable features to
57021 + be disabled by default. As mentioned elsewhere, it is important
57022 + to enable the grsec_lock entry once you have finished modifying
57023 + the sysctl entries.
57024 +
57025 +endmenu
57026 +menu "Logging Options"
57027 +depends on GRKERNSEC
57028 +
57029 +config GRKERNSEC_FLOODTIME
57030 + int "Seconds in between log messages (minimum)"
57031 + default 10
57032 + help
57033 + This option allows you to enforce the number of seconds between
57034 + grsecurity log messages. The default should be suitable for most
57035 + people; however, if you choose to change it, choose a value small enough
57036 + to allow informative logs to be produced, but large enough to
57037 + prevent flooding.
57038 +
57039 +config GRKERNSEC_FLOODBURST
57040 + int "Number of messages in a burst (maximum)"
57041 + default 6
57042 + help
57043 + This option allows you to choose the maximum number of messages allowed
57044 + within the flood time interval you chose in a separate option. The
57045 + default should be suitable for most people; however, if you find that
57046 + many of your logs are being interpreted as flooding, you may want to
57047 + raise this value.
57048 +
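
The two settings together behave roughly like a time-window rate limiter: at most FLOODBURST messages per FLOODTIME seconds, with the excess suppressed until the window rolls over. A compact userspace approximation of that behaviour (the kernel's implementation differs; the defaults are the ones above):

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define FLOODTIME	10	/* seconds, the default above */
#define FLOODBURST	6	/* messages per window, the default above */

/* Allow at most FLOODBURST messages per FLOODTIME-second window and
 * suppress the rest. */
static bool log_allowed(void)
{
	static time_t window_start;
	static int emitted;
	time_t now = time(NULL);

	if (now - window_start >= FLOODTIME) {
		window_start = now;
		emitted = 0;
	}
	if (emitted >= FLOODBURST)
		return false;
	emitted++;
	return true;
}

int main(void)
{
	int i;

	for (i = 0; i < 10; i++)
		printf("message %d: %s\n", i,
		       log_allowed() ? "logged" : "suppressed (flooding)");
	return 0;
}
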
57049 +endmenu
57050 +
57051 +endmenu
57052 diff -urNp linux-3.0.8/grsecurity/Makefile linux-3.0.8/grsecurity/Makefile
57053 --- linux-3.0.8/grsecurity/Makefile 1969-12-31 19:00:00.000000000 -0500
57054 +++ linux-3.0.8/grsecurity/Makefile 2011-10-17 06:45:43.000000000 -0400
57055 @@ -0,0 +1,36 @@
57056 +# grsecurity's ACL system was originally written in 2001 by Michael Dalton
57057 +# during 2001-2009 it has been completely redesigned by Brad Spengler
57058 +# into an RBAC system
57059 +#
57060 +# All code in this directory and various hooks inserted throughout the kernel
57061 +# are copyright Brad Spengler - Open Source Security, Inc., and released
57062 +# under the GPL v2 or higher
57063 +
57064 +obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
57065 + grsec_mount.o grsec_sig.o grsec_sysctl.o \
57066 + grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
57067 +
57068 +obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
57069 + gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
57070 + gracl_learn.o grsec_log.o
57071 +obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
57072 +
57073 +ifdef CONFIG_NET
57074 +obj-y += grsec_sock.o
57075 +obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
57076 +endif
57077 +
57078 +ifndef CONFIG_GRKERNSEC
57079 +obj-y += grsec_disabled.o
57080 +endif
57081 +
57082 +ifdef CONFIG_GRKERNSEC_HIDESYM
57083 +extra-y := grsec_hidesym.o
57084 +$(obj)/grsec_hidesym.o:
57085 + @-chmod -f 500 /boot
57086 + @-chmod -f 500 /lib/modules
57087 + @-chmod -f 500 /lib64/modules
57088 + @-chmod -f 500 /lib32/modules
57089 + @-chmod -f 700 .
57090 + @echo ' grsec: protected kernel image paths'
57091 +endif
57092 diff -urNp linux-3.0.8/include/acpi/acpi_bus.h linux-3.0.8/include/acpi/acpi_bus.h
57093 --- linux-3.0.8/include/acpi/acpi_bus.h 2011-07-21 22:17:23.000000000 -0400
57094 +++ linux-3.0.8/include/acpi/acpi_bus.h 2011-08-23 21:47:56.000000000 -0400
57095 @@ -107,7 +107,7 @@ struct acpi_device_ops {
57096 acpi_op_bind bind;
57097 acpi_op_unbind unbind;
57098 acpi_op_notify notify;
57099 -};
57100 +} __no_const;
57101
57102 #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */
57103
57104 diff -urNp linux-3.0.8/include/asm-generic/atomic-long.h linux-3.0.8/include/asm-generic/atomic-long.h
57105 --- linux-3.0.8/include/asm-generic/atomic-long.h 2011-07-21 22:17:23.000000000 -0400
57106 +++ linux-3.0.8/include/asm-generic/atomic-long.h 2011-08-23 21:47:56.000000000 -0400
57107 @@ -22,6 +22,12 @@
57108
57109 typedef atomic64_t atomic_long_t;
57110
57111 +#ifdef CONFIG_PAX_REFCOUNT
57112 +typedef atomic64_unchecked_t atomic_long_unchecked_t;
57113 +#else
57114 +typedef atomic64_t atomic_long_unchecked_t;
57115 +#endif
57116 +
57117 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
57118
57119 static inline long atomic_long_read(atomic_long_t *l)
57120 @@ -31,6 +37,15 @@ static inline long atomic_long_read(atom
57121 return (long)atomic64_read(v);
57122 }
57123
57124 +#ifdef CONFIG_PAX_REFCOUNT
57125 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
57126 +{
57127 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57128 +
57129 + return (long)atomic64_read_unchecked(v);
57130 +}
57131 +#endif
57132 +
57133 static inline void atomic_long_set(atomic_long_t *l, long i)
57134 {
57135 atomic64_t *v = (atomic64_t *)l;
57136 @@ -38,6 +53,15 @@ static inline void atomic_long_set(atomi
57137 atomic64_set(v, i);
57138 }
57139
57140 +#ifdef CONFIG_PAX_REFCOUNT
57141 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
57142 +{
57143 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57144 +
57145 + atomic64_set_unchecked(v, i);
57146 +}
57147 +#endif
57148 +
57149 static inline void atomic_long_inc(atomic_long_t *l)
57150 {
57151 atomic64_t *v = (atomic64_t *)l;
57152 @@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomi
57153 atomic64_inc(v);
57154 }
57155
57156 +#ifdef CONFIG_PAX_REFCOUNT
57157 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
57158 +{
57159 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57160 +
57161 + atomic64_inc_unchecked(v);
57162 +}
57163 +#endif
57164 +
57165 static inline void atomic_long_dec(atomic_long_t *l)
57166 {
57167 atomic64_t *v = (atomic64_t *)l;
57168 @@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomi
57169 atomic64_dec(v);
57170 }
57171
57172 +#ifdef CONFIG_PAX_REFCOUNT
57173 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
57174 +{
57175 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57176 +
57177 + atomic64_dec_unchecked(v);
57178 +}
57179 +#endif
57180 +
57181 static inline void atomic_long_add(long i, atomic_long_t *l)
57182 {
57183 atomic64_t *v = (atomic64_t *)l;
57184 @@ -59,6 +101,15 @@ static inline void atomic_long_add(long
57185 atomic64_add(i, v);
57186 }
57187
57188 +#ifdef CONFIG_PAX_REFCOUNT
57189 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
57190 +{
57191 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57192 +
57193 + atomic64_add_unchecked(i, v);
57194 +}
57195 +#endif
57196 +
57197 static inline void atomic_long_sub(long i, atomic_long_t *l)
57198 {
57199 atomic64_t *v = (atomic64_t *)l;
57200 @@ -66,6 +117,15 @@ static inline void atomic_long_sub(long
57201 atomic64_sub(i, v);
57202 }
57203
57204 +#ifdef CONFIG_PAX_REFCOUNT
57205 +static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
57206 +{
57207 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57208 +
57209 + atomic64_sub_unchecked(i, v);
57210 +}
57211 +#endif
57212 +
57213 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
57214 {
57215 atomic64_t *v = (atomic64_t *)l;
57216 @@ -115,6 +175,15 @@ static inline long atomic_long_inc_retur
57217 return (long)atomic64_inc_return(v);
57218 }
57219
57220 +#ifdef CONFIG_PAX_REFCOUNT
57221 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
57222 +{
57223 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57224 +
57225 + return (long)atomic64_inc_return_unchecked(v);
57226 +}
57227 +#endif
57228 +
57229 static inline long atomic_long_dec_return(atomic_long_t *l)
57230 {
57231 atomic64_t *v = (atomic64_t *)l;
57232 @@ -140,6 +209,12 @@ static inline long atomic_long_add_unles
57233
57234 typedef atomic_t atomic_long_t;
57235
57236 +#ifdef CONFIG_PAX_REFCOUNT
57237 +typedef atomic_unchecked_t atomic_long_unchecked_t;
57238 +#else
57239 +typedef atomic_t atomic_long_unchecked_t;
57240 +#endif
57241 +
57242 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
57243 static inline long atomic_long_read(atomic_long_t *l)
57244 {
57245 @@ -148,6 +223,15 @@ static inline long atomic_long_read(atom
57246 return (long)atomic_read(v);
57247 }
57248
57249 +#ifdef CONFIG_PAX_REFCOUNT
57250 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
57251 +{
57252 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57253 +
57254 + return (long)atomic_read_unchecked(v);
57255 +}
57256 +#endif
57257 +
57258 static inline void atomic_long_set(atomic_long_t *l, long i)
57259 {
57260 atomic_t *v = (atomic_t *)l;
57261 @@ -155,6 +239,15 @@ static inline void atomic_long_set(atomi
57262 atomic_set(v, i);
57263 }
57264
57265 +#ifdef CONFIG_PAX_REFCOUNT
57266 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
57267 +{
57268 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57269 +
57270 + atomic_set_unchecked(v, i);
57271 +}
57272 +#endif
57273 +
57274 static inline void atomic_long_inc(atomic_long_t *l)
57275 {
57276 atomic_t *v = (atomic_t *)l;
57277 @@ -162,6 +255,15 @@ static inline void atomic_long_inc(atomi
57278 atomic_inc(v);
57279 }
57280
57281 +#ifdef CONFIG_PAX_REFCOUNT
57282 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
57283 +{
57284 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57285 +
57286 + atomic_inc_unchecked(v);
57287 +}
57288 +#endif
57289 +
57290 static inline void atomic_long_dec(atomic_long_t *l)
57291 {
57292 atomic_t *v = (atomic_t *)l;
57293 @@ -169,6 +271,15 @@ static inline void atomic_long_dec(atomi
57294 atomic_dec(v);
57295 }
57296
57297 +#ifdef CONFIG_PAX_REFCOUNT
57298 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
57299 +{
57300 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57301 +
57302 + atomic_dec_unchecked(v);
57303 +}
57304 +#endif
57305 +
57306 static inline void atomic_long_add(long i, atomic_long_t *l)
57307 {
57308 atomic_t *v = (atomic_t *)l;
57309 @@ -176,6 +287,15 @@ static inline void atomic_long_add(long
57310 atomic_add(i, v);
57311 }
57312
57313 +#ifdef CONFIG_PAX_REFCOUNT
57314 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
57315 +{
57316 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57317 +
57318 + atomic_add_unchecked(i, v);
57319 +}
57320 +#endif
57321 +
57322 static inline void atomic_long_sub(long i, atomic_long_t *l)
57323 {
57324 atomic_t *v = (atomic_t *)l;
57325 @@ -183,6 +303,15 @@ static inline void atomic_long_sub(long
57326 atomic_sub(i, v);
57327 }
57328
57329 +#ifdef CONFIG_PAX_REFCOUNT
57330 +static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
57331 +{
57332 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57333 +
57334 + atomic_sub_unchecked(i, v);
57335 +}
57336 +#endif
57337 +
57338 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
57339 {
57340 atomic_t *v = (atomic_t *)l;
57341 @@ -232,6 +361,15 @@ static inline long atomic_long_inc_retur
57342 return (long)atomic_inc_return(v);
57343 }
57344
57345 +#ifdef CONFIG_PAX_REFCOUNT
57346 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
57347 +{
57348 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57349 +
57350 + return (long)atomic_inc_return_unchecked(v);
57351 +}
57352 +#endif
57353 +
57354 static inline long atomic_long_dec_return(atomic_long_t *l)
57355 {
57356 atomic_t *v = (atomic_t *)l;
57357 @@ -255,4 +393,49 @@ static inline long atomic_long_add_unles
57358
57359 #endif /* BITS_PER_LONG == 64 */
57360
57361 +#ifdef CONFIG_PAX_REFCOUNT
57362 +static inline void pax_refcount_needs_these_functions(void)
57363 +{
57364 + atomic_read_unchecked((atomic_unchecked_t *)NULL);
57365 + atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
57366 + atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
57367 + atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
57368 + atomic_inc_unchecked((atomic_unchecked_t *)NULL);
57369 + (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
57370 + atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
57371 + atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
57372 + atomic_dec_unchecked((atomic_unchecked_t *)NULL);
57373 + atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
57374 + (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
57375 +
57376 + atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
57377 + atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
57378 + atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
57379 + atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
57380 + atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
57381 + atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
57382 + atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
57383 +}
57384 +#else
57385 +#define atomic_read_unchecked(v) atomic_read(v)
57386 +#define atomic_set_unchecked(v, i) atomic_set((v), (i))
57387 +#define atomic_add_unchecked(i, v) atomic_add((i), (v))
57388 +#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
57389 +#define atomic_inc_unchecked(v) atomic_inc(v)
57390 +#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
57391 +#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
57392 +#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
57393 +#define atomic_dec_unchecked(v) atomic_dec(v)
57394 +#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
57395 +#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
57396 +
57397 +#define atomic_long_read_unchecked(v) atomic_long_read(v)
57398 +#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
57399 +#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
57400 +#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
57401 +#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
57402 +#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
57403 +#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
57404 +#endif
57405 +
57406 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
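
The *_unchecked variants above exist so that counters which may legitimately wrap (debug ids, statistics) can opt out of the overflow checking that PAX_REFCOUNT adds to the regular atomic types used for real reference counts. A userspace analogue of the distinction, using the GCC/clang overflow builtin rather than the arch-specific kernel implementation (which is not shown here):

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

/* "Checked" increment in the spirit of PAX_REFCOUNT: the overflow is
 * caught before the wrapped value can be used. */
static int inc_checked(int v)
{
	int out;

	if (__builtin_add_overflow(v, 1, &out)) {
		fprintf(stderr, "refcount overflow caught\n");
		abort();
	}
	return out;
}

/* "Unchecked" increment: simply wraps, like atomic_inc_unchecked(), for
 * counters where wrapping is harmless by design. */
static unsigned int inc_unchecked(unsigned int v)
{
	return v + 1;
}

int main(void)
{
	printf("unchecked: %u\n", inc_unchecked(UINT_MAX));	/* wraps to 0 */
	printf("checked:   %d\n", inc_checked(INT_MAX));	/* aborts here */
	return 0;
}
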
57407 diff -urNp linux-3.0.8/include/asm-generic/cache.h linux-3.0.8/include/asm-generic/cache.h
57408 --- linux-3.0.8/include/asm-generic/cache.h 2011-07-21 22:17:23.000000000 -0400
57409 +++ linux-3.0.8/include/asm-generic/cache.h 2011-08-23 21:47:56.000000000 -0400
57410 @@ -6,7 +6,7 @@
57411 * cache lines need to provide their own cache.h.
57412 */
57413
57414 -#define L1_CACHE_SHIFT 5
57415 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
57416 +#define L1_CACHE_SHIFT 5UL
57417 +#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
57418
57419 #endif /* __ASM_GENERIC_CACHE_H */
57420 diff -urNp linux-3.0.8/include/asm-generic/int-l64.h linux-3.0.8/include/asm-generic/int-l64.h
57421 --- linux-3.0.8/include/asm-generic/int-l64.h 2011-07-21 22:17:23.000000000 -0400
57422 +++ linux-3.0.8/include/asm-generic/int-l64.h 2011-08-23 21:47:56.000000000 -0400
57423 @@ -46,6 +46,8 @@ typedef unsigned int u32;
57424 typedef signed long s64;
57425 typedef unsigned long u64;
57426
57427 +typedef unsigned int intoverflow_t __attribute__ ((mode(TI)));
57428 +
57429 #define S8_C(x) x
57430 #define U8_C(x) x ## U
57431 #define S16_C(x) x
57432 diff -urNp linux-3.0.8/include/asm-generic/int-ll64.h linux-3.0.8/include/asm-generic/int-ll64.h
57433 --- linux-3.0.8/include/asm-generic/int-ll64.h 2011-07-21 22:17:23.000000000 -0400
57434 +++ linux-3.0.8/include/asm-generic/int-ll64.h 2011-08-23 21:47:56.000000000 -0400
57435 @@ -51,6 +51,8 @@ typedef unsigned int u32;
57436 typedef signed long long s64;
57437 typedef unsigned long long u64;
57438
57439 +typedef unsigned long long intoverflow_t;
57440 +
57441 #define S8_C(x) x
57442 #define U8_C(x) x ## U
57443 #define S16_C(x) x
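
The intoverflow_t typedefs added above are simply a type wide enough to hold the product of two native-width size values (128-bit on 64-bit builds via mode(TI), 64-bit otherwise), so allocation-size arithmetic can be checked before it is used. A hedged userspace illustration of that idea; the checked_array_size() helper is made up for this sketch and is not from the patch:

#include <stddef.h>
#include <stdio.h>

/* Wide type mirroring intoverflow_t: 128-bit where available. */
#ifdef __SIZEOF_INT128__
typedef unsigned __int128 intoverflow_t;
#else
typedef unsigned long long intoverflow_t;
#endif

/* Overflow-checked size computation: returns 0 when n * size would not
 * fit in a size_t, so the caller can refuse the allocation. */
static size_t checked_array_size(size_t n, size_t size)
{
	intoverflow_t total = (intoverflow_t)n * size;

	if (total > (size_t)-1)
		return 0;
	return (size_t)total;
}

int main(void)
{
	printf("%zu\n", checked_array_size(1000, 16));		/* 16000 */
	printf("%zu\n", checked_array_size((size_t)-1, 2));	/* 0 */
	return 0;
}
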
57444 diff -urNp linux-3.0.8/include/asm-generic/kmap_types.h linux-3.0.8/include/asm-generic/kmap_types.h
57445 --- linux-3.0.8/include/asm-generic/kmap_types.h 2011-07-21 22:17:23.000000000 -0400
57446 +++ linux-3.0.8/include/asm-generic/kmap_types.h 2011-08-23 21:47:56.000000000 -0400
57447 @@ -29,10 +29,11 @@ KMAP_D(16) KM_IRQ_PTE,
57448 KMAP_D(17) KM_NMI,
57449 KMAP_D(18) KM_NMI_PTE,
57450 KMAP_D(19) KM_KDB,
57451 +KMAP_D(20) KM_CLEARPAGE,
57452 /*
57453 * Remember to update debug_kmap_atomic() when adding new kmap types!
57454 */
57455 -KMAP_D(20) KM_TYPE_NR
57456 +KMAP_D(21) KM_TYPE_NR
57457 };
57458
57459 #undef KMAP_D
57460 diff -urNp linux-3.0.8/include/asm-generic/pgtable.h linux-3.0.8/include/asm-generic/pgtable.h
57461 --- linux-3.0.8/include/asm-generic/pgtable.h 2011-07-21 22:17:23.000000000 -0400
57462 +++ linux-3.0.8/include/asm-generic/pgtable.h 2011-08-23 21:47:56.000000000 -0400
57463 @@ -443,6 +443,14 @@ static inline int pmd_write(pmd_t pmd)
57464 #endif /* __HAVE_ARCH_PMD_WRITE */
57465 #endif
57466
57467 +#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
57468 +static inline unsigned long pax_open_kernel(void) { return 0; }
57469 +#endif
57470 +
57471 +#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
57472 +static inline unsigned long pax_close_kernel(void) { return 0; }
57473 +#endif
57474 +
57475 #endif /* !__ASSEMBLY__ */
57476
57477 #endif /* _ASM_GENERIC_PGTABLE_H */
57478 diff -urNp linux-3.0.8/include/asm-generic/pgtable-nopmd.h linux-3.0.8/include/asm-generic/pgtable-nopmd.h
57479 --- linux-3.0.8/include/asm-generic/pgtable-nopmd.h 2011-07-21 22:17:23.000000000 -0400
57480 +++ linux-3.0.8/include/asm-generic/pgtable-nopmd.h 2011-08-23 21:47:56.000000000 -0400
57481 @@ -1,14 +1,19 @@
57482 #ifndef _PGTABLE_NOPMD_H
57483 #define _PGTABLE_NOPMD_H
57484
57485 -#ifndef __ASSEMBLY__
57486 -
57487 #include <asm-generic/pgtable-nopud.h>
57488
57489 -struct mm_struct;
57490 -
57491 #define __PAGETABLE_PMD_FOLDED
57492
57493 +#define PMD_SHIFT PUD_SHIFT
57494 +#define PTRS_PER_PMD 1
57495 +#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
57496 +#define PMD_MASK (~(PMD_SIZE-1))
57497 +
57498 +#ifndef __ASSEMBLY__
57499 +
57500 +struct mm_struct;
57501 +
57502 /*
57503 * Having the pmd type consist of a pud gets the size right, and allows
57504 * us to conceptually access the pud entry that this pmd is folded into
57505 @@ -16,11 +21,6 @@ struct mm_struct;
57506 */
57507 typedef struct { pud_t pud; } pmd_t;
57508
57509 -#define PMD_SHIFT PUD_SHIFT
57510 -#define PTRS_PER_PMD 1
57511 -#define PMD_SIZE (1UL << PMD_SHIFT)
57512 -#define PMD_MASK (~(PMD_SIZE-1))
57513 -
57514 /*
57515 * The "pud_xxx()" functions here are trivial for a folded two-level
57516 * setup: the pmd is never bad, and a pmd always exists (as it's folded
57517 diff -urNp linux-3.0.8/include/asm-generic/pgtable-nopud.h linux-3.0.8/include/asm-generic/pgtable-nopud.h
57518 --- linux-3.0.8/include/asm-generic/pgtable-nopud.h 2011-07-21 22:17:23.000000000 -0400
57519 +++ linux-3.0.8/include/asm-generic/pgtable-nopud.h 2011-08-23 21:47:56.000000000 -0400
57520 @@ -1,10 +1,15 @@
57521 #ifndef _PGTABLE_NOPUD_H
57522 #define _PGTABLE_NOPUD_H
57523
57524 -#ifndef __ASSEMBLY__
57525 -
57526 #define __PAGETABLE_PUD_FOLDED
57527
57528 +#define PUD_SHIFT PGDIR_SHIFT
57529 +#define PTRS_PER_PUD 1
57530 +#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
57531 +#define PUD_MASK (~(PUD_SIZE-1))
57532 +
57533 +#ifndef __ASSEMBLY__
57534 +
57535 /*
57536 * Having the pud type consist of a pgd gets the size right, and allows
57537 * us to conceptually access the pgd entry that this pud is folded into
57538 @@ -12,11 +17,6 @@
57539 */
57540 typedef struct { pgd_t pgd; } pud_t;
57541
57542 -#define PUD_SHIFT PGDIR_SHIFT
57543 -#define PTRS_PER_PUD 1
57544 -#define PUD_SIZE (1UL << PUD_SHIFT)
57545 -#define PUD_MASK (~(PUD_SIZE-1))
57546 -
57547 /*
57548 * The "pgd_xxx()" functions here are trivial for a folded two-level
57549 * setup: the pud is never bad, and a pud always exists (as it's folded
57550 diff -urNp linux-3.0.8/include/asm-generic/vmlinux.lds.h linux-3.0.8/include/asm-generic/vmlinux.lds.h
57551 --- linux-3.0.8/include/asm-generic/vmlinux.lds.h 2011-07-21 22:17:23.000000000 -0400
57552 +++ linux-3.0.8/include/asm-generic/vmlinux.lds.h 2011-08-23 21:47:56.000000000 -0400
57553 @@ -217,6 +217,7 @@
57554 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
57555 VMLINUX_SYMBOL(__start_rodata) = .; \
57556 *(.rodata) *(.rodata.*) \
57557 + *(.data..read_only) \
57558 *(__vermagic) /* Kernel version magic */ \
57559 . = ALIGN(8); \
57560 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
57561 @@ -723,17 +724,18 @@
57562 * section in the linker script will go there too. @phdr should have
57563 * a leading colon.
57564 *
57565 - * Note that this macros defines __per_cpu_load as an absolute symbol.
57566 + * Note that this macros defines per_cpu_load as an absolute symbol.
57567 * If there is no need to put the percpu section at a predetermined
57568 * address, use PERCPU_SECTION.
57569 */
57570 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
57571 - VMLINUX_SYMBOL(__per_cpu_load) = .; \
57572 - .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
57573 + per_cpu_load = .; \
57574 + .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
57575 - LOAD_OFFSET) { \
57576 + VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
57577 PERCPU_INPUT(cacheline) \
57578 } phdr \
57579 - . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
57580 + . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
57581
57582 /**
57583 * PERCPU_SECTION - define output section for percpu area, simple version
57584 diff -urNp linux-3.0.8/include/drm/drm_crtc_helper.h linux-3.0.8/include/drm/drm_crtc_helper.h
57585 --- linux-3.0.8/include/drm/drm_crtc_helper.h 2011-07-21 22:17:23.000000000 -0400
57586 +++ linux-3.0.8/include/drm/drm_crtc_helper.h 2011-08-23 21:47:56.000000000 -0400
57587 @@ -74,7 +74,7 @@ struct drm_crtc_helper_funcs {
57588
57589 /* disable crtc when not in use - more explicit than dpms off */
57590 void (*disable)(struct drm_crtc *crtc);
57591 -};
57592 +} __no_const;
57593
57594 struct drm_encoder_helper_funcs {
57595 void (*dpms)(struct drm_encoder *encoder, int mode);
57596 @@ -95,7 +95,7 @@ struct drm_encoder_helper_funcs {
57597 struct drm_connector *connector);
57598 /* disable encoder when not in use - more explicit than dpms off */
57599 void (*disable)(struct drm_encoder *encoder);
57600 -};
57601 +} __no_const;
57602
57603 struct drm_connector_helper_funcs {
57604 int (*get_modes)(struct drm_connector *connector);
57605 diff -urNp linux-3.0.8/include/drm/drmP.h linux-3.0.8/include/drm/drmP.h
57606 --- linux-3.0.8/include/drm/drmP.h 2011-07-21 22:17:23.000000000 -0400
57607 +++ linux-3.0.8/include/drm/drmP.h 2011-08-23 21:47:56.000000000 -0400
57608 @@ -73,6 +73,7 @@
57609 #include <linux/workqueue.h>
57610 #include <linux/poll.h>
57611 #include <asm/pgalloc.h>
57612 +#include <asm/local.h>
57613 #include "drm.h"
57614
57615 #include <linux/idr.h>
57616 @@ -1033,7 +1034,7 @@ struct drm_device {
57617
57618 /** \name Usage Counters */
57619 /*@{ */
57620 - int open_count; /**< Outstanding files open */
57621 + local_t open_count; /**< Outstanding files open */
57622 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
57623 atomic_t vma_count; /**< Outstanding vma areas open */
57624 int buf_use; /**< Buffers in use -- cannot alloc */
57625 @@ -1044,7 +1045,7 @@ struct drm_device {
57626 /*@{ */
57627 unsigned long counters;
57628 enum drm_stat_type types[15];
57629 - atomic_t counts[15];
57630 + atomic_unchecked_t counts[15];
57631 /*@} */
57632
57633 struct list_head filelist;
57634 diff -urNp linux-3.0.8/include/drm/ttm/ttm_memory.h linux-3.0.8/include/drm/ttm/ttm_memory.h
57635 --- linux-3.0.8/include/drm/ttm/ttm_memory.h 2011-07-21 22:17:23.000000000 -0400
57636 +++ linux-3.0.8/include/drm/ttm/ttm_memory.h 2011-08-23 21:47:56.000000000 -0400
57637 @@ -47,7 +47,7 @@
57638
57639 struct ttm_mem_shrink {
57640 int (*do_shrink) (struct ttm_mem_shrink *);
57641 -};
57642 +} __no_const;
57643
57644 /**
57645 * struct ttm_mem_global - Global memory accounting structure.
57646 diff -urNp linux-3.0.8/include/linux/a.out.h linux-3.0.8/include/linux/a.out.h
57647 --- linux-3.0.8/include/linux/a.out.h 2011-07-21 22:17:23.000000000 -0400
57648 +++ linux-3.0.8/include/linux/a.out.h 2011-08-23 21:47:56.000000000 -0400
57649 @@ -39,6 +39,14 @@ enum machine_type {
57650 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
57651 };
57652
57653 +/* Constants for the N_FLAGS field */
57654 +#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
57655 +#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
57656 +#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
57657 +#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
57658 +/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
57659 +#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
57660 +
57661 #if !defined (N_MAGIC)
57662 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
57663 #endif
57664 diff -urNp linux-3.0.8/include/linux/atmdev.h linux-3.0.8/include/linux/atmdev.h
57665 --- linux-3.0.8/include/linux/atmdev.h 2011-07-21 22:17:23.000000000 -0400
57666 +++ linux-3.0.8/include/linux/atmdev.h 2011-08-23 21:47:56.000000000 -0400
57667 @@ -237,7 +237,7 @@ struct compat_atm_iobuf {
57668 #endif
57669
57670 struct k_atm_aal_stats {
57671 -#define __HANDLE_ITEM(i) atomic_t i
57672 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
57673 __AAL_STAT_ITEMS
57674 #undef __HANDLE_ITEM
57675 };
57676 diff -urNp linux-3.0.8/include/linux/binfmts.h linux-3.0.8/include/linux/binfmts.h
57677 --- linux-3.0.8/include/linux/binfmts.h 2011-07-21 22:17:23.000000000 -0400
57678 +++ linux-3.0.8/include/linux/binfmts.h 2011-08-23 21:47:56.000000000 -0400
57679 @@ -88,6 +88,7 @@ struct linux_binfmt {
57680 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
57681 int (*load_shlib)(struct file *);
57682 int (*core_dump)(struct coredump_params *cprm);
57683 + void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
57684 unsigned long min_coredump; /* minimal dump size */
57685 };
57686
57687 diff -urNp linux-3.0.8/include/linux/blkdev.h linux-3.0.8/include/linux/blkdev.h
57688 --- linux-3.0.8/include/linux/blkdev.h 2011-07-21 22:17:23.000000000 -0400
57689 +++ linux-3.0.8/include/linux/blkdev.h 2011-08-26 19:49:56.000000000 -0400
57690 @@ -1308,7 +1308,7 @@ struct block_device_operations {
57691 /* this callback is with swap_lock and sometimes page table lock held */
57692 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
57693 struct module *owner;
57694 -};
57695 +} __do_const;
57696
57697 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
57698 unsigned long);
57699 diff -urNp linux-3.0.8/include/linux/blktrace_api.h linux-3.0.8/include/linux/blktrace_api.h
57700 --- linux-3.0.8/include/linux/blktrace_api.h 2011-07-21 22:17:23.000000000 -0400
57701 +++ linux-3.0.8/include/linux/blktrace_api.h 2011-08-23 21:47:56.000000000 -0400
57702 @@ -161,7 +161,7 @@ struct blk_trace {
57703 struct dentry *dir;
57704 struct dentry *dropped_file;
57705 struct dentry *msg_file;
57706 - atomic_t dropped;
57707 + atomic_unchecked_t dropped;
57708 };
57709
57710 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
57711 diff -urNp linux-3.0.8/include/linux/byteorder/little_endian.h linux-3.0.8/include/linux/byteorder/little_endian.h
57712 --- linux-3.0.8/include/linux/byteorder/little_endian.h 2011-07-21 22:17:23.000000000 -0400
57713 +++ linux-3.0.8/include/linux/byteorder/little_endian.h 2011-08-23 21:47:56.000000000 -0400
57714 @@ -42,51 +42,51 @@
57715
57716 static inline __le64 __cpu_to_le64p(const __u64 *p)
57717 {
57718 - return (__force __le64)*p;
57719 + return (__force const __le64)*p;
57720 }
57721 static inline __u64 __le64_to_cpup(const __le64 *p)
57722 {
57723 - return (__force __u64)*p;
57724 + return (__force const __u64)*p;
57725 }
57726 static inline __le32 __cpu_to_le32p(const __u32 *p)
57727 {
57728 - return (__force __le32)*p;
57729 + return (__force const __le32)*p;
57730 }
57731 static inline __u32 __le32_to_cpup(const __le32 *p)
57732 {
57733 - return (__force __u32)*p;
57734 + return (__force const __u32)*p;
57735 }
57736 static inline __le16 __cpu_to_le16p(const __u16 *p)
57737 {
57738 - return (__force __le16)*p;
57739 + return (__force const __le16)*p;
57740 }
57741 static inline __u16 __le16_to_cpup(const __le16 *p)
57742 {
57743 - return (__force __u16)*p;
57744 + return (__force const __u16)*p;
57745 }
57746 static inline __be64 __cpu_to_be64p(const __u64 *p)
57747 {
57748 - return (__force __be64)__swab64p(p);
57749 + return (__force const __be64)__swab64p(p);
57750 }
57751 static inline __u64 __be64_to_cpup(const __be64 *p)
57752 {
57753 - return __swab64p((__u64 *)p);
57754 + return __swab64p((const __u64 *)p);
57755 }
57756 static inline __be32 __cpu_to_be32p(const __u32 *p)
57757 {
57758 - return (__force __be32)__swab32p(p);
57759 + return (__force const __be32)__swab32p(p);
57760 }
57761 static inline __u32 __be32_to_cpup(const __be32 *p)
57762 {
57763 - return __swab32p((__u32 *)p);
57764 + return __swab32p((const __u32 *)p);
57765 }
57766 static inline __be16 __cpu_to_be16p(const __u16 *p)
57767 {
57768 - return (__force __be16)__swab16p(p);
57769 + return (__force const __be16)__swab16p(p);
57770 }
57771 static inline __u16 __be16_to_cpup(const __be16 *p)
57772 {
57773 - return __swab16p((__u16 *)p);
57774 + return __swab16p((const __u16 *)p);
57775 }
57776 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
57777 #define __le64_to_cpus(x) do { (void)(x); } while (0)
57778 diff -urNp linux-3.0.8/include/linux/cache.h linux-3.0.8/include/linux/cache.h
57779 --- linux-3.0.8/include/linux/cache.h 2011-07-21 22:17:23.000000000 -0400
57780 +++ linux-3.0.8/include/linux/cache.h 2011-08-23 21:47:56.000000000 -0400
57781 @@ -16,6 +16,10 @@
57782 #define __read_mostly
57783 #endif
57784
57785 +#ifndef __read_only
57786 +#define __read_only __read_mostly
57787 +#endif
57788 +
57789 #ifndef ____cacheline_aligned
57790 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
57791 #endif
57792 diff -urNp linux-3.0.8/include/linux/capability.h linux-3.0.8/include/linux/capability.h
57793 --- linux-3.0.8/include/linux/capability.h 2011-07-21 22:17:23.000000000 -0400
57794 +++ linux-3.0.8/include/linux/capability.h 2011-08-23 21:48:14.000000000 -0400
57795 @@ -547,6 +547,9 @@ extern bool capable(int cap);
57796 extern bool ns_capable(struct user_namespace *ns, int cap);
57797 extern bool task_ns_capable(struct task_struct *t, int cap);
57798 extern bool nsown_capable(int cap);
57799 +extern bool task_ns_capable_nolog(struct task_struct *t, int cap);
57800 +extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
57801 +extern bool capable_nolog(int cap);
57802
57803 /* audit system wants to get cap info from files as well */
57804 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
57805 diff -urNp linux-3.0.8/include/linux/cleancache.h linux-3.0.8/include/linux/cleancache.h
57806 --- linux-3.0.8/include/linux/cleancache.h 2011-07-21 22:17:23.000000000 -0400
57807 +++ linux-3.0.8/include/linux/cleancache.h 2011-08-23 21:47:56.000000000 -0400
57808 @@ -31,7 +31,7 @@ struct cleancache_ops {
57809 void (*flush_page)(int, struct cleancache_filekey, pgoff_t);
57810 void (*flush_inode)(int, struct cleancache_filekey);
57811 void (*flush_fs)(int);
57812 -};
57813 +} __no_const;
57814
57815 extern struct cleancache_ops
57816 cleancache_register_ops(struct cleancache_ops *ops);
57817 diff -urNp linux-3.0.8/include/linux/compiler-gcc4.h linux-3.0.8/include/linux/compiler-gcc4.h
57818 --- linux-3.0.8/include/linux/compiler-gcc4.h 2011-07-21 22:17:23.000000000 -0400
57819 +++ linux-3.0.8/include/linux/compiler-gcc4.h 2011-08-26 19:49:56.000000000 -0400
57820 @@ -31,6 +31,12 @@
57821
57822
57823 #if __GNUC_MINOR__ >= 5
57824 +
57825 +#ifdef CONSTIFY_PLUGIN
57826 +#define __no_const __attribute__((no_const))
57827 +#define __do_const __attribute__((do_const))
57828 +#endif
57829 +
57830 /*
57831 * Mark a position in code as unreachable. This can be used to
57832 * suppress control flow warnings after asm blocks that transfer
57833 @@ -46,6 +52,11 @@
57834 #define __noclone __attribute__((__noclone__))
57835
57836 #endif
57837 +
57838 +#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
57839 +#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
57840 +#define __bos0(ptr) __bos((ptr), 0)
57841 +#define __bos1(ptr) __bos((ptr), 1)
57842 #endif
57843
57844 #if __GNUC_MINOR__ > 0
57845 diff -urNp linux-3.0.8/include/linux/compiler.h linux-3.0.8/include/linux/compiler.h
57846 --- linux-3.0.8/include/linux/compiler.h 2011-07-21 22:17:23.000000000 -0400
57847 +++ linux-3.0.8/include/linux/compiler.h 2011-10-06 04:17:55.000000000 -0400
57848 @@ -5,31 +5,62 @@
57849
57850 #ifdef __CHECKER__
57851 # define __user __attribute__((noderef, address_space(1)))
57852 +# define __force_user __force __user
57853 # define __kernel __attribute__((address_space(0)))
57854 +# define __force_kernel __force __kernel
57855 # define __safe __attribute__((safe))
57856 # define __force __attribute__((force))
57857 # define __nocast __attribute__((nocast))
57858 # define __iomem __attribute__((noderef, address_space(2)))
57859 +# define __force_iomem __force __iomem
57860 # define __acquires(x) __attribute__((context(x,0,1)))
57861 # define __releases(x) __attribute__((context(x,1,0)))
57862 # define __acquire(x) __context__(x,1)
57863 # define __release(x) __context__(x,-1)
57864 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
57865 # define __percpu __attribute__((noderef, address_space(3)))
57866 +# define __force_percpu __force __percpu
57867 #ifdef CONFIG_SPARSE_RCU_POINTER
57868 # define __rcu __attribute__((noderef, address_space(4)))
57869 +# define __force_rcu __force __rcu
57870 #else
57871 # define __rcu
57872 +# define __force_rcu
57873 #endif
57874 extern void __chk_user_ptr(const volatile void __user *);
57875 extern void __chk_io_ptr(const volatile void __iomem *);
57876 +#elif defined(CHECKER_PLUGIN)
57877 +//# define __user
57878 +//# define __force_user
57879 +//# define __kernel
57880 +//# define __force_kernel
57881 +# define __safe
57882 +# define __force
57883 +# define __nocast
57884 +# define __iomem
57885 +# define __force_iomem
57886 +# define __chk_user_ptr(x) (void)0
57887 +# define __chk_io_ptr(x) (void)0
57888 +# define __builtin_warning(x, y...) (1)
57889 +# define __acquires(x)
57890 +# define __releases(x)
57891 +# define __acquire(x) (void)0
57892 +# define __release(x) (void)0
57893 +# define __cond_lock(x,c) (c)
57894 +# define __percpu
57895 +# define __force_percpu
57896 +# define __rcu
57897 +# define __force_rcu
57898 #else
57899 # define __user
57900 +# define __force_user
57901 # define __kernel
57902 +# define __force_kernel
57903 # define __safe
57904 # define __force
57905 # define __nocast
57906 # define __iomem
57907 +# define __force_iomem
57908 # define __chk_user_ptr(x) (void)0
57909 # define __chk_io_ptr(x) (void)0
57910 # define __builtin_warning(x, y...) (1)
57911 @@ -39,7 +70,9 @@ extern void __chk_io_ptr(const volatile
57912 # define __release(x) (void)0
57913 # define __cond_lock(x,c) (c)
57914 # define __percpu
57915 +# define __force_percpu
57916 # define __rcu
57917 +# define __force_rcu
57918 #endif
57919
57920 #ifdef __KERNEL__
57921 @@ -264,6 +297,14 @@ void ftrace_likely_update(struct ftrace_
57922 # define __attribute_const__ /* unimplemented */
57923 #endif
57924
57925 +#ifndef __no_const
57926 +# define __no_const
57927 +#endif
57928 +
57929 +#ifndef __do_const
57930 +# define __do_const
57931 +#endif
57932 +
57933 /*
57934 * Tell gcc if a function is cold. The compiler will assume any path
57935 * directly leading to the call is unlikely.
57936 @@ -273,6 +314,22 @@ void ftrace_likely_update(struct ftrace_
57937 #define __cold
57938 #endif
57939
57940 +#ifndef __alloc_size
57941 +#define __alloc_size(...)
57942 +#endif
57943 +
57944 +#ifndef __bos
57945 +#define __bos(ptr, arg)
57946 +#endif
57947 +
57948 +#ifndef __bos0
57949 +#define __bos0(ptr)
57950 +#endif
57951 +
57952 +#ifndef __bos1
57953 +#define __bos1(ptr)
57954 +#endif
57955 +
57956 /* Simple shorthand for a section definition */
57957 #ifndef __section
57958 # define __section(S) __attribute__ ((__section__(#S)))
57959 @@ -306,6 +363,7 @@ void ftrace_likely_update(struct ftrace_
57960 * use is to mediate communication between process-level code and irq/NMI
57961 * handlers, all running on the same CPU.
57962 */
57963 -#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
57964 +#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
57965 +#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
57966
57967 #endif /* __LINUX_COMPILER_H */
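
Taken together, __do_const and __no_const feed the constify GCC plugin (structures made up of function pointers are treated as read-only unless explicitly opted out), while the ACCESS_ONCE change makes the common macro yield a const-qualified access and reserves ACCESS_ONCE_RW for sites that genuinely write. A small standalone illustration of the ops-structure idea, without the plugin (the attributes only take effect when the plugin is loaded; the struct and function names below are invented for this sketch):

#include <stdio.h>

struct msg_ops {
	void (*emit)(const char *msg);
};

static void emit_stdout(const char *msg)
{
	puts(msg);
}

/* With the constify plugin, an all-function-pointer struct is treated as
 * if it were declared const like this one, so a stray kernel write cannot
 * redirect the callbacks. */
static const struct msg_ops good_ops = {
	.emit = emit_stdout,
};

/* Structures that genuinely need runtime rewiring are marked __no_const
 * in the patch; the analogue here is simply leaving off the const. */
static struct msg_ops mutable_ops = {
	.emit = emit_stdout,
};

int main(void)
{
	good_ops.emit("constified ops");
	mutable_ops.emit = emit_stdout;	/* allowed: not constified */
	mutable_ops.emit("mutable ops");
	/* good_ops.emit = emit_stdout;	   would fail to compile */
	return 0;
}
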
57968 diff -urNp linux-3.0.8/include/linux/cpuset.h linux-3.0.8/include/linux/cpuset.h
57969 --- linux-3.0.8/include/linux/cpuset.h 2011-07-21 22:17:23.000000000 -0400
57970 +++ linux-3.0.8/include/linux/cpuset.h 2011-08-23 21:47:56.000000000 -0400
57971 @@ -118,7 +118,7 @@ static inline void put_mems_allowed(void
57972 * nodemask.
57973 */
57974 smp_mb();
57975 - --ACCESS_ONCE(current->mems_allowed_change_disable);
57976 + --ACCESS_ONCE_RW(current->mems_allowed_change_disable);
57977 }
57978
57979 static inline void set_mems_allowed(nodemask_t nodemask)
57980 diff -urNp linux-3.0.8/include/linux/crypto.h linux-3.0.8/include/linux/crypto.h
57981 --- linux-3.0.8/include/linux/crypto.h 2011-07-21 22:17:23.000000000 -0400
57982 +++ linux-3.0.8/include/linux/crypto.h 2011-08-23 21:47:56.000000000 -0400
57983 @@ -361,7 +361,7 @@ struct cipher_tfm {
57984 const u8 *key, unsigned int keylen);
57985 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
57986 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
57987 -};
57988 +} __no_const;
57989
57990 struct hash_tfm {
57991 int (*init)(struct hash_desc *desc);
57992 @@ -382,13 +382,13 @@ struct compress_tfm {
57993 int (*cot_decompress)(struct crypto_tfm *tfm,
57994 const u8 *src, unsigned int slen,
57995 u8 *dst, unsigned int *dlen);
57996 -};
57997 +} __no_const;
57998
57999 struct rng_tfm {
58000 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
58001 unsigned int dlen);
58002 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
58003 -};
58004 +} __no_const;
58005
58006 #define crt_ablkcipher crt_u.ablkcipher
58007 #define crt_aead crt_u.aead
58008 diff -urNp linux-3.0.8/include/linux/decompress/mm.h linux-3.0.8/include/linux/decompress/mm.h
58009 --- linux-3.0.8/include/linux/decompress/mm.h 2011-07-21 22:17:23.000000000 -0400
58010 +++ linux-3.0.8/include/linux/decompress/mm.h 2011-08-23 21:47:56.000000000 -0400
58011 @@ -77,7 +77,7 @@ static void free(void *where)
58012 * warnings when not needed (indeed large_malloc / large_free are not
58013 * needed by inflate */
58014
58015 -#define malloc(a) kmalloc(a, GFP_KERNEL)
58016 +#define malloc(a) kmalloc((a), GFP_KERNEL)
58017 #define free(a) kfree(a)
58018
58019 #define large_malloc(a) vmalloc(a)
58020 diff -urNp linux-3.0.8/include/linux/dma-mapping.h linux-3.0.8/include/linux/dma-mapping.h
58021 --- linux-3.0.8/include/linux/dma-mapping.h 2011-07-21 22:17:23.000000000 -0400
58022 +++ linux-3.0.8/include/linux/dma-mapping.h 2011-08-26 19:49:56.000000000 -0400
58023 @@ -50,7 +50,7 @@ struct dma_map_ops {
58024 int (*dma_supported)(struct device *dev, u64 mask);
58025 int (*set_dma_mask)(struct device *dev, u64 mask);
58026 int is_phys;
58027 -};
58028 +} __do_const;
58029
58030 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
58031
58032 diff -urNp linux-3.0.8/include/linux/efi.h linux-3.0.8/include/linux/efi.h
58033 --- linux-3.0.8/include/linux/efi.h 2011-07-21 22:17:23.000000000 -0400
58034 +++ linux-3.0.8/include/linux/efi.h 2011-08-23 21:47:56.000000000 -0400
58035 @@ -410,7 +410,7 @@ struct efivar_operations {
58036 efi_get_variable_t *get_variable;
58037 efi_get_next_variable_t *get_next_variable;
58038 efi_set_variable_t *set_variable;
58039 -};
58040 +} __no_const;
58041
58042 struct efivars {
58043 /*
58044 diff -urNp linux-3.0.8/include/linux/elf.h linux-3.0.8/include/linux/elf.h
58045 --- linux-3.0.8/include/linux/elf.h 2011-07-21 22:17:23.000000000 -0400
58046 +++ linux-3.0.8/include/linux/elf.h 2011-08-23 21:47:56.000000000 -0400
58047 @@ -49,6 +49,17 @@ typedef __s64 Elf64_Sxword;
58048 #define PT_GNU_EH_FRAME 0x6474e550
58049
58050 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
58051 +#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
58052 +
58053 +#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
58054 +
58055 +/* Constants for the e_flags field */
58056 +#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
58057 +#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
58058 +#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
58059 +#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
58060 +/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
58061 +#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
58062
58063 /*
58064 * Extended Numbering
58065 @@ -106,6 +117,8 @@ typedef __s64 Elf64_Sxword;
58066 #define DT_DEBUG 21
58067 #define DT_TEXTREL 22
58068 #define DT_JMPREL 23
58069 +#define DT_FLAGS 30
58070 + #define DF_TEXTREL 0x00000004
58071 #define DT_ENCODING 32
58072 #define OLD_DT_LOOS 0x60000000
58073 #define DT_LOOS 0x6000000d
58074 @@ -252,6 +265,19 @@ typedef struct elf64_hdr {
58075 #define PF_W 0x2
58076 #define PF_X 0x1
58077
58078 +#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
58079 +#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
58080 +#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
58081 +#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
58082 +#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
58083 +#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
58084 +/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
58085 +/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
58086 +#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
58087 +#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
58088 +#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
58089 +#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
58090 +
58091 typedef struct elf32_phdr{
58092 Elf32_Word p_type;
58093 Elf32_Off p_offset;
58094 @@ -344,6 +370,8 @@ typedef struct elf64_shdr {
58095 #define EI_OSABI 7
58096 #define EI_PAD 8
58097
58098 +#define EI_PAX 14
58099 +
58100 #define ELFMAG0 0x7f /* EI_MAG */
58101 #define ELFMAG1 'E'
58102 #define ELFMAG2 'L'
58103 @@ -422,6 +450,7 @@ extern Elf32_Dyn _DYNAMIC [];
58104 #define elf_note elf32_note
58105 #define elf_addr_t Elf32_Off
58106 #define Elf_Half Elf32_Half
58107 +#define elf_dyn Elf32_Dyn
58108
58109 #else
58110
58111 @@ -432,6 +461,7 @@ extern Elf64_Dyn _DYNAMIC [];
58112 #define elf_note elf64_note
58113 #define elf_addr_t Elf64_Off
58114 #define Elf_Half Elf64_Half
58115 +#define elf_dyn Elf64_Dyn
58116
58117 #endif
58118
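
The new program-header type and flag bits can be inspected from userspace with the standard ELF structures. The reader below is a sketch only: PT_PAX_FLAGS and the PF_* values are taken from the definitions above, it assumes a 64-bit ELF file, and the paxctl utility is the usual tool for actually viewing or changing these flags:

#include <elf.h>
#include <stdio.h>
#include <string.h>

#define PT_PAX_FLAGS	(PT_LOOS + 0x5041580)
#define PF_MPROTECT	(1U << 8)
#define PF_NOMPROTECT	(1U << 9)

/* Print the PaX flags word of a 64-bit ELF file, if present. */
int main(int argc, char **argv)
{
	Elf64_Ehdr eh;
	Elf64_Phdr ph;
	FILE *f;
	int i;

	if (argc != 2 || !(f = fopen(argv[1], "rb")))
		return 1;
	if (fread(&eh, sizeof(eh), 1, f) != 1 ||
	    memcmp(eh.e_ident, ELFMAG, SELFMAG) ||
	    eh.e_ident[EI_CLASS] != ELFCLASS64)
		return 1;
	fseek(f, eh.e_phoff, SEEK_SET);
	for (i = 0; i < eh.e_phnum; i++) {
		if (fread(&ph, sizeof(ph), 1, f) != 1)
			break;
		if (ph.p_type == PT_PAX_FLAGS) {
			printf("PaX flags: 0x%x (MPROTECT %s)\n",
			       (unsigned)ph.p_flags,
			       (ph.p_flags & PF_NOMPROTECT) ? "off" : "on");
			return 0;
		}
	}
	printf("no PT_PAX_FLAGS header\n");
	return 0;
}

Run against a PaX-marked binary it prints the raw flags word; most unmarked distribution binaries will simply report no header.
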
58119 diff -urNp linux-3.0.8/include/linux/firewire.h linux-3.0.8/include/linux/firewire.h
58120 --- linux-3.0.8/include/linux/firewire.h 2011-07-21 22:17:23.000000000 -0400
58121 +++ linux-3.0.8/include/linux/firewire.h 2011-08-23 21:47:56.000000000 -0400
58122 @@ -428,7 +428,7 @@ struct fw_iso_context {
58123 union {
58124 fw_iso_callback_t sc;
58125 fw_iso_mc_callback_t mc;
58126 - } callback;
58127 + } __no_const callback;
58128 void *callback_data;
58129 };
58130
58131 diff -urNp linux-3.0.8/include/linux/fscache-cache.h linux-3.0.8/include/linux/fscache-cache.h
58132 --- linux-3.0.8/include/linux/fscache-cache.h 2011-07-21 22:17:23.000000000 -0400
58133 +++ linux-3.0.8/include/linux/fscache-cache.h 2011-08-23 21:47:56.000000000 -0400
58134 @@ -102,7 +102,7 @@ struct fscache_operation {
58135 fscache_operation_release_t release;
58136 };
58137
58138 -extern atomic_t fscache_op_debug_id;
58139 +extern atomic_unchecked_t fscache_op_debug_id;
58140 extern void fscache_op_work_func(struct work_struct *work);
58141
58142 extern void fscache_enqueue_operation(struct fscache_operation *);
58143 @@ -122,7 +122,7 @@ static inline void fscache_operation_ini
58144 {
58145 INIT_WORK(&op->work, fscache_op_work_func);
58146 atomic_set(&op->usage, 1);
58147 - op->debug_id = atomic_inc_return(&fscache_op_debug_id);
58148 + op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
58149 op->processor = processor;
58150 op->release = release;
58151 INIT_LIST_HEAD(&op->pend_link);
58152 diff -urNp linux-3.0.8/include/linux/fs.h linux-3.0.8/include/linux/fs.h
58153 --- linux-3.0.8/include/linux/fs.h 2011-07-21 22:17:23.000000000 -0400
58154 +++ linux-3.0.8/include/linux/fs.h 2011-08-26 19:49:56.000000000 -0400
58155 @@ -109,6 +109,11 @@ struct inodes_stat_t {
58156 /* File was opened by fanotify and shouldn't generate fanotify events */
58157 #define FMODE_NONOTIFY ((__force fmode_t)0x1000000)
58158
58159 +/* Hack for grsec so as not to require read permission simply to execute
58160 + * a binary
58161 + */
58162 +#define FMODE_GREXEC ((__force fmode_t)0x2000000)
58163 +
58164 /*
58165 * The below are the various read and write types that we support. Some of
58166 * them include behavioral modifiers that send information down to the
58167 @@ -1571,7 +1576,8 @@ struct file_operations {
58168 int (*setlease)(struct file *, long, struct file_lock **);
58169 long (*fallocate)(struct file *file, int mode, loff_t offset,
58170 loff_t len);
58171 -};
58172 +} __do_const;
58173 +typedef struct file_operations __no_const file_operations_no_const;
58174
58175 #define IPERM_FLAG_RCU 0x0001
58176
58177 diff -urNp linux-3.0.8/include/linux/fsnotify.h linux-3.0.8/include/linux/fsnotify.h
58178 --- linux-3.0.8/include/linux/fsnotify.h 2011-07-21 22:17:23.000000000 -0400
58179 +++ linux-3.0.8/include/linux/fsnotify.h 2011-08-24 18:10:29.000000000 -0400
58180 @@ -314,7 +314,7 @@ static inline void fsnotify_change(struc
58181 */
58182 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
58183 {
58184 - return kstrdup(name, GFP_KERNEL);
58185 + return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
58186 }
58187
58188 /*
58189 diff -urNp linux-3.0.8/include/linux/fs_struct.h linux-3.0.8/include/linux/fs_struct.h
58190 --- linux-3.0.8/include/linux/fs_struct.h 2011-07-21 22:17:23.000000000 -0400
58191 +++ linux-3.0.8/include/linux/fs_struct.h 2011-08-23 21:47:56.000000000 -0400
58192 @@ -6,7 +6,7 @@
58193 #include <linux/seqlock.h>
58194
58195 struct fs_struct {
58196 - int users;
58197 + atomic_t users;
58198 spinlock_t lock;
58199 seqcount_t seq;
58200 int umask;
58201 diff -urNp linux-3.0.8/include/linux/ftrace_event.h linux-3.0.8/include/linux/ftrace_event.h
58202 --- linux-3.0.8/include/linux/ftrace_event.h 2011-07-21 22:17:23.000000000 -0400
58203 +++ linux-3.0.8/include/linux/ftrace_event.h 2011-08-23 21:47:56.000000000 -0400
58204 @@ -96,7 +96,7 @@ struct trace_event_functions {
58205 trace_print_func raw;
58206 trace_print_func hex;
58207 trace_print_func binary;
58208 -};
58209 +} __no_const;
58210
58211 struct trace_event {
58212 struct hlist_node node;
58213 @@ -247,7 +247,7 @@ extern int trace_define_field(struct ftr
58214 extern int trace_add_event_call(struct ftrace_event_call *call);
58215 extern void trace_remove_event_call(struct ftrace_event_call *call);
58216
58217 -#define is_signed_type(type) (((type)(-1)) < 0)
58218 +#define is_signed_type(type) (((type)(-1)) < (type)1)
58219
58220 int trace_set_clr_event(const char *system, const char *event, int set);
58221
58222 diff -urNp linux-3.0.8/include/linux/genhd.h linux-3.0.8/include/linux/genhd.h
58223 --- linux-3.0.8/include/linux/genhd.h 2011-07-21 22:17:23.000000000 -0400
58224 +++ linux-3.0.8/include/linux/genhd.h 2011-08-23 21:47:56.000000000 -0400
58225 @@ -184,7 +184,7 @@ struct gendisk {
58226 struct kobject *slave_dir;
58227
58228 struct timer_rand_state *random;
58229 - atomic_t sync_io; /* RAID */
58230 + atomic_unchecked_t sync_io; /* RAID */
58231 struct disk_events *ev;
58232 #ifdef CONFIG_BLK_DEV_INTEGRITY
58233 struct blk_integrity *integrity;
58234 diff -urNp linux-3.0.8/include/linux/gracl.h linux-3.0.8/include/linux/gracl.h
58235 --- linux-3.0.8/include/linux/gracl.h 1969-12-31 19:00:00.000000000 -0500
58236 +++ linux-3.0.8/include/linux/gracl.h 2011-08-23 21:48:14.000000000 -0400
58237 @@ -0,0 +1,317 @@
58238 +#ifndef GR_ACL_H
58239 +#define GR_ACL_H
58240 +
58241 +#include <linux/grdefs.h>
58242 +#include <linux/resource.h>
58243 +#include <linux/capability.h>
58244 +#include <linux/dcache.h>
58245 +#include <asm/resource.h>
58246 +
58247 +/* Major status information */
58248 +
58249 +#define GR_VERSION "grsecurity 2.2.2"
58250 +#define GRSECURITY_VERSION 0x2202
58251 +
58252 +enum {
58253 + GR_SHUTDOWN = 0,
58254 + GR_ENABLE = 1,
58255 + GR_SPROLE = 2,
58256 + GR_RELOAD = 3,
58257 + GR_SEGVMOD = 4,
58258 + GR_STATUS = 5,
58259 + GR_UNSPROLE = 6,
58260 + GR_PASSSET = 7,
58261 + GR_SPROLEPAM = 8,
58262 +};
58263 +
58264 +/* Password setup definitions
58265 + * kernel/grhash.c */
58266 +enum {
58267 + GR_PW_LEN = 128,
58268 + GR_SALT_LEN = 16,
58269 + GR_SHA_LEN = 32,
58270 +};
58271 +
58272 +enum {
58273 + GR_SPROLE_LEN = 64,
58274 +};
58275 +
58276 +enum {
58277 + GR_NO_GLOB = 0,
58278 + GR_REG_GLOB,
58279 + GR_CREATE_GLOB
58280 +};
58281 +
58282 +#define GR_NLIMITS 32
58283 +
58284 +/* Begin Data Structures */
58285 +
58286 +struct sprole_pw {
58287 + unsigned char *rolename;
58288 + unsigned char salt[GR_SALT_LEN];
58289 + unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
58290 +};
58291 +
58292 +struct name_entry {
58293 + __u32 key;
58294 + ino_t inode;
58295 + dev_t device;
58296 + char *name;
58297 + __u16 len;
58298 + __u8 deleted;
58299 + struct name_entry *prev;
58300 + struct name_entry *next;
58301 +};
58302 +
58303 +struct inodev_entry {
58304 + struct name_entry *nentry;
58305 + struct inodev_entry *prev;
58306 + struct inodev_entry *next;
58307 +};
58308 +
58309 +struct acl_role_db {
58310 + struct acl_role_label **r_hash;
58311 + __u32 r_size;
58312 +};
58313 +
58314 +struct inodev_db {
58315 + struct inodev_entry **i_hash;
58316 + __u32 i_size;
58317 +};
58318 +
58319 +struct name_db {
58320 + struct name_entry **n_hash;
58321 + __u32 n_size;
58322 +};
58323 +
58324 +struct crash_uid {
58325 + uid_t uid;
58326 + unsigned long expires;
58327 +};
58328 +
58329 +struct gr_hash_struct {
58330 + void **table;
58331 + void **nametable;
58332 + void *first;
58333 + __u32 table_size;
58334 + __u32 used_size;
58335 + int type;
58336 +};
58337 +
58338 +/* Userspace Grsecurity ACL data structures */
58339 +
58340 +struct acl_subject_label {
58341 + char *filename;
58342 + ino_t inode;
58343 + dev_t device;
58344 + __u32 mode;
58345 + kernel_cap_t cap_mask;
58346 + kernel_cap_t cap_lower;
58347 + kernel_cap_t cap_invert_audit;
58348 +
58349 + struct rlimit res[GR_NLIMITS];
58350 + __u32 resmask;
58351 +
58352 + __u8 user_trans_type;
58353 + __u8 group_trans_type;
58354 + uid_t *user_transitions;
58355 + gid_t *group_transitions;
58356 + __u16 user_trans_num;
58357 + __u16 group_trans_num;
58358 +
58359 + __u32 sock_families[2];
58360 + __u32 ip_proto[8];
58361 + __u32 ip_type;
58362 + struct acl_ip_label **ips;
58363 + __u32 ip_num;
58364 + __u32 inaddr_any_override;
58365 +
58366 + __u32 crashes;
58367 + unsigned long expires;
58368 +
58369 + struct acl_subject_label *parent_subject;
58370 + struct gr_hash_struct *hash;
58371 + struct acl_subject_label *prev;
58372 + struct acl_subject_label *next;
58373 +
58374 + struct acl_object_label **obj_hash;
58375 + __u32 obj_hash_size;
58376 + __u16 pax_flags;
58377 +};
58378 +
58379 +struct role_allowed_ip {
58380 + __u32 addr;
58381 + __u32 netmask;
58382 +
58383 + struct role_allowed_ip *prev;
58384 + struct role_allowed_ip *next;
58385 +};
58386 +
58387 +struct role_transition {
58388 + char *rolename;
58389 +
58390 + struct role_transition *prev;
58391 + struct role_transition *next;
58392 +};
58393 +
58394 +struct acl_role_label {
58395 + char *rolename;
58396 + uid_t uidgid;
58397 + __u16 roletype;
58398 +
58399 + __u16 auth_attempts;
58400 + unsigned long expires;
58401 +
58402 + struct acl_subject_label *root_label;
58403 + struct gr_hash_struct *hash;
58404 +
58405 + struct acl_role_label *prev;
58406 + struct acl_role_label *next;
58407 +
58408 + struct role_transition *transitions;
58409 + struct role_allowed_ip *allowed_ips;
58410 + uid_t *domain_children;
58411 + __u16 domain_child_num;
58412 +
58413 + struct acl_subject_label **subj_hash;
58414 + __u32 subj_hash_size;
58415 +};
58416 +
58417 +struct user_acl_role_db {
58418 + struct acl_role_label **r_table;
58419 + __u32 num_pointers; /* Number of allocations to track */
58420 + __u32 num_roles; /* Number of roles */
58421 + __u32 num_domain_children; /* Number of domain children */
58422 + __u32 num_subjects; /* Number of subjects */
58423 + __u32 num_objects; /* Number of objects */
58424 +};
58425 +
58426 +struct acl_object_label {
58427 + char *filename;
58428 + ino_t inode;
58429 + dev_t device;
58430 + __u32 mode;
58431 +
58432 + struct acl_subject_label *nested;
58433 + struct acl_object_label *globbed;
58434 +
58435 + /* next two structures not used */
58436 +
58437 + struct acl_object_label *prev;
58438 + struct acl_object_label *next;
58439 +};
58440 +
58441 +struct acl_ip_label {
58442 + char *iface;
58443 + __u32 addr;
58444 + __u32 netmask;
58445 + __u16 low, high;
58446 + __u8 mode;
58447 + __u32 type;
58448 + __u32 proto[8];
58449 +
58450 + /* next two structures not used */
58451 +
58452 + struct acl_ip_label *prev;
58453 + struct acl_ip_label *next;
58454 +};
58455 +
58456 +struct gr_arg {
58457 + struct user_acl_role_db role_db;
58458 + unsigned char pw[GR_PW_LEN];
58459 + unsigned char salt[GR_SALT_LEN];
58460 + unsigned char sum[GR_SHA_LEN];
58461 + unsigned char sp_role[GR_SPROLE_LEN];
58462 + struct sprole_pw *sprole_pws;
58463 + dev_t segv_device;
58464 + ino_t segv_inode;
58465 + uid_t segv_uid;
58466 + __u16 num_sprole_pws;
58467 + __u16 mode;
58468 +};
58469 +
58470 +struct gr_arg_wrapper {
58471 + struct gr_arg *arg;
58472 + __u32 version;
58473 + __u32 size;
58474 +};
58475 +
58476 +struct subject_map {
58477 + struct acl_subject_label *user;
58478 + struct acl_subject_label *kernel;
58479 + struct subject_map *prev;
58480 + struct subject_map *next;
58481 +};
58482 +
58483 +struct acl_subj_map_db {
58484 + struct subject_map **s_hash;
58485 + __u32 s_size;
58486 +};
58487 +
58488 +/* End Data Structures Section */
58489 +
58490 +/* Hash functions generated by empirical testing by Brad Spengler.
58491 +   Makes good use of the low bits of the inode. Generally 0-1 iterations
58492 +   in the loop for a successful match, 0-3 for an unsuccessful match.
58493 +   Shift/add algorithm with modulus of table size and an XOR. */
58494 +
58495 +static __inline__ unsigned int
58496 +rhash(const uid_t uid, const __u16 type, const unsigned int sz)
58497 +{
58498 + return ((((uid + type) << (16 + type)) ^ uid) % sz);
58499 +}
58500 +
58501 +static __inline__ unsigned int
58502 +shash(const struct acl_subject_label *userp, const unsigned int sz)
58503 +{
58504 + return ((const unsigned long)userp % sz);
58505 +}
58506 +
58507 +static __inline__ unsigned int
58508 +fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
58509 +{
58510 + return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
58511 +}
58512 +
58513 +static __inline__ unsigned int
58514 +nhash(const char *name, const __u16 len, const unsigned int sz)
58515 +{
58516 + return full_name_hash((const unsigned char *)name, len) % sz;
58517 +}
58518 +
58519 +#define FOR_EACH_ROLE_START(role) \
58520 + role = role_list; \
58521 + while (role) {
58522 +
58523 +#define FOR_EACH_ROLE_END(role) \
58524 + role = role->prev; \
58525 + }
58526 +
58527 +#define FOR_EACH_SUBJECT_START(role,subj,iter) \
58528 + subj = NULL; \
58529 + iter = 0; \
58530 + while (iter < role->subj_hash_size) { \
58531 + if (subj == NULL) \
58532 + subj = role->subj_hash[iter]; \
58533 + if (subj == NULL) { \
58534 + iter++; \
58535 + continue; \
58536 + }
58537 +
58538 +#define FOR_EACH_SUBJECT_END(subj,iter) \
58539 + subj = subj->next; \
58540 + if (subj == NULL) \
58541 + iter++; \
58542 + }
58543 +
58544 +
58545 +#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
58546 + subj = role->hash->first; \
58547 + while (subj != NULL) {
58548 +
58549 +#define FOR_EACH_NESTED_SUBJECT_END(subj) \
58550 + subj = subj->next; \
58551 + }
58552 +
58553 +#endif
58554 +
58555 diff -urNp linux-3.0.8/include/linux/gralloc.h linux-3.0.8/include/linux/gralloc.h
58556 --- linux-3.0.8/include/linux/gralloc.h 1969-12-31 19:00:00.000000000 -0500
58557 +++ linux-3.0.8/include/linux/gralloc.h 2011-08-23 21:48:14.000000000 -0400
58558 @@ -0,0 +1,9 @@
58559 +#ifndef __GRALLOC_H
58560 +#define __GRALLOC_H
58561 +
58562 +void acl_free_all(void);
58563 +int acl_alloc_stack_init(unsigned long size);
58564 +void *acl_alloc(unsigned long len);
58565 +void *acl_alloc_num(unsigned long num, unsigned long len);
58566 +
58567 +#endif
58568 diff -urNp linux-3.0.8/include/linux/grdefs.h linux-3.0.8/include/linux/grdefs.h
58569 --- linux-3.0.8/include/linux/grdefs.h 1969-12-31 19:00:00.000000000 -0500
58570 +++ linux-3.0.8/include/linux/grdefs.h 2011-08-23 21:48:14.000000000 -0400
58571 @@ -0,0 +1,140 @@
58572 +#ifndef GRDEFS_H
58573 +#define GRDEFS_H
58574 +
58575 +/* Begin grsecurity status declarations */
58576 +
58577 +enum {
58578 + GR_READY = 0x01,
58579 + GR_STATUS_INIT = 0x00 /* disabled state */
58580 +};
58581 +
58582 +/* Begin ACL declarations */
58583 +
58584 +/* Role flags */
58585 +
58586 +enum {
58587 + GR_ROLE_USER = 0x0001,
58588 + GR_ROLE_GROUP = 0x0002,
58589 + GR_ROLE_DEFAULT = 0x0004,
58590 + GR_ROLE_SPECIAL = 0x0008,
58591 + GR_ROLE_AUTH = 0x0010,
58592 + GR_ROLE_NOPW = 0x0020,
58593 + GR_ROLE_GOD = 0x0040,
58594 + GR_ROLE_LEARN = 0x0080,
58595 + GR_ROLE_TPE = 0x0100,
58596 + GR_ROLE_DOMAIN = 0x0200,
58597 + GR_ROLE_PAM = 0x0400,
58598 + GR_ROLE_PERSIST = 0x0800
58599 +};
58600 +
58601 +/* ACL Subject and Object mode flags */
58602 +enum {
58603 + GR_DELETED = 0x80000000
58604 +};
58605 +
58606 +/* ACL Object-only mode flags */
58607 +enum {
58608 + GR_READ = 0x00000001,
58609 + GR_APPEND = 0x00000002,
58610 + GR_WRITE = 0x00000004,
58611 + GR_EXEC = 0x00000008,
58612 + GR_FIND = 0x00000010,
58613 + GR_INHERIT = 0x00000020,
58614 + GR_SETID = 0x00000040,
58615 + GR_CREATE = 0x00000080,
58616 + GR_DELETE = 0x00000100,
58617 + GR_LINK = 0x00000200,
58618 + GR_AUDIT_READ = 0x00000400,
58619 + GR_AUDIT_APPEND = 0x00000800,
58620 + GR_AUDIT_WRITE = 0x00001000,
58621 + GR_AUDIT_EXEC = 0x00002000,
58622 + GR_AUDIT_FIND = 0x00004000,
58623 + GR_AUDIT_INHERIT= 0x00008000,
58624 + GR_AUDIT_SETID = 0x00010000,
58625 + GR_AUDIT_CREATE = 0x00020000,
58626 + GR_AUDIT_DELETE = 0x00040000,
58627 + GR_AUDIT_LINK = 0x00080000,
58628 + GR_PTRACERD = 0x00100000,
58629 + GR_NOPTRACE = 0x00200000,
58630 + GR_SUPPRESS = 0x00400000,
58631 + GR_NOLEARN = 0x00800000,
58632 + GR_INIT_TRANSFER= 0x01000000
58633 +};
58634 +
58635 +#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
58636 + GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
58637 + GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
58638 +
58639 +/* ACL subject-only mode flags */
58640 +enum {
58641 + GR_KILL = 0x00000001,
58642 + GR_VIEW = 0x00000002,
58643 + GR_PROTECTED = 0x00000004,
58644 + GR_LEARN = 0x00000008,
58645 + GR_OVERRIDE = 0x00000010,
58646 + /* just a placeholder, this mode is only used in userspace */
58647 + GR_DUMMY = 0x00000020,
58648 + GR_PROTSHM = 0x00000040,
58649 + GR_KILLPROC = 0x00000080,
58650 + GR_KILLIPPROC = 0x00000100,
58651 + /* just a placeholder, this mode is only used in userspace */
58652 + GR_NOTROJAN = 0x00000200,
58653 + GR_PROTPROCFD = 0x00000400,
58654 + GR_PROCACCT = 0x00000800,
58655 + GR_RELAXPTRACE = 0x00001000,
58656 + GR_NESTED = 0x00002000,
58657 + GR_INHERITLEARN = 0x00004000,
58658 + GR_PROCFIND = 0x00008000,
58659 + GR_POVERRIDE = 0x00010000,
58660 + GR_KERNELAUTH = 0x00020000,
58661 + GR_ATSECURE = 0x00040000,
58662 + GR_SHMEXEC = 0x00080000
58663 +};
58664 +
58665 +enum {
58666 + GR_PAX_ENABLE_SEGMEXEC = 0x0001,
58667 + GR_PAX_ENABLE_PAGEEXEC = 0x0002,
58668 + GR_PAX_ENABLE_MPROTECT = 0x0004,
58669 + GR_PAX_ENABLE_RANDMMAP = 0x0008,
58670 + GR_PAX_ENABLE_EMUTRAMP = 0x0010,
58671 + GR_PAX_DISABLE_SEGMEXEC = 0x0100,
58672 + GR_PAX_DISABLE_PAGEEXEC = 0x0200,
58673 + GR_PAX_DISABLE_MPROTECT = 0x0400,
58674 + GR_PAX_DISABLE_RANDMMAP = 0x0800,
58675 + GR_PAX_DISABLE_EMUTRAMP = 0x1000,
58676 +};
58677 +
58678 +enum {
58679 + GR_ID_USER = 0x01,
58680 + GR_ID_GROUP = 0x02,
58681 +};
58682 +
58683 +enum {
58684 + GR_ID_ALLOW = 0x01,
58685 + GR_ID_DENY = 0x02,
58686 +};
58687 +
58688 +#define GR_CRASH_RES 31
58689 +#define GR_UIDTABLE_MAX 500
58690 +
58691 +/* begin resource learning section */
58692 +enum {
58693 + GR_RLIM_CPU_BUMP = 60,
58694 + GR_RLIM_FSIZE_BUMP = 50000,
58695 + GR_RLIM_DATA_BUMP = 10000,
58696 + GR_RLIM_STACK_BUMP = 1000,
58697 + GR_RLIM_CORE_BUMP = 10000,
58698 + GR_RLIM_RSS_BUMP = 500000,
58699 + GR_RLIM_NPROC_BUMP = 1,
58700 + GR_RLIM_NOFILE_BUMP = 5,
58701 + GR_RLIM_MEMLOCK_BUMP = 50000,
58702 + GR_RLIM_AS_BUMP = 500000,
58703 + GR_RLIM_LOCKS_BUMP = 2,
58704 + GR_RLIM_SIGPENDING_BUMP = 5,
58705 + GR_RLIM_MSGQUEUE_BUMP = 10000,
58706 + GR_RLIM_NICE_BUMP = 1,
58707 + GR_RLIM_RTPRIO_BUMP = 1,
58708 + GR_RLIM_RTTIME_BUMP = 1000000
58709 +};
58710 +
58711 +#endif
58712 diff -urNp linux-3.0.8/include/linux/grinternal.h linux-3.0.8/include/linux/grinternal.h
58713 --- linux-3.0.8/include/linux/grinternal.h 1969-12-31 19:00:00.000000000 -0500
58714 +++ linux-3.0.8/include/linux/grinternal.h 2011-10-20 00:47:28.000000000 -0400
58715 @@ -0,0 +1,220 @@
58716 +#ifndef __GRINTERNAL_H
58717 +#define __GRINTERNAL_H
58718 +
58719 +#ifdef CONFIG_GRKERNSEC
58720 +
58721 +#include <linux/fs.h>
58722 +#include <linux/mnt_namespace.h>
58723 +#include <linux/nsproxy.h>
58724 +#include <linux/gracl.h>
58725 +#include <linux/grdefs.h>
58726 +#include <linux/grmsg.h>
58727 +
58728 +void gr_add_learn_entry(const char *fmt, ...)
58729 + __attribute__ ((format (printf, 1, 2)));
58730 +__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
58731 + const struct vfsmount *mnt);
58732 +__u32 gr_check_create(const struct dentry *new_dentry,
58733 + const struct dentry *parent,
58734 + const struct vfsmount *mnt, const __u32 mode);
58735 +int gr_check_protected_task(const struct task_struct *task);
58736 +__u32 to_gr_audit(const __u32 reqmode);
58737 +int gr_set_acls(const int type);
58738 +int gr_apply_subject_to_task(struct task_struct *task);
58739 +int gr_acl_is_enabled(void);
58740 +char gr_roletype_to_char(void);
58741 +
58742 +void gr_handle_alertkill(struct task_struct *task);
58743 +char *gr_to_filename(const struct dentry *dentry,
58744 + const struct vfsmount *mnt);
58745 +char *gr_to_filename1(const struct dentry *dentry,
58746 + const struct vfsmount *mnt);
58747 +char *gr_to_filename2(const struct dentry *dentry,
58748 + const struct vfsmount *mnt);
58749 +char *gr_to_filename3(const struct dentry *dentry,
58750 + const struct vfsmount *mnt);
58751 +
58752 +extern int grsec_enable_harden_ptrace;
58753 +extern int grsec_enable_link;
58754 +extern int grsec_enable_fifo;
58755 +extern int grsec_enable_execve;
58756 +extern int grsec_enable_shm;
58757 +extern int grsec_enable_execlog;
58758 +extern int grsec_enable_signal;
58759 +extern int grsec_enable_audit_ptrace;
58760 +extern int grsec_enable_forkfail;
58761 +extern int grsec_enable_time;
58762 +extern int grsec_enable_rofs;
58763 +extern int grsec_enable_chroot_shmat;
58764 +extern int grsec_enable_chroot_mount;
58765 +extern int grsec_enable_chroot_double;
58766 +extern int grsec_enable_chroot_pivot;
58767 +extern int grsec_enable_chroot_chdir;
58768 +extern int grsec_enable_chroot_chmod;
58769 +extern int grsec_enable_chroot_mknod;
58770 +extern int grsec_enable_chroot_fchdir;
58771 +extern int grsec_enable_chroot_nice;
58772 +extern int grsec_enable_chroot_execlog;
58773 +extern int grsec_enable_chroot_caps;
58774 +extern int grsec_enable_chroot_sysctl;
58775 +extern int grsec_enable_chroot_unix;
58776 +extern int grsec_enable_tpe;
58777 +extern int grsec_tpe_gid;
58778 +extern int grsec_enable_tpe_all;
58779 +extern int grsec_enable_tpe_invert;
58780 +extern int grsec_enable_socket_all;
58781 +extern int grsec_socket_all_gid;
58782 +extern int grsec_enable_socket_client;
58783 +extern int grsec_socket_client_gid;
58784 +extern int grsec_enable_socket_server;
58785 +extern int grsec_socket_server_gid;
58786 +extern int grsec_audit_gid;
58787 +extern int grsec_enable_group;
58788 +extern int grsec_enable_audit_textrel;
58789 +extern int grsec_enable_log_rwxmaps;
58790 +extern int grsec_enable_mount;
58791 +extern int grsec_enable_chdir;
58792 +extern int grsec_resource_logging;
58793 +extern int grsec_enable_blackhole;
58794 +extern int grsec_lastack_retries;
58795 +extern int grsec_enable_brute;
58796 +extern int grsec_lock;
58797 +
58798 +extern spinlock_t grsec_alert_lock;
58799 +extern unsigned long grsec_alert_wtime;
58800 +extern unsigned long grsec_alert_fyet;
58801 +
58802 +extern spinlock_t grsec_audit_lock;
58803 +
58804 +extern rwlock_t grsec_exec_file_lock;
58805 +
58806 +#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
58807 + gr_to_filename2((tsk)->exec_file->f_path.dentry, \
58808 + (tsk)->exec_file->f_vfsmnt) : "/")
58809 +
58810 +#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
58811 + gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
58812 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
58813 +
58814 +#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
58815 + gr_to_filename((tsk)->exec_file->f_path.dentry, \
58816 + (tsk)->exec_file->f_vfsmnt) : "/")
58817 +
58818 +#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
58819 + gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
58820 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
58821 +
58822 +#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
58823 +
58824 +#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
58825 +
58826 +#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
58827 + (task)->pid, (cred)->uid, \
58828 + (cred)->euid, (cred)->gid, (cred)->egid, \
58829 + gr_parent_task_fullpath(task), \
58830 + (task)->real_parent->comm, (task)->real_parent->pid, \
58831 + (pcred)->uid, (pcred)->euid, \
58832 + (pcred)->gid, (pcred)->egid
58833 +
58834 +#define GR_CHROOT_CAPS {{ \
58835 + CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
58836 + CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
58837 + CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
58838 + CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
58839 + CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
58840 + CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
58841 + CAP_TO_MASK(CAP_SYSLOG) | CAP_TO_MASK(CAP_MAC_ADMIN) }}
58842 +
58843 +#define security_learn(normal_msg,args...) \
58844 +({ \
58845 + read_lock(&grsec_exec_file_lock); \
58846 + gr_add_learn_entry(normal_msg "\n", ## args); \
58847 + read_unlock(&grsec_exec_file_lock); \
58848 +})
58849 +
58850 +enum {
58851 + GR_DO_AUDIT,
58852 + GR_DONT_AUDIT,
58853 + /* used for non-audit messages that we shouldn't kill the task on */
58854 + GR_DONT_AUDIT_GOOD
58855 +};
58856 +
58857 +enum {
58858 + GR_TTYSNIFF,
58859 + GR_RBAC,
58860 + GR_RBAC_STR,
58861 + GR_STR_RBAC,
58862 + GR_RBAC_MODE2,
58863 + GR_RBAC_MODE3,
58864 + GR_FILENAME,
58865 + GR_SYSCTL_HIDDEN,
58866 + GR_NOARGS,
58867 + GR_ONE_INT,
58868 + GR_ONE_INT_TWO_STR,
58869 + GR_ONE_STR,
58870 + GR_STR_INT,
58871 + GR_TWO_STR_INT,
58872 + GR_TWO_INT,
58873 + GR_TWO_U64,
58874 + GR_THREE_INT,
58875 + GR_FIVE_INT_TWO_STR,
58876 + GR_TWO_STR,
58877 + GR_THREE_STR,
58878 + GR_FOUR_STR,
58879 + GR_STR_FILENAME,
58880 + GR_FILENAME_STR,
58881 + GR_FILENAME_TWO_INT,
58882 + GR_FILENAME_TWO_INT_STR,
58883 + GR_TEXTREL,
58884 + GR_PTRACE,
58885 + GR_RESOURCE,
58886 + GR_CAP,
58887 + GR_SIG,
58888 + GR_SIG2,
58889 + GR_CRASH1,
58890 + GR_CRASH2,
58891 + GR_PSACCT,
58892 + GR_RWXMAP
58893 +};
58894 +
58895 +#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
58896 +#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
58897 +#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
58898 +#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
58899 +#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
58900 +#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
58901 +#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
58902 +#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
58903 +#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
58904 +#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
58905 +#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
58906 +#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
58907 +#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
58908 +#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
58909 +#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
58910 +#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
58911 +#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
58912 +#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
58913 +#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
58914 +#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
58915 +#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
58916 +#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
58917 +#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
58918 +#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
58919 +#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
58920 +#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
58921 +#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
58922 +#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
58923 +#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
58924 +#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
58925 +#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
58926 +#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
58927 +#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
58928 +#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
58929 +#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
58930 +
58931 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
58932 +
58933 +#endif
58934 +
58935 +#endif
58936 diff -urNp linux-3.0.8/include/linux/grmsg.h linux-3.0.8/include/linux/grmsg.h
58937 --- linux-3.0.8/include/linux/grmsg.h 1969-12-31 19:00:00.000000000 -0500
58938 +++ linux-3.0.8/include/linux/grmsg.h 2011-09-14 09:16:54.000000000 -0400
58939 @@ -0,0 +1,108 @@
58940 +#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
58941 +#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
58942 +#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
58943 +#define GR_STOPMOD_MSG "denied modification of module state by "
58944 +#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
58945 +#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
58946 +#define GR_IOPERM_MSG "denied use of ioperm() by "
58947 +#define GR_IOPL_MSG "denied use of iopl() by "
58948 +#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
58949 +#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
58950 +#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
58951 +#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
58952 +#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
58953 +#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
58954 +#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
58955 +#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
58956 +#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
58957 +#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
58958 +#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
58959 +#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
58960 +#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
58961 +#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
58962 +#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
58963 +#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
58964 +#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
58965 +#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
58966 +#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
58967 +#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
58968 +#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
58969 +#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
58970 +#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
58971 +#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
58972 +#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
58973 +#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
58974 +#define GR_EXEC_TPE_MSG "denied untrusted exec of %.950s by "
58975 +#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
58976 +#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
58977 +#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
58978 +#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
58979 +#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
58980 +#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
58981 +#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
58982 +#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
58983 +#define GR_FCHMOD_ACL_MSG "%s fchmod of %.950s by "
58984 +#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
58985 +#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
58986 +#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
58987 +#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
58988 +#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
58989 +#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
58990 +#define GR_INITF_ACL_MSG "init_variables() failed %s by "
58991 +#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
58992 +#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbage by "
58993 +#define GR_SHUTS_ACL_MSG "shutdown auth success for "
58994 +#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
58995 +#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
58996 +#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
58997 +#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
58998 +#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
58999 +#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
59000 +#define GR_ENABLEF_ACL_MSG "unable to load %s for "
59001 +#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
59002 +#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
59003 +#define GR_RELOADF_ACL_MSG "failed reload of %s for "
59004 +#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
59005 +#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
59006 +#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
59007 +#define GR_SPROLEF_ACL_MSG "special role %s failure for "
59008 +#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
59009 +#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
59010 +#define GR_INVMODE_ACL_MSG "invalid mode %d by "
59011 +#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
59012 +#define GR_FAILFORK_MSG "failed fork with errno %s by "
59013 +#define GR_NICE_CHROOT_MSG "denied priority change by "
59014 +#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
59015 +#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
59016 +#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
59017 +#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
59018 +#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
59019 +#define GR_TIME_MSG "time set by "
59020 +#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
59021 +#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
59022 +#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
59023 +#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
59024 +#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
59025 +#define GR_BIND_MSG "denied bind() by "
59026 +#define GR_CONNECT_MSG "denied connect() by "
59027 +#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
59028 +#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
59029 +#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
59030 +#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
59031 +#define GR_CAP_ACL_MSG "use of %s denied for "
59032 +#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
59033 +#define GR_CAP_ACL_MSG2 "use of %s permitted for "
59034 +#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
59035 +#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
59036 +#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
59037 +#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
59038 +#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
59039 +#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
59040 +#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
59041 +#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
59042 +#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
59043 +#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
59044 +#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
59045 +#define GR_VM86_MSG "denied use of vm86 by "
59046 +#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
59047 +#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
59048 diff -urNp linux-3.0.8/include/linux/grsecurity.h linux-3.0.8/include/linux/grsecurity.h
59049 --- linux-3.0.8/include/linux/grsecurity.h 1969-12-31 19:00:00.000000000 -0500
59050 +++ linux-3.0.8/include/linux/grsecurity.h 2011-10-17 06:35:30.000000000 -0400
59051 @@ -0,0 +1,228 @@
59052 +#ifndef GR_SECURITY_H
59053 +#define GR_SECURITY_H
59054 +#include <linux/fs.h>
59055 +#include <linux/fs_struct.h>
59056 +#include <linux/binfmts.h>
59057 +#include <linux/gracl.h>
59058 +
59059 +/* notify of brain-dead configs */
59060 +#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
59061 +#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
59062 +#endif
59063 +#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
59064 +#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
59065 +#endif
59066 +#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
59067 +#error "CONFIG_PAX_NOEXEC enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
59068 +#endif
59069 +#if defined(CONFIG_PAX_ASLR) && (defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
59070 +#error "CONFIG_PAX_ASLR enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
59071 +#endif
59072 +#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
59073 +#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
59074 +#endif
59075 +#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
59076 +#error "CONFIG_PAX enabled, but no PaX options are enabled."
59077 +#endif
59078 +
59079 +#include <linux/compat.h>
59080 +
59081 +struct user_arg_ptr {
59082 +#ifdef CONFIG_COMPAT
59083 + bool is_compat;
59084 +#endif
59085 + union {
59086 + const char __user *const __user *native;
59087 +#ifdef CONFIG_COMPAT
59088 + compat_uptr_t __user *compat;
59089 +#endif
59090 + } ptr;
59091 +};
59092 +
59093 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
59094 +void gr_handle_brute_check(void);
59095 +void gr_handle_kernel_exploit(void);
59096 +int gr_process_user_ban(void);
59097 +
59098 +char gr_roletype_to_char(void);
59099 +
59100 +int gr_acl_enable_at_secure(void);
59101 +
59102 +int gr_check_user_change(int real, int effective, int fs);
59103 +int gr_check_group_change(int real, int effective, int fs);
59104 +
59105 +void gr_del_task_from_ip_table(struct task_struct *p);
59106 +
59107 +int gr_pid_is_chrooted(struct task_struct *p);
59108 +int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
59109 +int gr_handle_chroot_nice(void);
59110 +int gr_handle_chroot_sysctl(const int op);
59111 +int gr_handle_chroot_setpriority(struct task_struct *p,
59112 + const int niceval);
59113 +int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
59114 +int gr_handle_chroot_chroot(const struct dentry *dentry,
59115 + const struct vfsmount *mnt);
59116 +void gr_handle_chroot_chdir(struct path *path);
59117 +int gr_handle_chroot_chmod(const struct dentry *dentry,
59118 + const struct vfsmount *mnt, const int mode);
59119 +int gr_handle_chroot_mknod(const struct dentry *dentry,
59120 + const struct vfsmount *mnt, const int mode);
59121 +int gr_handle_chroot_mount(const struct dentry *dentry,
59122 + const struct vfsmount *mnt,
59123 + const char *dev_name);
59124 +int gr_handle_chroot_pivot(void);
59125 +int gr_handle_chroot_unix(const pid_t pid);
59126 +
59127 +int gr_handle_rawio(const struct inode *inode);
59128 +
59129 +void gr_handle_ioperm(void);
59130 +void gr_handle_iopl(void);
59131 +
59132 +int gr_tpe_allow(const struct file *file);
59133 +
59134 +void gr_set_chroot_entries(struct task_struct *task, struct path *path);
59135 +void gr_clear_chroot_entries(struct task_struct *task);
59136 +
59137 +void gr_log_forkfail(const int retval);
59138 +void gr_log_timechange(void);
59139 +void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
59140 +void gr_log_chdir(const struct dentry *dentry,
59141 + const struct vfsmount *mnt);
59142 +void gr_log_chroot_exec(const struct dentry *dentry,
59143 + const struct vfsmount *mnt);
59144 +void gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv);
59145 +void gr_log_remount(const char *devname, const int retval);
59146 +void gr_log_unmount(const char *devname, const int retval);
59147 +void gr_log_mount(const char *from, const char *to, const int retval);
59148 +void gr_log_textrel(struct vm_area_struct *vma);
59149 +void gr_log_rwxmmap(struct file *file);
59150 +void gr_log_rwxmprotect(struct file *file);
59151 +
59152 +int gr_handle_follow_link(const struct inode *parent,
59153 + const struct inode *inode,
59154 + const struct dentry *dentry,
59155 + const struct vfsmount *mnt);
59156 +int gr_handle_fifo(const struct dentry *dentry,
59157 + const struct vfsmount *mnt,
59158 + const struct dentry *dir, const int flag,
59159 + const int acc_mode);
59160 +int gr_handle_hardlink(const struct dentry *dentry,
59161 + const struct vfsmount *mnt,
59162 + struct inode *inode,
59163 + const int mode, const char *to);
59164 +
59165 +int gr_is_capable(const int cap);
59166 +int gr_is_capable_nolog(const int cap);
59167 +void gr_learn_resource(const struct task_struct *task, const int limit,
59168 + const unsigned long wanted, const int gt);
59169 +void gr_copy_label(struct task_struct *tsk);
59170 +void gr_handle_crash(struct task_struct *task, const int sig);
59171 +int gr_handle_signal(const struct task_struct *p, const int sig);
59172 +int gr_check_crash_uid(const uid_t uid);
59173 +int gr_check_protected_task(const struct task_struct *task);
59174 +int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
59175 +int gr_acl_handle_mmap(const struct file *file,
59176 + const unsigned long prot);
59177 +int gr_acl_handle_mprotect(const struct file *file,
59178 + const unsigned long prot);
59179 +int gr_check_hidden_task(const struct task_struct *tsk);
59180 +__u32 gr_acl_handle_truncate(const struct dentry *dentry,
59181 + const struct vfsmount *mnt);
59182 +__u32 gr_acl_handle_utime(const struct dentry *dentry,
59183 + const struct vfsmount *mnt);
59184 +__u32 gr_acl_handle_access(const struct dentry *dentry,
59185 + const struct vfsmount *mnt, const int fmode);
59186 +__u32 gr_acl_handle_fchmod(const struct dentry *dentry,
59187 + const struct vfsmount *mnt, mode_t mode);
59188 +__u32 gr_acl_handle_chmod(const struct dentry *dentry,
59189 + const struct vfsmount *mnt, mode_t mode);
59190 +__u32 gr_acl_handle_chown(const struct dentry *dentry,
59191 + const struct vfsmount *mnt);
59192 +__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
59193 + const struct vfsmount *mnt);
59194 +int gr_handle_ptrace(struct task_struct *task, const long request);
59195 +int gr_handle_proc_ptrace(struct task_struct *task);
59196 +__u32 gr_acl_handle_execve(const struct dentry *dentry,
59197 + const struct vfsmount *mnt);
59198 +int gr_check_crash_exec(const struct file *filp);
59199 +int gr_acl_is_enabled(void);
59200 +void gr_set_kernel_label(struct task_struct *task);
59201 +void gr_set_role_label(struct task_struct *task, const uid_t uid,
59202 + const gid_t gid);
59203 +int gr_set_proc_label(const struct dentry *dentry,
59204 + const struct vfsmount *mnt,
59205 + const int unsafe_share);
59206 +__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
59207 + const struct vfsmount *mnt);
59208 +__u32 gr_acl_handle_open(const struct dentry *dentry,
59209 + const struct vfsmount *mnt, const int fmode);
59210 +__u32 gr_acl_handle_creat(const struct dentry *dentry,
59211 + const struct dentry *p_dentry,
59212 + const struct vfsmount *p_mnt, const int fmode,
59213 + const int imode);
59214 +void gr_handle_create(const struct dentry *dentry,
59215 + const struct vfsmount *mnt);
59216 +void gr_handle_proc_create(const struct dentry *dentry,
59217 + const struct inode *inode);
59218 +__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
59219 + const struct dentry *parent_dentry,
59220 + const struct vfsmount *parent_mnt,
59221 + const int mode);
59222 +__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
59223 + const struct dentry *parent_dentry,
59224 + const struct vfsmount *parent_mnt);
59225 +__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
59226 + const struct vfsmount *mnt);
59227 +void gr_handle_delete(const ino_t ino, const dev_t dev);
59228 +__u32 gr_acl_handle_unlink(const struct dentry *dentry,
59229 + const struct vfsmount *mnt);
59230 +__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
59231 + const struct dentry *parent_dentry,
59232 + const struct vfsmount *parent_mnt,
59233 + const char *from);
59234 +__u32 gr_acl_handle_link(const struct dentry *new_dentry,
59235 + const struct dentry *parent_dentry,
59236 + const struct vfsmount *parent_mnt,
59237 + const struct dentry *old_dentry,
59238 + const struct vfsmount *old_mnt, const char *to);
59239 +int gr_acl_handle_rename(struct dentry *new_dentry,
59240 + struct dentry *parent_dentry,
59241 + const struct vfsmount *parent_mnt,
59242 + struct dentry *old_dentry,
59243 + struct inode *old_parent_inode,
59244 + struct vfsmount *old_mnt, const char *newname);
59245 +void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
59246 + struct dentry *old_dentry,
59247 + struct dentry *new_dentry,
59248 + struct vfsmount *mnt, const __u8 replace);
59249 +__u32 gr_check_link(const struct dentry *new_dentry,
59250 + const struct dentry *parent_dentry,
59251 + const struct vfsmount *parent_mnt,
59252 + const struct dentry *old_dentry,
59253 + const struct vfsmount *old_mnt);
59254 +int gr_acl_handle_filldir(const struct file *file, const char *name,
59255 + const unsigned int namelen, const ino_t ino);
59256 +
59257 +__u32 gr_acl_handle_unix(const struct dentry *dentry,
59258 + const struct vfsmount *mnt);
59259 +void gr_acl_handle_exit(void);
59260 +void gr_acl_handle_psacct(struct task_struct *task, const long code);
59261 +int gr_acl_handle_procpidmem(const struct task_struct *task);
59262 +int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
59263 +int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
59264 +void gr_audit_ptrace(struct task_struct *task);
59265 +dev_t gr_get_dev_from_dentry(struct dentry *dentry);
59266 +
59267 +#ifdef CONFIG_GRKERNSEC
59268 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
59269 +void gr_handle_vm86(void);
59270 +void gr_handle_mem_readwrite(u64 from, u64 to);
59271 +
59272 +extern int grsec_enable_dmesg;
59273 +extern int grsec_disable_privio;
59274 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
59275 +extern int grsec_enable_chroot_findtask;
59276 +#endif
59277 +#endif
59278 +
59279 +#endif
59280 diff -urNp linux-3.0.8/include/linux/grsock.h linux-3.0.8/include/linux/grsock.h
59281 --- linux-3.0.8/include/linux/grsock.h 1969-12-31 19:00:00.000000000 -0500
59282 +++ linux-3.0.8/include/linux/grsock.h 2011-08-23 21:48:14.000000000 -0400
59283 @@ -0,0 +1,19 @@
59284 +#ifndef __GRSOCK_H
59285 +#define __GRSOCK_H
59286 +
59287 +extern void gr_attach_curr_ip(const struct sock *sk);
59288 +extern int gr_handle_sock_all(const int family, const int type,
59289 + const int protocol);
59290 +extern int gr_handle_sock_server(const struct sockaddr *sck);
59291 +extern int gr_handle_sock_server_other(const struct sock *sck);
59292 +extern int gr_handle_sock_client(const struct sockaddr *sck);
59293 +extern int gr_search_connect(struct socket * sock,
59294 + struct sockaddr_in * addr);
59295 +extern int gr_search_bind(struct socket * sock,
59296 + struct sockaddr_in * addr);
59297 +extern int gr_search_listen(struct socket * sock);
59298 +extern int gr_search_accept(struct socket * sock);
59299 +extern int gr_search_socket(const int domain, const int type,
59300 + const int protocol);
59301 +
59302 +#endif
59303 diff -urNp linux-3.0.8/include/linux/hid.h linux-3.0.8/include/linux/hid.h
59304 --- linux-3.0.8/include/linux/hid.h 2011-07-21 22:17:23.000000000 -0400
59305 +++ linux-3.0.8/include/linux/hid.h 2011-08-23 21:47:56.000000000 -0400
59306 @@ -675,7 +675,7 @@ struct hid_ll_driver {
59307 unsigned int code, int value);
59308
59309 int (*parse)(struct hid_device *hdev);
59310 -};
59311 +} __no_const;
59312
59313 #define PM_HINT_FULLON 1<<5
59314 #define PM_HINT_NORMAL 1<<1
59315 diff -urNp linux-3.0.8/include/linux/highmem.h linux-3.0.8/include/linux/highmem.h
59316 --- linux-3.0.8/include/linux/highmem.h 2011-07-21 22:17:23.000000000 -0400
59317 +++ linux-3.0.8/include/linux/highmem.h 2011-08-23 21:47:56.000000000 -0400
59318 @@ -185,6 +185,18 @@ static inline void clear_highpage(struct
59319 kunmap_atomic(kaddr, KM_USER0);
59320 }
59321
59322 +static inline void sanitize_highpage(struct page *page)
59323 +{
59324 + void *kaddr;
59325 + unsigned long flags;
59326 +
59327 + local_irq_save(flags);
59328 + kaddr = kmap_atomic(page, KM_CLEARPAGE);
59329 + clear_page(kaddr);
59330 + kunmap_atomic(kaddr, KM_CLEARPAGE);
59331 + local_irq_restore(flags);
59332 +}
59333 +
59334 static inline void zero_user_segments(struct page *page,
59335 unsigned start1, unsigned end1,
59336 unsigned start2, unsigned end2)
59337 diff -urNp linux-3.0.8/include/linux/i2c.h linux-3.0.8/include/linux/i2c.h
59338 --- linux-3.0.8/include/linux/i2c.h 2011-07-21 22:17:23.000000000 -0400
59339 +++ linux-3.0.8/include/linux/i2c.h 2011-08-23 21:47:56.000000000 -0400
59340 @@ -346,6 +346,7 @@ struct i2c_algorithm {
59341 /* To determine what the adapter supports */
59342 u32 (*functionality) (struct i2c_adapter *);
59343 };
59344 +typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
59345
59346 /*
59347 * i2c_adapter is the structure used to identify a physical i2c bus along
59348 diff -urNp linux-3.0.8/include/linux/i2o.h linux-3.0.8/include/linux/i2o.h
59349 --- linux-3.0.8/include/linux/i2o.h 2011-07-21 22:17:23.000000000 -0400
59350 +++ linux-3.0.8/include/linux/i2o.h 2011-08-23 21:47:56.000000000 -0400
59351 @@ -564,7 +564,7 @@ struct i2o_controller {
59352 struct i2o_device *exec; /* Executive */
59353 #if BITS_PER_LONG == 64
59354 spinlock_t context_list_lock; /* lock for context_list */
59355 - atomic_t context_list_counter; /* needed for unique contexts */
59356 + atomic_unchecked_t context_list_counter; /* needed for unique contexts */
59357 struct list_head context_list; /* list of context id's
59358 and pointers */
59359 #endif
59360 diff -urNp linux-3.0.8/include/linux/init.h linux-3.0.8/include/linux/init.h
59361 --- linux-3.0.8/include/linux/init.h 2011-07-21 22:17:23.000000000 -0400
59362 +++ linux-3.0.8/include/linux/init.h 2011-08-23 21:47:56.000000000 -0400
59363 @@ -293,13 +293,13 @@ void __init parse_early_options(char *cm
59364
59365 /* Each module must use one module_init(). */
59366 #define module_init(initfn) \
59367 - static inline initcall_t __inittest(void) \
59368 + static inline __used initcall_t __inittest(void) \
59369 { return initfn; } \
59370 int init_module(void) __attribute__((alias(#initfn)));
59371
59372 /* This is only required if you want to be unloadable. */
59373 #define module_exit(exitfn) \
59374 - static inline exitcall_t __exittest(void) \
59375 + static inline __used exitcall_t __exittest(void) \
59376 { return exitfn; } \
59377 void cleanup_module(void) __attribute__((alias(#exitfn)));
59378
59379 diff -urNp linux-3.0.8/include/linux/init_task.h linux-3.0.8/include/linux/init_task.h
59380 --- linux-3.0.8/include/linux/init_task.h 2011-07-21 22:17:23.000000000 -0400
59381 +++ linux-3.0.8/include/linux/init_task.h 2011-08-23 21:47:56.000000000 -0400
59382 @@ -126,6 +126,12 @@ extern struct cred init_cred;
59383 # define INIT_PERF_EVENTS(tsk)
59384 #endif
59385
59386 +#ifdef CONFIG_X86
59387 +#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
59388 +#else
59389 +#define INIT_TASK_THREAD_INFO
59390 +#endif
59391 +
59392 /*
59393 * INIT_TASK is used to set up the first task table, touch at
59394 * your own risk!. Base=0, limit=0x1fffff (=2MB)
59395 @@ -164,6 +170,7 @@ extern struct cred init_cred;
59396 RCU_INIT_POINTER(.cred, &init_cred), \
59397 .comm = "swapper", \
59398 .thread = INIT_THREAD, \
59399 + INIT_TASK_THREAD_INFO \
59400 .fs = &init_fs, \
59401 .files = &init_files, \
59402 .signal = &init_signals, \
59403 diff -urNp linux-3.0.8/include/linux/intel-iommu.h linux-3.0.8/include/linux/intel-iommu.h
59404 --- linux-3.0.8/include/linux/intel-iommu.h 2011-07-21 22:17:23.000000000 -0400
59405 +++ linux-3.0.8/include/linux/intel-iommu.h 2011-08-23 21:47:56.000000000 -0400
59406 @@ -296,7 +296,7 @@ struct iommu_flush {
59407 u8 fm, u64 type);
59408 void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
59409 unsigned int size_order, u64 type);
59410 -};
59411 +} __no_const;
59412
59413 enum {
59414 SR_DMAR_FECTL_REG,
59415 diff -urNp linux-3.0.8/include/linux/interrupt.h linux-3.0.8/include/linux/interrupt.h
59416 --- linux-3.0.8/include/linux/interrupt.h 2011-07-21 22:17:23.000000000 -0400
59417 +++ linux-3.0.8/include/linux/interrupt.h 2011-08-23 21:47:56.000000000 -0400
59418 @@ -422,7 +422,7 @@ enum
59419 /* map softirq index to softirq name. update 'softirq_to_name' in
59420 * kernel/softirq.c when adding a new softirq.
59421 */
59422 -extern char *softirq_to_name[NR_SOFTIRQS];
59423 +extern const char * const softirq_to_name[NR_SOFTIRQS];
59424
59425 /* softirq mask and active fields moved to irq_cpustat_t in
59426 * asm/hardirq.h to get better cache usage. KAO
59427 @@ -430,12 +430,12 @@ extern char *softirq_to_name[NR_SOFTIRQS
59428
59429 struct softirq_action
59430 {
59431 - void (*action)(struct softirq_action *);
59432 + void (*action)(void);
59433 };
59434
59435 asmlinkage void do_softirq(void);
59436 asmlinkage void __do_softirq(void);
59437 -extern void open_softirq(int nr, void (*action)(struct softirq_action *));
59438 +extern void open_softirq(int nr, void (*action)(void));
59439 extern void softirq_init(void);
59440 static inline void __raise_softirq_irqoff(unsigned int nr)
59441 {
59442 diff -urNp linux-3.0.8/include/linux/kallsyms.h linux-3.0.8/include/linux/kallsyms.h
59443 --- linux-3.0.8/include/linux/kallsyms.h 2011-07-21 22:17:23.000000000 -0400
59444 +++ linux-3.0.8/include/linux/kallsyms.h 2011-08-23 21:48:14.000000000 -0400
59445 @@ -15,7 +15,8 @@
59446
59447 struct module;
59448
59449 -#ifdef CONFIG_KALLSYMS
59450 +#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
59451 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
59452 /* Lookup the address for a symbol. Returns 0 if not found. */
59453 unsigned long kallsyms_lookup_name(const char *name);
59454
59455 @@ -99,6 +100,16 @@ static inline int lookup_symbol_attrs(un
59456 /* Stupid that this does nothing, but I didn't create this mess. */
59457 #define __print_symbol(fmt, addr)
59458 #endif /*CONFIG_KALLSYMS*/
59459 +#else /* when included by kallsyms.c, vsnprintf.c, or
59460 + arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
59461 +extern void __print_symbol(const char *fmt, unsigned long address);
59462 +extern int sprint_backtrace(char *buffer, unsigned long address);
59463 +extern int sprint_symbol(char *buffer, unsigned long address);
59464 +const char *kallsyms_lookup(unsigned long addr,
59465 + unsigned long *symbolsize,
59466 + unsigned long *offset,
59467 + char **modname, char *namebuf);
59468 +#endif
59469
59470 /* This macro allows us to keep printk typechecking */
59471 static void __check_printsym_format(const char *fmt, ...)
59472 diff -urNp linux-3.0.8/include/linux/kgdb.h linux-3.0.8/include/linux/kgdb.h
59473 --- linux-3.0.8/include/linux/kgdb.h 2011-07-21 22:17:23.000000000 -0400
59474 +++ linux-3.0.8/include/linux/kgdb.h 2011-08-26 19:49:56.000000000 -0400
59475 @@ -53,7 +53,7 @@ extern int kgdb_connected;
59476 extern int kgdb_io_module_registered;
59477
59478 extern atomic_t kgdb_setting_breakpoint;
59479 -extern atomic_t kgdb_cpu_doing_single_step;
59480 +extern atomic_unchecked_t kgdb_cpu_doing_single_step;
59481
59482 extern struct task_struct *kgdb_usethread;
59483 extern struct task_struct *kgdb_contthread;
59484 @@ -251,7 +251,7 @@ struct kgdb_arch {
59485 void (*disable_hw_break)(struct pt_regs *regs);
59486 void (*remove_all_hw_break)(void);
59487 void (*correct_hw_break)(void);
59488 -};
59489 +} __do_const;
59490
59491 /**
59492 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
59493 @@ -276,7 +276,7 @@ struct kgdb_io {
59494 void (*pre_exception) (void);
59495 void (*post_exception) (void);
59496 int is_console;
59497 -};
59498 +} __do_const;
59499
59500 extern struct kgdb_arch arch_kgdb_ops;
59501
59502 diff -urNp linux-3.0.8/include/linux/kmod.h linux-3.0.8/include/linux/kmod.h
59503 --- linux-3.0.8/include/linux/kmod.h 2011-07-21 22:17:23.000000000 -0400
59504 +++ linux-3.0.8/include/linux/kmod.h 2011-08-23 21:48:14.000000000 -0400
59505 @@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysc
59506 * usually useless though. */
59507 extern int __request_module(bool wait, const char *name, ...) \
59508 __attribute__((format(printf, 2, 3)));
59509 +extern int ___request_module(bool wait, char *param_name, const char *name, ...) \
59510 + __attribute__((format(printf, 3, 4)));
59511 #define request_module(mod...) __request_module(true, mod)
59512 #define request_module_nowait(mod...) __request_module(false, mod)
59513 #define try_then_request_module(x, mod...) \
59514 diff -urNp linux-3.0.8/include/linux/kvm_host.h linux-3.0.8/include/linux/kvm_host.h
59515 --- linux-3.0.8/include/linux/kvm_host.h 2011-07-21 22:17:23.000000000 -0400
59516 +++ linux-3.0.8/include/linux/kvm_host.h 2011-08-23 21:47:56.000000000 -0400
59517 @@ -307,7 +307,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vc
59518 void vcpu_load(struct kvm_vcpu *vcpu);
59519 void vcpu_put(struct kvm_vcpu *vcpu);
59520
59521 -int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
59522 +int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
59523 struct module *module);
59524 void kvm_exit(void);
59525
59526 @@ -446,7 +446,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(
59527 struct kvm_guest_debug *dbg);
59528 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
59529
59530 -int kvm_arch_init(void *opaque);
59531 +int kvm_arch_init(const void *opaque);
59532 void kvm_arch_exit(void);
59533
59534 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
59535 diff -urNp linux-3.0.8/include/linux/libata.h linux-3.0.8/include/linux/libata.h
59536 --- linux-3.0.8/include/linux/libata.h 2011-07-21 22:17:23.000000000 -0400
59537 +++ linux-3.0.8/include/linux/libata.h 2011-08-26 19:49:56.000000000 -0400
59538 @@ -899,7 +899,7 @@ struct ata_port_operations {
59539 * fields must be pointers.
59540 */
59541 const struct ata_port_operations *inherits;
59542 -};
59543 +} __do_const;
59544
59545 struct ata_port_info {
59546 unsigned long flags;
59547 diff -urNp linux-3.0.8/include/linux/mca.h linux-3.0.8/include/linux/mca.h
59548 --- linux-3.0.8/include/linux/mca.h 2011-07-21 22:17:23.000000000 -0400
59549 +++ linux-3.0.8/include/linux/mca.h 2011-08-23 21:47:56.000000000 -0400
59550 @@ -80,7 +80,7 @@ struct mca_bus_accessor_functions {
59551 int region);
59552 void * (*mca_transform_memory)(struct mca_device *,
59553 void *memory);
59554 -};
59555 +} __no_const;
59556
59557 struct mca_bus {
59558 u64 default_dma_mask;
59559 diff -urNp linux-3.0.8/include/linux/memory.h linux-3.0.8/include/linux/memory.h
59560 --- linux-3.0.8/include/linux/memory.h 2011-07-21 22:17:23.000000000 -0400
59561 +++ linux-3.0.8/include/linux/memory.h 2011-08-23 21:47:56.000000000 -0400
59562 @@ -144,7 +144,7 @@ struct memory_accessor {
59563 size_t count);
59564 ssize_t (*write)(struct memory_accessor *, const char *buf,
59565 off_t offset, size_t count);
59566 -};
59567 +} __no_const;
59568
59569 /*
59570 * Kernel text modification mutex, used for code patching. Users of this lock
59571 diff -urNp linux-3.0.8/include/linux/mfd/abx500.h linux-3.0.8/include/linux/mfd/abx500.h
59572 --- linux-3.0.8/include/linux/mfd/abx500.h 2011-07-21 22:17:23.000000000 -0400
59573 +++ linux-3.0.8/include/linux/mfd/abx500.h 2011-08-23 21:47:56.000000000 -0400
59574 @@ -234,6 +234,7 @@ struct abx500_ops {
59575 int (*event_registers_startup_state_get) (struct device *, u8 *);
59576 int (*startup_irq_enabled) (struct device *, unsigned int);
59577 };
59578 +typedef struct abx500_ops __no_const abx500_ops_no_const;
59579
59580 int abx500_register_ops(struct device *core_dev, struct abx500_ops *ops);
59581 void abx500_remove_ops(struct device *dev);
59582 diff -urNp linux-3.0.8/include/linux/mm.h linux-3.0.8/include/linux/mm.h
59583 --- linux-3.0.8/include/linux/mm.h 2011-10-24 08:05:21.000000000 -0400
59584 +++ linux-3.0.8/include/linux/mm.h 2011-08-23 21:47:56.000000000 -0400
59585 @@ -113,7 +113,14 @@ extern unsigned int kobjsize(const void
59586
59587 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
59588 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
59589 +
59590 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
59591 +#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
59592 +#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
59593 +#else
59594 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
59595 +#endif
59596 +
59597 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
59598 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
59599
59600 @@ -1009,34 +1016,6 @@ int set_page_dirty(struct page *page);
59601 int set_page_dirty_lock(struct page *page);
59602 int clear_page_dirty_for_io(struct page *page);
59603
59604 -/* Is the vma a continuation of the stack vma above it? */
59605 -static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
59606 -{
59607 - return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
59608 -}
59609 -
59610 -static inline int stack_guard_page_start(struct vm_area_struct *vma,
59611 - unsigned long addr)
59612 -{
59613 - return (vma->vm_flags & VM_GROWSDOWN) &&
59614 - (vma->vm_start == addr) &&
59615 - !vma_growsdown(vma->vm_prev, addr);
59616 -}
59617 -
59618 -/* Is the vma a continuation of the stack vma below it? */
59619 -static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
59620 -{
59621 - return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
59622 -}
59623 -
59624 -static inline int stack_guard_page_end(struct vm_area_struct *vma,
59625 - unsigned long addr)
59626 -{
59627 - return (vma->vm_flags & VM_GROWSUP) &&
59628 - (vma->vm_end == addr) &&
59629 - !vma_growsup(vma->vm_next, addr);
59630 -}
59631 -
59632 extern unsigned long move_page_tables(struct vm_area_struct *vma,
59633 unsigned long old_addr, struct vm_area_struct *new_vma,
59634 unsigned long new_addr, unsigned long len);
59635 @@ -1169,6 +1148,15 @@ struct shrinker {
59636 extern void register_shrinker(struct shrinker *);
59637 extern void unregister_shrinker(struct shrinker *);
59638
59639 +#ifdef CONFIG_MMU
59640 +pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
59641 +#else
59642 +static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
59643 +{
59644 + return __pgprot(0);
59645 +}
59646 +#endif
59647 +
59648 int vma_wants_writenotify(struct vm_area_struct *vma);
59649
59650 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
59651 @@ -1452,6 +1440,7 @@ out:
59652 }
59653
59654 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
59655 +extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
59656
59657 extern unsigned long do_brk(unsigned long, unsigned long);
59658
59659 @@ -1510,6 +1499,10 @@ extern struct vm_area_struct * find_vma(
59660 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
59661 struct vm_area_struct **pprev);
59662
59663 +extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
59664 +extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
59665 +extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
59666 +
59667 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
59668 NULL if none. Assume start_addr < end_addr. */
59669 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
59670 @@ -1526,15 +1519,6 @@ static inline unsigned long vma_pages(st
59671 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
59672 }
59673
59674 -#ifdef CONFIG_MMU
59675 -pgprot_t vm_get_page_prot(unsigned long vm_flags);
59676 -#else
59677 -static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
59678 -{
59679 - return __pgprot(0);
59680 -}
59681 -#endif
59682 -
59683 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
59684 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
59685 unsigned long pfn, unsigned long size, pgprot_t);
59686 @@ -1647,7 +1631,7 @@ extern int unpoison_memory(unsigned long
59687 extern int sysctl_memory_failure_early_kill;
59688 extern int sysctl_memory_failure_recovery;
59689 extern void shake_page(struct page *p, int access);
59690 -extern atomic_long_t mce_bad_pages;
59691 +extern atomic_long_unchecked_t mce_bad_pages;
59692 extern int soft_offline_page(struct page *page, int flags);
59693
59694 extern void dump_page(struct page *page);
59695 @@ -1661,5 +1645,11 @@ extern void copy_user_huge_page(struct p
59696 unsigned int pages_per_huge_page);
59697 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
59698
59699 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
59700 +extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
59701 +#else
59702 +static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
59703 +#endif
59704 +
59705 #endif /* __KERNEL__ */
59706 #endif /* _LINUX_MM_H */
59707 diff -urNp linux-3.0.8/include/linux/mm_types.h linux-3.0.8/include/linux/mm_types.h
59708 --- linux-3.0.8/include/linux/mm_types.h 2011-07-21 22:17:23.000000000 -0400
59709 +++ linux-3.0.8/include/linux/mm_types.h 2011-08-23 21:47:56.000000000 -0400
59710 @@ -184,6 +184,8 @@ struct vm_area_struct {
59711 #ifdef CONFIG_NUMA
59712 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
59713 #endif
59714 +
59715 + struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
59716 };
59717
59718 struct core_thread {
59719 @@ -316,6 +318,24 @@ struct mm_struct {
59720 #ifdef CONFIG_CPUMASK_OFFSTACK
59721 struct cpumask cpumask_allocation;
59722 #endif
59723 +
59724 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
59725 + unsigned long pax_flags;
59726 +#endif
59727 +
59728 +#ifdef CONFIG_PAX_DLRESOLVE
59729 + unsigned long call_dl_resolve;
59730 +#endif
59731 +
59732 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
59733 + unsigned long call_syscall;
59734 +#endif
59735 +
59736 +#ifdef CONFIG_PAX_ASLR
59737 + unsigned long delta_mmap; /* randomized offset */
59738 + unsigned long delta_stack; /* randomized offset */
59739 +#endif
59740 +
59741 };
59742
59743 static inline void mm_init_cpumask(struct mm_struct *mm)
59744 diff -urNp linux-3.0.8/include/linux/mmu_notifier.h linux-3.0.8/include/linux/mmu_notifier.h
59745 --- linux-3.0.8/include/linux/mmu_notifier.h 2011-07-21 22:17:23.000000000 -0400
59746 +++ linux-3.0.8/include/linux/mmu_notifier.h 2011-08-23 21:47:56.000000000 -0400
59747 @@ -255,12 +255,12 @@ static inline void mmu_notifier_mm_destr
59748 */
59749 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
59750 ({ \
59751 - pte_t __pte; \
59752 + pte_t ___pte; \
59753 struct vm_area_struct *___vma = __vma; \
59754 unsigned long ___address = __address; \
59755 - __pte = ptep_clear_flush(___vma, ___address, __ptep); \
59756 + ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
59757 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
59758 - __pte; \
59759 + ___pte; \
59760 })
59761
59762 #define pmdp_clear_flush_notify(__vma, __address, __pmdp) \
59763 diff -urNp linux-3.0.8/include/linux/mmzone.h linux-3.0.8/include/linux/mmzone.h
59764 --- linux-3.0.8/include/linux/mmzone.h 2011-07-21 22:17:23.000000000 -0400
59765 +++ linux-3.0.8/include/linux/mmzone.h 2011-08-23 21:47:56.000000000 -0400
59766 @@ -350,7 +350,7 @@ struct zone {
59767 unsigned long flags; /* zone flags, see below */
59768
59769 /* Zone statistics */
59770 - atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
59771 + atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
59772
59773 /*
59774 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
59775 diff -urNp linux-3.0.8/include/linux/mod_devicetable.h linux-3.0.8/include/linux/mod_devicetable.h
59776 --- linux-3.0.8/include/linux/mod_devicetable.h 2011-07-21 22:17:23.000000000 -0400
59777 +++ linux-3.0.8/include/linux/mod_devicetable.h 2011-08-23 21:47:56.000000000 -0400
59778 @@ -12,7 +12,7 @@
59779 typedef unsigned long kernel_ulong_t;
59780 #endif
59781
59782 -#define PCI_ANY_ID (~0)
59783 +#define PCI_ANY_ID ((__u16)~0)
59784
59785 struct pci_device_id {
59786 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
59787 @@ -131,7 +131,7 @@ struct usb_device_id {
59788 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
59789 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
59790
59791 -#define HID_ANY_ID (~0)
59792 +#define HID_ANY_ID (~0U)
59793
59794 struct hid_device_id {
59795 __u16 bus;
59796 diff -urNp linux-3.0.8/include/linux/module.h linux-3.0.8/include/linux/module.h
59797 --- linux-3.0.8/include/linux/module.h 2011-07-21 22:17:23.000000000 -0400
59798 +++ linux-3.0.8/include/linux/module.h 2011-08-23 21:47:56.000000000 -0400
59799 @@ -16,6 +16,7 @@
59800 #include <linux/kobject.h>
59801 #include <linux/moduleparam.h>
59802 #include <linux/tracepoint.h>
59803 +#include <linux/fs.h>
59804
59805 #include <linux/percpu.h>
59806 #include <asm/module.h>
59807 @@ -325,19 +326,16 @@ struct module
59808 int (*init)(void);
59809
59810 /* If this is non-NULL, vfree after init() returns */
59811 - void *module_init;
59812 + void *module_init_rx, *module_init_rw;
59813
59814 /* Here is the actual code + data, vfree'd on unload. */
59815 - void *module_core;
59816 + void *module_core_rx, *module_core_rw;
59817
59818 /* Here are the sizes of the init and core sections */
59819 - unsigned int init_size, core_size;
59820 + unsigned int init_size_rw, core_size_rw;
59821
59822 /* The size of the executable code in each section. */
59823 - unsigned int init_text_size, core_text_size;
59824 -
59825 - /* Size of RO sections of the module (text+rodata) */
59826 - unsigned int init_ro_size, core_ro_size;
59827 + unsigned int init_size_rx, core_size_rx;
59828
59829 /* Arch-specific module values */
59830 struct mod_arch_specific arch;
59831 @@ -393,6 +391,10 @@ struct module
59832 #ifdef CONFIG_EVENT_TRACING
59833 struct ftrace_event_call **trace_events;
59834 unsigned int num_trace_events;
59835 + struct file_operations trace_id;
59836 + struct file_operations trace_enable;
59837 + struct file_operations trace_format;
59838 + struct file_operations trace_filter;
59839 #endif
59840 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
59841 unsigned int num_ftrace_callsites;
59842 @@ -443,16 +445,46 @@ bool is_module_address(unsigned long add
59843 bool is_module_percpu_address(unsigned long addr);
59844 bool is_module_text_address(unsigned long addr);
59845
59846 +static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
59847 +{
59848 +
59849 +#ifdef CONFIG_PAX_KERNEXEC
59850 + if (ktla_ktva(addr) >= (unsigned long)start &&
59851 + ktla_ktva(addr) < (unsigned long)start + size)
59852 + return 1;
59853 +#endif
59854 +
59855 + return ((void *)addr >= start && (void *)addr < start + size);
59856 +}
59857 +
59858 +static inline int within_module_core_rx(unsigned long addr, struct module *mod)
59859 +{
59860 + return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
59861 +}
59862 +
59863 +static inline int within_module_core_rw(unsigned long addr, struct module *mod)
59864 +{
59865 + return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
59866 +}
59867 +
59868 +static inline int within_module_init_rx(unsigned long addr, struct module *mod)
59869 +{
59870 + return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
59871 +}
59872 +
59873 +static inline int within_module_init_rw(unsigned long addr, struct module *mod)
59874 +{
59875 + return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
59876 +}
59877 +
59878 static inline int within_module_core(unsigned long addr, struct module *mod)
59879 {
59880 - return (unsigned long)mod->module_core <= addr &&
59881 - addr < (unsigned long)mod->module_core + mod->core_size;
59882 + return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
59883 }
59884
59885 static inline int within_module_init(unsigned long addr, struct module *mod)
59886 {
59887 - return (unsigned long)mod->module_init <= addr &&
59888 - addr < (unsigned long)mod->module_init + mod->init_size;
59889 + return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
59890 }
59891
59892 /* Search for module by name: must hold module_mutex. */
59893 diff -urNp linux-3.0.8/include/linux/moduleloader.h linux-3.0.8/include/linux/moduleloader.h
59894 --- linux-3.0.8/include/linux/moduleloader.h 2011-07-21 22:17:23.000000000 -0400
59895 +++ linux-3.0.8/include/linux/moduleloader.h 2011-08-23 21:47:56.000000000 -0400
59896 @@ -20,9 +20,21 @@ unsigned int arch_mod_section_prepend(st
59897 sections. Returns NULL on failure. */
59898 void *module_alloc(unsigned long size);
59899
59900 +#ifdef CONFIG_PAX_KERNEXEC
59901 +void *module_alloc_exec(unsigned long size);
59902 +#else
59903 +#define module_alloc_exec(x) module_alloc(x)
59904 +#endif
59905 +
59906 /* Free memory returned from module_alloc. */
59907 void module_free(struct module *mod, void *module_region);
59908
59909 +#ifdef CONFIG_PAX_KERNEXEC
59910 +void module_free_exec(struct module *mod, void *module_region);
59911 +#else
59912 +#define module_free_exec(x, y) module_free((x), (y))
59913 +#endif
59914 +
59915 /* Apply the given relocation to the (simplified) ELF. Return -error
59916 or 0. */
59917 int apply_relocate(Elf_Shdr *sechdrs,
59918 diff -urNp linux-3.0.8/include/linux/moduleparam.h linux-3.0.8/include/linux/moduleparam.h
59919 --- linux-3.0.8/include/linux/moduleparam.h 2011-07-21 22:17:23.000000000 -0400
59920 +++ linux-3.0.8/include/linux/moduleparam.h 2011-08-23 21:47:56.000000000 -0400
59921 @@ -255,7 +255,7 @@ static inline void __kernel_param_unlock
59922 * @len is usually just sizeof(string).
59923 */
59924 #define module_param_string(name, string, len, perm) \
59925 - static const struct kparam_string __param_string_##name \
59926 + static const struct kparam_string __param_string_##name __used \
59927 = { len, string }; \
59928 __module_param_call(MODULE_PARAM_PREFIX, name, \
59929 &param_ops_string, \
59930 @@ -370,7 +370,7 @@ extern int param_get_invbool(char *buffe
59931 * module_param_named() for why this might be necessary.
59932 */
59933 #define module_param_array_named(name, array, type, nump, perm) \
59934 - static const struct kparam_array __param_arr_##name \
59935 + static const struct kparam_array __param_arr_##name __used \
59936 = { .max = ARRAY_SIZE(array), .num = nump, \
59937 .ops = &param_ops_##type, \
59938 .elemsize = sizeof(array[0]), .elem = array }; \
59939 diff -urNp linux-3.0.8/include/linux/namei.h linux-3.0.8/include/linux/namei.h
59940 --- linux-3.0.8/include/linux/namei.h 2011-07-21 22:17:23.000000000 -0400
59941 +++ linux-3.0.8/include/linux/namei.h 2011-08-23 21:47:56.000000000 -0400
59942 @@ -24,7 +24,7 @@ struct nameidata {
59943 unsigned seq;
59944 int last_type;
59945 unsigned depth;
59946 - char *saved_names[MAX_NESTED_LINKS + 1];
59947 + const char *saved_names[MAX_NESTED_LINKS + 1];
59948
59949 /* Intent data */
59950 union {
59951 @@ -91,12 +91,12 @@ extern int follow_up(struct path *);
59952 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
59953 extern void unlock_rename(struct dentry *, struct dentry *);
59954
59955 -static inline void nd_set_link(struct nameidata *nd, char *path)
59956 +static inline void nd_set_link(struct nameidata *nd, const char *path)
59957 {
59958 nd->saved_names[nd->depth] = path;
59959 }
59960
59961 -static inline char *nd_get_link(struct nameidata *nd)
59962 +static inline const char *nd_get_link(const struct nameidata *nd)
59963 {
59964 return nd->saved_names[nd->depth];
59965 }
59966 diff -urNp linux-3.0.8/include/linux/netdevice.h linux-3.0.8/include/linux/netdevice.h
59967 --- linux-3.0.8/include/linux/netdevice.h 2011-10-24 08:05:21.000000000 -0400
59968 +++ linux-3.0.8/include/linux/netdevice.h 2011-08-23 21:47:56.000000000 -0400
59969 @@ -979,6 +979,7 @@ struct net_device_ops {
59970 int (*ndo_set_features)(struct net_device *dev,
59971 u32 features);
59972 };
59973 +typedef struct net_device_ops __no_const net_device_ops_no_const;
59974
59975 /*
59976 * The DEVICE structure.
59977 diff -urNp linux-3.0.8/include/linux/netfilter/xt_gradm.h linux-3.0.8/include/linux/netfilter/xt_gradm.h
59978 --- linux-3.0.8/include/linux/netfilter/xt_gradm.h 1969-12-31 19:00:00.000000000 -0500
59979 +++ linux-3.0.8/include/linux/netfilter/xt_gradm.h 2011-08-23 21:48:14.000000000 -0400
59980 @@ -0,0 +1,9 @@
59981 +#ifndef _LINUX_NETFILTER_XT_GRADM_H
59982 +#define _LINUX_NETFILTER_XT_GRADM_H 1
59983 +
59984 +struct xt_gradm_mtinfo {
59985 + __u16 flags;
59986 + __u16 invflags;
59987 +};
59988 +
59989 +#endif
59990 diff -urNp linux-3.0.8/include/linux/of_pdt.h linux-3.0.8/include/linux/of_pdt.h
59991 --- linux-3.0.8/include/linux/of_pdt.h 2011-07-21 22:17:23.000000000 -0400
59992 +++ linux-3.0.8/include/linux/of_pdt.h 2011-08-30 06:20:11.000000000 -0400
59993 @@ -32,7 +32,7 @@ struct of_pdt_ops {
59994
59995 /* return 0 on success; fill in 'len' with number of bytes in path */
59996 int (*pkg2path)(phandle node, char *buf, const int buflen, int *len);
59997 -};
59998 +} __no_const;
59999
60000 extern void *prom_early_alloc(unsigned long size);
60001
60002 diff -urNp linux-3.0.8/include/linux/oprofile.h linux-3.0.8/include/linux/oprofile.h
60003 --- linux-3.0.8/include/linux/oprofile.h 2011-07-21 22:17:23.000000000 -0400
60004 +++ linux-3.0.8/include/linux/oprofile.h 2011-08-23 21:47:56.000000000 -0400
60005 @@ -139,9 +139,9 @@ int oprofilefs_create_ulong(struct super
60006 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
60007 char const * name, ulong * val);
60008
60009 -/** Create a file for read-only access to an atomic_t. */
60010 +/** Create a file for read-only access to an atomic_unchecked_t. */
60011 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
60012 - char const * name, atomic_t * val);
60013 + char const * name, atomic_unchecked_t * val);
60014
60015 /** create a directory */
60016 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
60017 diff -urNp linux-3.0.8/include/linux/padata.h linux-3.0.8/include/linux/padata.h
60018 --- linux-3.0.8/include/linux/padata.h 2011-07-21 22:17:23.000000000 -0400
60019 +++ linux-3.0.8/include/linux/padata.h 2011-08-23 21:47:56.000000000 -0400
60020 @@ -129,7 +129,7 @@ struct parallel_data {
60021 struct padata_instance *pinst;
60022 struct padata_parallel_queue __percpu *pqueue;
60023 struct padata_serial_queue __percpu *squeue;
60024 - atomic_t seq_nr;
60025 + atomic_unchecked_t seq_nr;
60026 atomic_t reorder_objects;
60027 atomic_t refcnt;
60028 unsigned int max_seq_nr;
60029 diff -urNp linux-3.0.8/include/linux/perf_event.h linux-3.0.8/include/linux/perf_event.h
60030 --- linux-3.0.8/include/linux/perf_event.h 2011-07-21 22:17:23.000000000 -0400
60031 +++ linux-3.0.8/include/linux/perf_event.h 2011-08-23 21:47:56.000000000 -0400
60032 @@ -761,8 +761,8 @@ struct perf_event {
60033
60034 enum perf_event_active_state state;
60035 unsigned int attach_state;
60036 - local64_t count;
60037 - atomic64_t child_count;
60038 + local64_t count; /* PaX: fix it one day */
60039 + atomic64_unchecked_t child_count;
60040
60041 /*
60042 * These are the total time in nanoseconds that the event
60043 @@ -813,8 +813,8 @@ struct perf_event {
60044 * These accumulate total time (in nanoseconds) that children
60045 * events have been enabled and running, respectively.
60046 */
60047 - atomic64_t child_total_time_enabled;
60048 - atomic64_t child_total_time_running;
60049 + atomic64_unchecked_t child_total_time_enabled;
60050 + atomic64_unchecked_t child_total_time_running;
60051
60052 /*
60053 * Protect attach/detach and child_list:
60054 diff -urNp linux-3.0.8/include/linux/pipe_fs_i.h linux-3.0.8/include/linux/pipe_fs_i.h
60055 --- linux-3.0.8/include/linux/pipe_fs_i.h 2011-07-21 22:17:23.000000000 -0400
60056 +++ linux-3.0.8/include/linux/pipe_fs_i.h 2011-08-23 21:47:56.000000000 -0400
60057 @@ -46,9 +46,9 @@ struct pipe_buffer {
60058 struct pipe_inode_info {
60059 wait_queue_head_t wait;
60060 unsigned int nrbufs, curbuf, buffers;
60061 - unsigned int readers;
60062 - unsigned int writers;
60063 - unsigned int waiting_writers;
60064 + atomic_t readers;
60065 + atomic_t writers;
60066 + atomic_t waiting_writers;
60067 unsigned int r_counter;
60068 unsigned int w_counter;
60069 struct page *tmp_page;
60070 diff -urNp linux-3.0.8/include/linux/pm_runtime.h linux-3.0.8/include/linux/pm_runtime.h
60071 --- linux-3.0.8/include/linux/pm_runtime.h 2011-07-21 22:17:23.000000000 -0400
60072 +++ linux-3.0.8/include/linux/pm_runtime.h 2011-08-23 21:47:56.000000000 -0400
60073 @@ -94,7 +94,7 @@ static inline bool pm_runtime_callbacks_
60074
60075 static inline void pm_runtime_mark_last_busy(struct device *dev)
60076 {
60077 - ACCESS_ONCE(dev->power.last_busy) = jiffies;
60078 + ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
60079 }
60080
60081 #else /* !CONFIG_PM_RUNTIME */
60082 diff -urNp linux-3.0.8/include/linux/poison.h linux-3.0.8/include/linux/poison.h
60083 --- linux-3.0.8/include/linux/poison.h 2011-07-21 22:17:23.000000000 -0400
60084 +++ linux-3.0.8/include/linux/poison.h 2011-08-23 21:47:56.000000000 -0400
60085 @@ -19,8 +19,8 @@
60086 * under normal circumstances, used to verify that nobody uses
60087 * non-initialized list entries.
60088 */
60089 -#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
60090 -#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
60091 +#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
60092 +#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
60093
60094 /********** include/linux/timer.h **********/
60095 /*
60096 diff -urNp linux-3.0.8/include/linux/preempt.h linux-3.0.8/include/linux/preempt.h
60097 --- linux-3.0.8/include/linux/preempt.h 2011-07-21 22:17:23.000000000 -0400
60098 +++ linux-3.0.8/include/linux/preempt.h 2011-08-23 21:47:56.000000000 -0400
60099 @@ -115,7 +115,7 @@ struct preempt_ops {
60100 void (*sched_in)(struct preempt_notifier *notifier, int cpu);
60101 void (*sched_out)(struct preempt_notifier *notifier,
60102 struct task_struct *next);
60103 -};
60104 +} __no_const;
60105
60106 /**
60107 * preempt_notifier - key for installing preemption notifiers
60108 diff -urNp linux-3.0.8/include/linux/proc_fs.h linux-3.0.8/include/linux/proc_fs.h
60109 --- linux-3.0.8/include/linux/proc_fs.h 2011-07-21 22:17:23.000000000 -0400
60110 +++ linux-3.0.8/include/linux/proc_fs.h 2011-08-23 21:48:14.000000000 -0400
60111 @@ -155,6 +155,19 @@ static inline struct proc_dir_entry *pro
60112 return proc_create_data(name, mode, parent, proc_fops, NULL);
60113 }
60114
60115 +static inline struct proc_dir_entry *proc_create_grsec(const char *name, mode_t mode,
60116 + struct proc_dir_entry *parent, const struct file_operations *proc_fops)
60117 +{
60118 +#ifdef CONFIG_GRKERNSEC_PROC_USER
60119 + return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
60120 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
60121 + return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
60122 +#else
60123 + return proc_create_data(name, mode, parent, proc_fops, NULL);
60124 +#endif
60125 +}
60126 +
60127 +
60128 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
60129 mode_t mode, struct proc_dir_entry *base,
60130 read_proc_t *read_proc, void * data)
60131 @@ -258,7 +271,7 @@ union proc_op {
60132 int (*proc_show)(struct seq_file *m,
60133 struct pid_namespace *ns, struct pid *pid,
60134 struct task_struct *task);
60135 -};
60136 +} __no_const;
60137
60138 struct ctl_table_header;
60139 struct ctl_table;
60140 diff -urNp linux-3.0.8/include/linux/ptrace.h linux-3.0.8/include/linux/ptrace.h
60141 --- linux-3.0.8/include/linux/ptrace.h 2011-07-21 22:17:23.000000000 -0400
60142 +++ linux-3.0.8/include/linux/ptrace.h 2011-08-23 21:48:14.000000000 -0400
60143 @@ -115,10 +115,10 @@ extern void __ptrace_unlink(struct task_
60144 extern void exit_ptrace(struct task_struct *tracer);
60145 #define PTRACE_MODE_READ 1
60146 #define PTRACE_MODE_ATTACH 2
60147 -/* Returns 0 on success, -errno on denial. */
60148 -extern int __ptrace_may_access(struct task_struct *task, unsigned int mode);
60149 /* Returns true on success, false on denial. */
60150 extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
60151 +/* Returns true on success, false on denial. */
60152 +extern bool ptrace_may_access_log(struct task_struct *task, unsigned int mode);
60153
60154 static inline int ptrace_reparented(struct task_struct *child)
60155 {
60156 diff -urNp linux-3.0.8/include/linux/random.h linux-3.0.8/include/linux/random.h
60157 --- linux-3.0.8/include/linux/random.h 2011-10-24 08:05:21.000000000 -0400
60158 +++ linux-3.0.8/include/linux/random.h 2011-08-23 21:47:56.000000000 -0400
60159 @@ -69,12 +69,17 @@ void srandom32(u32 seed);
60160
60161 u32 prandom32(struct rnd_state *);
60162
60163 +static inline unsigned long pax_get_random_long(void)
60164 +{
60165 + return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
60166 +}
60167 +
60168 /*
60169 * Handle minimum values for seeds
60170 */
60171 static inline u32 __seed(u32 x, u32 m)
60172 {
60173 - return (x < m) ? x + m : x;
60174 + return (x <= m) ? x + m + 1 : x;
60175 }
60176
60177 /**
60178 diff -urNp linux-3.0.8/include/linux/reboot.h linux-3.0.8/include/linux/reboot.h
60179 --- linux-3.0.8/include/linux/reboot.h 2011-07-21 22:17:23.000000000 -0400
60180 +++ linux-3.0.8/include/linux/reboot.h 2011-08-23 21:47:56.000000000 -0400
60181 @@ -47,9 +47,9 @@ extern int unregister_reboot_notifier(st
60182 * Architecture-specific implementations of sys_reboot commands.
60183 */
60184
60185 -extern void machine_restart(char *cmd);
60186 -extern void machine_halt(void);
60187 -extern void machine_power_off(void);
60188 +extern void machine_restart(char *cmd) __noreturn;
60189 +extern void machine_halt(void) __noreturn;
60190 +extern void machine_power_off(void) __noreturn;
60191
60192 extern void machine_shutdown(void);
60193 struct pt_regs;
60194 @@ -60,9 +60,9 @@ extern void machine_crash_shutdown(struc
60195 */
60196
60197 extern void kernel_restart_prepare(char *cmd);
60198 -extern void kernel_restart(char *cmd);
60199 -extern void kernel_halt(void);
60200 -extern void kernel_power_off(void);
60201 +extern void kernel_restart(char *cmd) __noreturn;
60202 +extern void kernel_halt(void) __noreturn;
60203 +extern void kernel_power_off(void) __noreturn;
60204
60205 extern int C_A_D; /* for sysctl */
60206 void ctrl_alt_del(void);
60207 @@ -76,7 +76,7 @@ extern int orderly_poweroff(bool force);
60208 * Emergency restart, callable from an interrupt handler.
60209 */
60210
60211 -extern void emergency_restart(void);
60212 +extern void emergency_restart(void) __noreturn;
60213 #include <asm/emergency-restart.h>
60214
60215 #endif
60216 diff -urNp linux-3.0.8/include/linux/reiserfs_fs.h linux-3.0.8/include/linux/reiserfs_fs.h
60217 --- linux-3.0.8/include/linux/reiserfs_fs.h 2011-07-21 22:17:23.000000000 -0400
60218 +++ linux-3.0.8/include/linux/reiserfs_fs.h 2011-08-23 21:47:56.000000000 -0400
60219 @@ -1406,7 +1406,7 @@ static inline loff_t max_reiserfs_offset
60220 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
60221
60222 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
60223 -#define get_generation(s) atomic_read (&fs_generation(s))
60224 +#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
60225 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
60226 #define __fs_changed(gen,s) (gen != get_generation (s))
60227 #define fs_changed(gen,s) \
60228 diff -urNp linux-3.0.8/include/linux/reiserfs_fs_sb.h linux-3.0.8/include/linux/reiserfs_fs_sb.h
60229 --- linux-3.0.8/include/linux/reiserfs_fs_sb.h 2011-07-21 22:17:23.000000000 -0400
60230 +++ linux-3.0.8/include/linux/reiserfs_fs_sb.h 2011-08-23 21:47:56.000000000 -0400
60231 @@ -386,7 +386,7 @@ struct reiserfs_sb_info {
60232 /* Comment? -Hans */
60233 wait_queue_head_t s_wait;
60234 /* To be obsoleted soon by per buffer seals.. -Hans */
60235 - atomic_t s_generation_counter; // increased by one every time the
60236 + atomic_unchecked_t s_generation_counter; // increased by one every time the
60237 // tree gets re-balanced
60238 unsigned long s_properties; /* File system properties. Currently holds
60239 on-disk FS format */
60240 diff -urNp linux-3.0.8/include/linux/relay.h linux-3.0.8/include/linux/relay.h
60241 --- linux-3.0.8/include/linux/relay.h 2011-07-21 22:17:23.000000000 -0400
60242 +++ linux-3.0.8/include/linux/relay.h 2011-08-23 21:47:56.000000000 -0400
60243 @@ -159,7 +159,7 @@ struct rchan_callbacks
60244 * The callback should return 0 if successful, negative if not.
60245 */
60246 int (*remove_buf_file)(struct dentry *dentry);
60247 -};
60248 +} __no_const;
60249
60250 /*
60251 * CONFIG_RELAY kernel API, kernel/relay.c
60252 diff -urNp linux-3.0.8/include/linux/rfkill.h linux-3.0.8/include/linux/rfkill.h
60253 --- linux-3.0.8/include/linux/rfkill.h 2011-07-21 22:17:23.000000000 -0400
60254 +++ linux-3.0.8/include/linux/rfkill.h 2011-08-23 21:47:56.000000000 -0400
60255 @@ -147,6 +147,7 @@ struct rfkill_ops {
60256 void (*query)(struct rfkill *rfkill, void *data);
60257 int (*set_block)(void *data, bool blocked);
60258 };
60259 +typedef struct rfkill_ops __no_const rfkill_ops_no_const;
60260
60261 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
60262 /**
60263 diff -urNp linux-3.0.8/include/linux/rmap.h linux-3.0.8/include/linux/rmap.h
60264 --- linux-3.0.8/include/linux/rmap.h 2011-07-21 22:17:23.000000000 -0400
60265 +++ linux-3.0.8/include/linux/rmap.h 2011-08-23 21:47:56.000000000 -0400
60266 @@ -119,8 +119,8 @@ static inline void anon_vma_unlock(struc
60267 void anon_vma_init(void); /* create anon_vma_cachep */
60268 int anon_vma_prepare(struct vm_area_struct *);
60269 void unlink_anon_vmas(struct vm_area_struct *);
60270 -int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
60271 -int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
60272 +int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
60273 +int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
60274 void __anon_vma_link(struct vm_area_struct *);
60275
60276 static inline void anon_vma_merge(struct vm_area_struct *vma,
60277 diff -urNp linux-3.0.8/include/linux/sched.h linux-3.0.8/include/linux/sched.h
60278 --- linux-3.0.8/include/linux/sched.h 2011-10-24 08:05:32.000000000 -0400
60279 +++ linux-3.0.8/include/linux/sched.h 2011-10-17 23:17:19.000000000 -0400
60280 @@ -100,6 +100,7 @@ struct bio_list;
60281 struct fs_struct;
60282 struct perf_event_context;
60283 struct blk_plug;
60284 +struct linux_binprm;
60285
60286 /*
60287 * List of flags we want to share for kernel threads,
60288 @@ -380,10 +381,13 @@ struct user_namespace;
60289 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
60290
60291 extern int sysctl_max_map_count;
60292 +extern unsigned long sysctl_heap_stack_gap;
60293
60294 #include <linux/aio.h>
60295
60296 #ifdef CONFIG_MMU
60297 +extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
60298 +extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
60299 extern void arch_pick_mmap_layout(struct mm_struct *mm);
60300 extern unsigned long
60301 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
60302 @@ -629,6 +633,17 @@ struct signal_struct {
60303 #ifdef CONFIG_TASKSTATS
60304 struct taskstats *stats;
60305 #endif
60306 +
60307 +#ifdef CONFIG_GRKERNSEC
60308 + u32 curr_ip;
60309 + u32 saved_ip;
60310 + u32 gr_saddr;
60311 + u32 gr_daddr;
60312 + u16 gr_sport;
60313 + u16 gr_dport;
60314 + u8 used_accept:1;
60315 +#endif
60316 +
60317 #ifdef CONFIG_AUDIT
60318 unsigned audit_tty;
60319 struct tty_audit_buf *tty_audit_buf;
60320 @@ -710,6 +725,11 @@ struct user_struct {
60321 struct key *session_keyring; /* UID's default session keyring */
60322 #endif
60323
60324 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
60325 + unsigned int banned;
60326 + unsigned long ban_expires;
60327 +#endif
60328 +
60329 /* Hash table maintenance information */
60330 struct hlist_node uidhash_node;
60331 uid_t uid;
60332 @@ -1340,8 +1360,8 @@ struct task_struct {
60333 struct list_head thread_group;
60334
60335 struct completion *vfork_done; /* for vfork() */
60336 - int __user *set_child_tid; /* CLONE_CHILD_SETTID */
60337 - int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
60338 + pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
60339 + pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
60340
60341 cputime_t utime, stime, utimescaled, stimescaled;
60342 cputime_t gtime;
60343 @@ -1357,13 +1377,6 @@ struct task_struct {
60344 struct task_cputime cputime_expires;
60345 struct list_head cpu_timers[3];
60346
60347 -/* process credentials */
60348 - const struct cred __rcu *real_cred; /* objective and real subjective task
60349 - * credentials (COW) */
60350 - const struct cred __rcu *cred; /* effective (overridable) subjective task
60351 - * credentials (COW) */
60352 - struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
60353 -
60354 char comm[TASK_COMM_LEN]; /* executable name excluding path
60355 - access with [gs]et_task_comm (which lock
60356 it with task_lock())
60357 @@ -1380,8 +1393,16 @@ struct task_struct {
60358 #endif
60359 /* CPU-specific state of this task */
60360 struct thread_struct thread;
60361 +/* thread_info moved to task_struct */
60362 +#ifdef CONFIG_X86
60363 + struct thread_info tinfo;
60364 +#endif
60365 /* filesystem information */
60366 struct fs_struct *fs;
60367 +
60368 + const struct cred __rcu *cred; /* effective (overridable) subjective task
60369 + * credentials (COW) */
60370 +
60371 /* open file information */
60372 struct files_struct *files;
60373 /* namespaces */
60374 @@ -1428,6 +1449,11 @@ struct task_struct {
60375 struct rt_mutex_waiter *pi_blocked_on;
60376 #endif
60377
60378 +/* process credentials */
60379 + const struct cred __rcu *real_cred; /* objective and real subjective task
60380 + * credentials (COW) */
60381 + struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
60382 +
60383 #ifdef CONFIG_DEBUG_MUTEXES
60384 /* mutex deadlock detection */
60385 struct mutex_waiter *blocked_on;
60386 @@ -1538,6 +1564,21 @@ struct task_struct {
60387 unsigned long default_timer_slack_ns;
60388
60389 struct list_head *scm_work_list;
60390 +
60391 +#ifdef CONFIG_GRKERNSEC
60392 + /* grsecurity */
60393 + struct dentry *gr_chroot_dentry;
60394 + struct acl_subject_label *acl;
60395 + struct acl_role_label *role;
60396 + struct file *exec_file;
60397 + u16 acl_role_id;
60398 + /* is this the task that authenticated to the special role */
60399 + u8 acl_sp_role;
60400 + u8 is_writable;
60401 + u8 brute;
60402 + u8 gr_is_chrooted;
60403 +#endif
60404 +
60405 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
60406 /* Index of current stored address in ret_stack */
60407 int curr_ret_stack;
60408 @@ -1572,6 +1613,57 @@ struct task_struct {
60409 #endif
60410 };
60411
60412 +#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
60413 +#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
60414 +#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
60415 +#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
60416 +/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
60417 +#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
60418 +
60419 +#ifdef CONFIG_PAX_SOFTMODE
60420 +extern int pax_softmode;
60421 +#endif
60422 +
60423 +extern int pax_check_flags(unsigned long *);
60424 +
60425 +/* if tsk != current then task_lock must be held on it */
60426 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
60427 +static inline unsigned long pax_get_flags(struct task_struct *tsk)
60428 +{
60429 + if (likely(tsk->mm))
60430 + return tsk->mm->pax_flags;
60431 + else
60432 + return 0UL;
60433 +}
60434 +
60435 +/* if tsk != current then task_lock must be held on it */
60436 +static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
60437 +{
60438 + if (likely(tsk->mm)) {
60439 + tsk->mm->pax_flags = flags;
60440 + return 0;
60441 + }
60442 + return -EINVAL;
60443 +}
60444 +#endif
60445 +
60446 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
60447 +extern void pax_set_initial_flags(struct linux_binprm *bprm);
60448 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
60449 +extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
60450 +#endif
60451 +
60452 +extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
60453 +extern void pax_report_insns(void *pc, void *sp);
60454 +extern void pax_report_refcount_overflow(struct pt_regs *regs);
60455 +extern NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type) ATTRIB_NORET;
60456 +
60457 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
60458 +extern void pax_track_stack(void);
60459 +#else
60460 +static inline void pax_track_stack(void) {}
60461 +#endif
60462 +
60463 /* Future-safe accessor for struct task_struct's cpus_allowed. */
60464 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
60465
60466 @@ -1768,6 +1860,7 @@ extern void thread_group_times(struct ta
60467 #define PF_DUMPCORE 0x00000200 /* dumped core */
60468 #define PF_SIGNALED 0x00000400 /* killed by a signal */
60469 #define PF_MEMALLOC 0x00000800 /* Allocating memory */
60470 +#define PF_NPROC_EXCEEDED 0x00001000 /* set_user noticed that RLIMIT_NPROC was exceeded */
60471 #define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */
60472 #define PF_FREEZING 0x00004000 /* freeze in progress. do not account to load */
60473 #define PF_NOFREEZE 0x00008000 /* this thread should not be frozen */
60474 @@ -2055,7 +2148,9 @@ void yield(void);
60475 extern struct exec_domain default_exec_domain;
60476
60477 union thread_union {
60478 +#ifndef CONFIG_X86
60479 struct thread_info thread_info;
60480 +#endif
60481 unsigned long stack[THREAD_SIZE/sizeof(long)];
60482 };
60483
60484 @@ -2088,6 +2183,7 @@ extern struct pid_namespace init_pid_ns;
60485 */
60486
60487 extern struct task_struct *find_task_by_vpid(pid_t nr);
60488 +extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
60489 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
60490 struct pid_namespace *ns);
60491
60492 @@ -2224,7 +2320,7 @@ extern void __cleanup_sighand(struct sig
60493 extern void exit_itimers(struct signal_struct *);
60494 extern void flush_itimer_signals(void);
60495
60496 -extern NORET_TYPE void do_group_exit(int);
60497 +extern NORET_TYPE void do_group_exit(int) ATTRIB_NORET;
60498
60499 extern void daemonize(const char *, ...);
60500 extern int allow_signal(int);
60501 @@ -2392,13 +2488,17 @@ static inline unsigned long *end_of_stac
60502
60503 #endif
60504
60505 -static inline int object_is_on_stack(void *obj)
60506 +static inline int object_starts_on_stack(void *obj)
60507 {
60508 - void *stack = task_stack_page(current);
60509 + const void *stack = task_stack_page(current);
60510
60511 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
60512 }
60513
60514 +#ifdef CONFIG_PAX_USERCOPY
60515 +extern int object_is_on_stack(const void *obj, unsigned long len);
60516 +#endif
60517 +
60518 extern void thread_info_cache_init(void);
60519
60520 #ifdef CONFIG_DEBUG_STACK_USAGE
60521 diff -urNp linux-3.0.8/include/linux/screen_info.h linux-3.0.8/include/linux/screen_info.h
60522 --- linux-3.0.8/include/linux/screen_info.h 2011-07-21 22:17:23.000000000 -0400
60523 +++ linux-3.0.8/include/linux/screen_info.h 2011-08-23 21:47:56.000000000 -0400
60524 @@ -43,7 +43,8 @@ struct screen_info {
60525 __u16 pages; /* 0x32 */
60526 __u16 vesa_attributes; /* 0x34 */
60527 __u32 capabilities; /* 0x36 */
60528 - __u8 _reserved[6]; /* 0x3a */
60529 + __u16 vesapm_size; /* 0x3a */
60530 + __u8 _reserved[4]; /* 0x3c */
60531 } __attribute__((packed));
60532
60533 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
60534 diff -urNp linux-3.0.8/include/linux/security.h linux-3.0.8/include/linux/security.h
60535 --- linux-3.0.8/include/linux/security.h 2011-07-21 22:17:23.000000000 -0400
60536 +++ linux-3.0.8/include/linux/security.h 2011-08-23 21:48:14.000000000 -0400
60537 @@ -36,6 +36,7 @@
60538 #include <linux/key.h>
60539 #include <linux/xfrm.h>
60540 #include <linux/slab.h>
60541 +#include <linux/grsecurity.h>
60542 #include <net/flow.h>
60543
60544 /* Maximum number of letters for an LSM name string */
60545 diff -urNp linux-3.0.8/include/linux/seq_file.h linux-3.0.8/include/linux/seq_file.h
60546 --- linux-3.0.8/include/linux/seq_file.h 2011-07-21 22:17:23.000000000 -0400
60547 +++ linux-3.0.8/include/linux/seq_file.h 2011-08-23 21:47:56.000000000 -0400
60548 @@ -32,6 +32,7 @@ struct seq_operations {
60549 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
60550 int (*show) (struct seq_file *m, void *v);
60551 };
60552 +typedef struct seq_operations __no_const seq_operations_no_const;
60553
60554 #define SEQ_SKIP 1
60555
60556 diff -urNp linux-3.0.8/include/linux/shmem_fs.h linux-3.0.8/include/linux/shmem_fs.h
60557 --- linux-3.0.8/include/linux/shmem_fs.h 2011-07-21 22:17:23.000000000 -0400
60558 +++ linux-3.0.8/include/linux/shmem_fs.h 2011-08-23 21:47:56.000000000 -0400
60559 @@ -10,7 +10,7 @@
60560
60561 #define SHMEM_NR_DIRECT 16
60562
60563 -#define SHMEM_SYMLINK_INLINE_LEN (SHMEM_NR_DIRECT * sizeof(swp_entry_t))
60564 +#define SHMEM_SYMLINK_INLINE_LEN 64
60565
60566 struct shmem_inode_info {
60567 spinlock_t lock;
60568 diff -urNp linux-3.0.8/include/linux/shm.h linux-3.0.8/include/linux/shm.h
60569 --- linux-3.0.8/include/linux/shm.h 2011-07-21 22:17:23.000000000 -0400
60570 +++ linux-3.0.8/include/linux/shm.h 2011-08-23 21:48:14.000000000 -0400
60571 @@ -95,6 +95,10 @@ struct shmid_kernel /* private to the ke
60572 pid_t shm_cprid;
60573 pid_t shm_lprid;
60574 struct user_struct *mlock_user;
60575 +#ifdef CONFIG_GRKERNSEC
60576 + time_t shm_createtime;
60577 + pid_t shm_lapid;
60578 +#endif
60579 };
60580
60581 /* shm_mode upper byte flags */
60582 diff -urNp linux-3.0.8/include/linux/skbuff.h linux-3.0.8/include/linux/skbuff.h
60583 --- linux-3.0.8/include/linux/skbuff.h 2011-07-21 22:17:23.000000000 -0400
60584 +++ linux-3.0.8/include/linux/skbuff.h 2011-08-23 21:47:56.000000000 -0400
60585 @@ -592,7 +592,7 @@ static inline struct skb_shared_hwtstamp
60586 */
60587 static inline int skb_queue_empty(const struct sk_buff_head *list)
60588 {
60589 - return list->next == (struct sk_buff *)list;
60590 + return list->next == (const struct sk_buff *)list;
60591 }
60592
60593 /**
60594 @@ -605,7 +605,7 @@ static inline int skb_queue_empty(const
60595 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
60596 const struct sk_buff *skb)
60597 {
60598 - return skb->next == (struct sk_buff *)list;
60599 + return skb->next == (const struct sk_buff *)list;
60600 }
60601
60602 /**
60603 @@ -618,7 +618,7 @@ static inline bool skb_queue_is_last(con
60604 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
60605 const struct sk_buff *skb)
60606 {
60607 - return skb->prev == (struct sk_buff *)list;
60608 + return skb->prev == (const struct sk_buff *)list;
60609 }
60610
60611 /**
60612 @@ -1440,7 +1440,7 @@ static inline int pskb_network_may_pull(
60613 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
60614 */
60615 #ifndef NET_SKB_PAD
60616 -#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
60617 +#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
60618 #endif
60619
60620 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
60621 diff -urNp linux-3.0.8/include/linux/slab_def.h linux-3.0.8/include/linux/slab_def.h
60622 --- linux-3.0.8/include/linux/slab_def.h 2011-07-21 22:17:23.000000000 -0400
60623 +++ linux-3.0.8/include/linux/slab_def.h 2011-08-23 21:47:56.000000000 -0400
60624 @@ -96,10 +96,10 @@ struct kmem_cache {
60625 unsigned long node_allocs;
60626 unsigned long node_frees;
60627 unsigned long node_overflow;
60628 - atomic_t allochit;
60629 - atomic_t allocmiss;
60630 - atomic_t freehit;
60631 - atomic_t freemiss;
60632 + atomic_unchecked_t allochit;
60633 + atomic_unchecked_t allocmiss;
60634 + atomic_unchecked_t freehit;
60635 + atomic_unchecked_t freemiss;
60636
60637 /*
60638 * If debugging is enabled, then the allocator can add additional
60639 diff -urNp linux-3.0.8/include/linux/slab.h linux-3.0.8/include/linux/slab.h
60640 --- linux-3.0.8/include/linux/slab.h 2011-07-21 22:17:23.000000000 -0400
60641 +++ linux-3.0.8/include/linux/slab.h 2011-08-23 21:47:56.000000000 -0400
60642 @@ -11,12 +11,20 @@
60643
60644 #include <linux/gfp.h>
60645 #include <linux/types.h>
60646 +#include <linux/err.h>
60647
60648 /*
60649 * Flags to pass to kmem_cache_create().
60650 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
60651 */
60652 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
60653 +
60654 +#ifdef CONFIG_PAX_USERCOPY
60655 +#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
60656 +#else
60657 +#define SLAB_USERCOPY 0x00000000UL
60658 +#endif
60659 +
60660 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
60661 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
60662 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
60663 @@ -87,10 +95,13 @@
60664 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
60665 * Both make kfree a no-op.
60666 */
60667 -#define ZERO_SIZE_PTR ((void *)16)
60668 +#define ZERO_SIZE_PTR \
60669 +({ \
60670 + BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
60671 + (void *)(-MAX_ERRNO-1L); \
60672 +})
60673
60674 -#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
60675 - (unsigned long)ZERO_SIZE_PTR)
60676 +#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
60677
60678 /*
60679 * struct kmem_cache related prototypes
60680 @@ -141,6 +152,7 @@ void * __must_check krealloc(const void
60681 void kfree(const void *);
60682 void kzfree(const void *);
60683 size_t ksize(const void *);
60684 +void check_object_size(const void *ptr, unsigned long n, bool to);
60685
60686 /*
60687 * Allocator specific definitions. These are mainly used to establish optimized
60688 @@ -333,4 +345,59 @@ static inline void *kzalloc_node(size_t
60689
60690 void __init kmem_cache_init_late(void);
60691
60692 +#define kmalloc(x, y) \
60693 +({ \
60694 + void *___retval; \
60695 + intoverflow_t ___x = (intoverflow_t)x; \
60696 + if (WARN(___x > ULONG_MAX, "kmalloc size overflow\n")) \
60697 + ___retval = NULL; \
60698 + else \
60699 + ___retval = kmalloc((size_t)___x, (y)); \
60700 + ___retval; \
60701 +})
60702 +
60703 +#define kmalloc_node(x, y, z) \
60704 +({ \
60705 + void *___retval; \
60706 + intoverflow_t ___x = (intoverflow_t)x; \
60707 + if (WARN(___x > ULONG_MAX, "kmalloc_node size overflow\n"))\
60708 + ___retval = NULL; \
60709 + else \
60710 + ___retval = kmalloc_node((size_t)___x, (y), (z));\
60711 + ___retval; \
60712 +})
60713 +
60714 +#define kzalloc(x, y) \
60715 +({ \
60716 + void *___retval; \
60717 + intoverflow_t ___x = (intoverflow_t)x; \
60718 + if (WARN(___x > ULONG_MAX, "kzalloc size overflow\n")) \
60719 + ___retval = NULL; \
60720 + else \
60721 + ___retval = kzalloc((size_t)___x, (y)); \
60722 + ___retval; \
60723 +})
60724 +
60725 +#define __krealloc(x, y, z) \
60726 +({ \
60727 + void *___retval; \
60728 + intoverflow_t ___y = (intoverflow_t)y; \
60729 + if (WARN(___y > ULONG_MAX, "__krealloc size overflow\n"))\
60730 + ___retval = NULL; \
60731 + else \
60732 + ___retval = __krealloc((x), (size_t)___y, (z)); \
60733 + ___retval; \
60734 +})
60735 +
60736 +#define krealloc(x, y, z) \
60737 +({ \
60738 + void *___retval; \
60739 + intoverflow_t ___y = (intoverflow_t)y; \
60740 + if (WARN(___y > ULONG_MAX, "krealloc size overflow\n")) \
60741 + ___retval = NULL; \
60742 + else \
60743 + ___retval = krealloc((x), (size_t)___y, (z)); \
60744 + ___retval; \
60745 +})
60746 +
60747 #endif /* _LINUX_SLAB_H */
60748 diff -urNp linux-3.0.8/include/linux/slub_def.h linux-3.0.8/include/linux/slub_def.h
60749 --- linux-3.0.8/include/linux/slub_def.h 2011-07-21 22:17:23.000000000 -0400
60750 +++ linux-3.0.8/include/linux/slub_def.h 2011-08-23 21:47:56.000000000 -0400
60751 @@ -82,7 +82,7 @@ struct kmem_cache {
60752 struct kmem_cache_order_objects max;
60753 struct kmem_cache_order_objects min;
60754 gfp_t allocflags; /* gfp flags to use on each alloc */
60755 - int refcount; /* Refcount for slab cache destroy */
60756 + atomic_t refcount; /* Refcount for slab cache destroy */
60757 void (*ctor)(void *);
60758 int inuse; /* Offset to metadata */
60759 int align; /* Alignment */
60760 @@ -218,7 +218,7 @@ static __always_inline struct kmem_cache
60761 }
60762
60763 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
60764 -void *__kmalloc(size_t size, gfp_t flags);
60765 +void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1);
60766
60767 static __always_inline void *
60768 kmalloc_order(size_t size, gfp_t flags, unsigned int order)
60769 diff -urNp linux-3.0.8/include/linux/sonet.h linux-3.0.8/include/linux/sonet.h
60770 --- linux-3.0.8/include/linux/sonet.h 2011-07-21 22:17:23.000000000 -0400
60771 +++ linux-3.0.8/include/linux/sonet.h 2011-08-23 21:47:56.000000000 -0400
60772 @@ -61,7 +61,7 @@ struct sonet_stats {
60773 #include <asm/atomic.h>
60774
60775 struct k_sonet_stats {
60776 -#define __HANDLE_ITEM(i) atomic_t i
60777 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
60778 __SONET_ITEMS
60779 #undef __HANDLE_ITEM
60780 };
60781 diff -urNp linux-3.0.8/include/linux/sunrpc/clnt.h linux-3.0.8/include/linux/sunrpc/clnt.h
60782 --- linux-3.0.8/include/linux/sunrpc/clnt.h 2011-07-21 22:17:23.000000000 -0400
60783 +++ linux-3.0.8/include/linux/sunrpc/clnt.h 2011-08-23 21:47:56.000000000 -0400
60784 @@ -169,9 +169,9 @@ static inline unsigned short rpc_get_por
60785 {
60786 switch (sap->sa_family) {
60787 case AF_INET:
60788 - return ntohs(((struct sockaddr_in *)sap)->sin_port);
60789 + return ntohs(((const struct sockaddr_in *)sap)->sin_port);
60790 case AF_INET6:
60791 - return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
60792 + return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
60793 }
60794 return 0;
60795 }
60796 @@ -204,7 +204,7 @@ static inline bool __rpc_cmp_addr4(const
60797 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
60798 const struct sockaddr *src)
60799 {
60800 - const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
60801 + const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
60802 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
60803
60804 dsin->sin_family = ssin->sin_family;
60805 @@ -301,7 +301,7 @@ static inline u32 rpc_get_scope_id(const
60806 if (sa->sa_family != AF_INET6)
60807 return 0;
60808
60809 - return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
60810 + return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
60811 }
60812
60813 #endif /* __KERNEL__ */
60814 diff -urNp linux-3.0.8/include/linux/sunrpc/svc_rdma.h linux-3.0.8/include/linux/sunrpc/svc_rdma.h
60815 --- linux-3.0.8/include/linux/sunrpc/svc_rdma.h 2011-07-21 22:17:23.000000000 -0400
60816 +++ linux-3.0.8/include/linux/sunrpc/svc_rdma.h 2011-08-23 21:47:56.000000000 -0400
60817 @@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
60818 extern unsigned int svcrdma_max_requests;
60819 extern unsigned int svcrdma_max_req_size;
60820
60821 -extern atomic_t rdma_stat_recv;
60822 -extern atomic_t rdma_stat_read;
60823 -extern atomic_t rdma_stat_write;
60824 -extern atomic_t rdma_stat_sq_starve;
60825 -extern atomic_t rdma_stat_rq_starve;
60826 -extern atomic_t rdma_stat_rq_poll;
60827 -extern atomic_t rdma_stat_rq_prod;
60828 -extern atomic_t rdma_stat_sq_poll;
60829 -extern atomic_t rdma_stat_sq_prod;
60830 +extern atomic_unchecked_t rdma_stat_recv;
60831 +extern atomic_unchecked_t rdma_stat_read;
60832 +extern atomic_unchecked_t rdma_stat_write;
60833 +extern atomic_unchecked_t rdma_stat_sq_starve;
60834 +extern atomic_unchecked_t rdma_stat_rq_starve;
60835 +extern atomic_unchecked_t rdma_stat_rq_poll;
60836 +extern atomic_unchecked_t rdma_stat_rq_prod;
60837 +extern atomic_unchecked_t rdma_stat_sq_poll;
60838 +extern atomic_unchecked_t rdma_stat_sq_prod;
60839
60840 #define RPCRDMA_VERSION 1
60841
60842 diff -urNp linux-3.0.8/include/linux/sysctl.h linux-3.0.8/include/linux/sysctl.h
60843 --- linux-3.0.8/include/linux/sysctl.h 2011-07-21 22:17:23.000000000 -0400
60844 +++ linux-3.0.8/include/linux/sysctl.h 2011-08-23 21:48:14.000000000 -0400
60845 @@ -155,7 +155,11 @@ enum
60846 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
60847 };
60848
60849 -
60850 +#ifdef CONFIG_PAX_SOFTMODE
60851 +enum {
60852 + PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
60853 +};
60854 +#endif
60855
60856 /* CTL_VM names: */
60857 enum
60858 @@ -967,6 +971,8 @@ typedef int proc_handler (struct ctl_tab
60859
60860 extern int proc_dostring(struct ctl_table *, int,
60861 void __user *, size_t *, loff_t *);
60862 +extern int proc_dostring_modpriv(struct ctl_table *, int,
60863 + void __user *, size_t *, loff_t *);
60864 extern int proc_dointvec(struct ctl_table *, int,
60865 void __user *, size_t *, loff_t *);
60866 extern int proc_dointvec_minmax(struct ctl_table *, int,
60867 diff -urNp linux-3.0.8/include/linux/tty_ldisc.h linux-3.0.8/include/linux/tty_ldisc.h
60868 --- linux-3.0.8/include/linux/tty_ldisc.h 2011-07-21 22:17:23.000000000 -0400
60869 +++ linux-3.0.8/include/linux/tty_ldisc.h 2011-08-23 21:47:56.000000000 -0400
60870 @@ -148,7 +148,7 @@ struct tty_ldisc_ops {
60871
60872 struct module *owner;
60873
60874 - int refcount;
60875 + atomic_t refcount;
60876 };
60877
60878 struct tty_ldisc {
60879 diff -urNp linux-3.0.8/include/linux/types.h linux-3.0.8/include/linux/types.h
60880 --- linux-3.0.8/include/linux/types.h 2011-07-21 22:17:23.000000000 -0400
60881 +++ linux-3.0.8/include/linux/types.h 2011-08-23 21:47:56.000000000 -0400
60882 @@ -213,10 +213,26 @@ typedef struct {
60883 int counter;
60884 } atomic_t;
60885
60886 +#ifdef CONFIG_PAX_REFCOUNT
60887 +typedef struct {
60888 + int counter;
60889 +} atomic_unchecked_t;
60890 +#else
60891 +typedef atomic_t atomic_unchecked_t;
60892 +#endif
60893 +
60894 #ifdef CONFIG_64BIT
60895 typedef struct {
60896 long counter;
60897 } atomic64_t;
60898 +
60899 +#ifdef CONFIG_PAX_REFCOUNT
60900 +typedef struct {
60901 + long counter;
60902 +} atomic64_unchecked_t;
60903 +#else
60904 +typedef atomic64_t atomic64_unchecked_t;
60905 +#endif
60906 #endif
60907
60908 struct list_head {
60909 diff -urNp linux-3.0.8/include/linux/uaccess.h linux-3.0.8/include/linux/uaccess.h
60910 --- linux-3.0.8/include/linux/uaccess.h 2011-07-21 22:17:23.000000000 -0400
60911 +++ linux-3.0.8/include/linux/uaccess.h 2011-10-06 04:17:55.000000000 -0400
60912 @@ -76,11 +76,11 @@ static inline unsigned long __copy_from_
60913 long ret; \
60914 mm_segment_t old_fs = get_fs(); \
60915 \
60916 - set_fs(KERNEL_DS); \
60917 pagefault_disable(); \
60918 - ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
60919 - pagefault_enable(); \
60920 + set_fs(KERNEL_DS); \
60921 + ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
60922 set_fs(old_fs); \
60923 + pagefault_enable(); \
60924 ret; \
60925 })
60926
60927 diff -urNp linux-3.0.8/include/linux/unaligned/access_ok.h linux-3.0.8/include/linux/unaligned/access_ok.h
60928 --- linux-3.0.8/include/linux/unaligned/access_ok.h 2011-07-21 22:17:23.000000000 -0400
60929 +++ linux-3.0.8/include/linux/unaligned/access_ok.h 2011-08-23 21:47:56.000000000 -0400
60930 @@ -6,32 +6,32 @@
60931
60932 static inline u16 get_unaligned_le16(const void *p)
60933 {
60934 - return le16_to_cpup((__le16 *)p);
60935 + return le16_to_cpup((const __le16 *)p);
60936 }
60937
60938 static inline u32 get_unaligned_le32(const void *p)
60939 {
60940 - return le32_to_cpup((__le32 *)p);
60941 + return le32_to_cpup((const __le32 *)p);
60942 }
60943
60944 static inline u64 get_unaligned_le64(const void *p)
60945 {
60946 - return le64_to_cpup((__le64 *)p);
60947 + return le64_to_cpup((const __le64 *)p);
60948 }
60949
60950 static inline u16 get_unaligned_be16(const void *p)
60951 {
60952 - return be16_to_cpup((__be16 *)p);
60953 + return be16_to_cpup((const __be16 *)p);
60954 }
60955
60956 static inline u32 get_unaligned_be32(const void *p)
60957 {
60958 - return be32_to_cpup((__be32 *)p);
60959 + return be32_to_cpup((const __be32 *)p);
60960 }
60961
60962 static inline u64 get_unaligned_be64(const void *p)
60963 {
60964 - return be64_to_cpup((__be64 *)p);
60965 + return be64_to_cpup((const __be64 *)p);
60966 }
60967
60968 static inline void put_unaligned_le16(u16 val, void *p)
60969 diff -urNp linux-3.0.8/include/linux/vermagic.h linux-3.0.8/include/linux/vermagic.h
60970 --- linux-3.0.8/include/linux/vermagic.h 2011-07-21 22:17:23.000000000 -0400
60971 +++ linux-3.0.8/include/linux/vermagic.h 2011-10-07 19:25:35.000000000 -0400
60972 @@ -26,9 +26,28 @@
60973 #define MODULE_ARCH_VERMAGIC ""
60974 #endif
60975
60976 +#ifdef CONFIG_PAX_REFCOUNT
60977 +#define MODULE_PAX_REFCOUNT "REFCOUNT "
60978 +#else
60979 +#define MODULE_PAX_REFCOUNT ""
60980 +#endif
60981 +
60982 +#ifdef CONSTIFY_PLUGIN
60983 +#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
60984 +#else
60985 +#define MODULE_CONSTIFY_PLUGIN ""
60986 +#endif
60987 +
60988 +#ifdef CONFIG_GRKERNSEC
60989 +#define MODULE_GRSEC "GRSEC "
60990 +#else
60991 +#define MODULE_GRSEC ""
60992 +#endif
60993 +
60994 #define VERMAGIC_STRING \
60995 UTS_RELEASE " " \
60996 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
60997 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
60998 - MODULE_ARCH_VERMAGIC
60999 + MODULE_ARCH_VERMAGIC \
61000 + MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_GRSEC
61001
61002 diff -urNp linux-3.0.8/include/linux/vmalloc.h linux-3.0.8/include/linux/vmalloc.h
61003 --- linux-3.0.8/include/linux/vmalloc.h 2011-07-21 22:17:23.000000000 -0400
61004 +++ linux-3.0.8/include/linux/vmalloc.h 2011-08-23 21:47:56.000000000 -0400
61005 @@ -13,6 +13,11 @@ struct vm_area_struct; /* vma defining
61006 #define VM_MAP 0x00000004 /* vmap()ed pages */
61007 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
61008 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
61009 +
61010 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
61011 +#define VM_KERNEXEC 0x00000020 /* allocate from executable kernel memory range */
61012 +#endif
61013 +
61014 /* bits [20..32] reserved for arch specific ioremap internals */
61015
61016 /*
61017 @@ -155,4 +160,103 @@ pcpu_free_vm_areas(struct vm_struct **vm
61018 # endif
61019 #endif
61020
61021 +#define vmalloc(x) \
61022 +({ \
61023 + void *___retval; \
61024 + intoverflow_t ___x = (intoverflow_t)x; \
61025 + if (WARN(___x > ULONG_MAX, "vmalloc size overflow\n")) \
61026 + ___retval = NULL; \
61027 + else \
61028 + ___retval = vmalloc((unsigned long)___x); \
61029 + ___retval; \
61030 +})
61031 +
61032 +#define vzalloc(x) \
61033 +({ \
61034 + void *___retval; \
61035 + intoverflow_t ___x = (intoverflow_t)x; \
61036 + if (WARN(___x > ULONG_MAX, "vzalloc size overflow\n")) \
61037 + ___retval = NULL; \
61038 + else \
61039 + ___retval = vzalloc((unsigned long)___x); \
61040 + ___retval; \
61041 +})
61042 +
61043 +#define __vmalloc(x, y, z) \
61044 +({ \
61045 + void *___retval; \
61046 + intoverflow_t ___x = (intoverflow_t)x; \
61047 + if (WARN(___x > ULONG_MAX, "__vmalloc size overflow\n"))\
61048 + ___retval = NULL; \
61049 + else \
61050 + ___retval = __vmalloc((unsigned long)___x, (y), (z));\
61051 + ___retval; \
61052 +})
61053 +
61054 +#define vmalloc_user(x) \
61055 +({ \
61056 + void *___retval; \
61057 + intoverflow_t ___x = (intoverflow_t)x; \
61058 + if (WARN(___x > ULONG_MAX, "vmalloc_user size overflow\n"))\
61059 + ___retval = NULL; \
61060 + else \
61061 + ___retval = vmalloc_user((unsigned long)___x); \
61062 + ___retval; \
61063 +})
61064 +
61065 +#define vmalloc_exec(x) \
61066 +({ \
61067 + void *___retval; \
61068 + intoverflow_t ___x = (intoverflow_t)x; \
61069 + if (WARN(___x > ULONG_MAX, "vmalloc_exec size overflow\n"))\
61070 + ___retval = NULL; \
61071 + else \
61072 + ___retval = vmalloc_exec((unsigned long)___x); \
61073 + ___retval; \
61074 +})
61075 +
61076 +#define vmalloc_node(x, y) \
61077 +({ \
61078 + void *___retval; \
61079 + intoverflow_t ___x = (intoverflow_t)x; \
61080 + if (WARN(___x > ULONG_MAX, "vmalloc_node size overflow\n"))\
61081 + ___retval = NULL; \
61082 + else \
61083 + ___retval = vmalloc_node((unsigned long)___x, (y));\
61084 + ___retval; \
61085 +})
61086 +
61087 +#define vzalloc_node(x, y) \
61088 +({ \
61089 + void *___retval; \
61090 + intoverflow_t ___x = (intoverflow_t)x; \
61091 + if (WARN(___x > ULONG_MAX, "vzalloc_node size overflow\n"))\
61092 + ___retval = NULL; \
61093 + else \
61094 + ___retval = vzalloc_node((unsigned long)___x, (y));\
61095 + ___retval; \
61096 +})
61097 +
61098 +#define vmalloc_32(x) \
61099 +({ \
61100 + void *___retval; \
61101 + intoverflow_t ___x = (intoverflow_t)x; \
61102 + if (WARN(___x > ULONG_MAX, "vmalloc_32 size overflow\n"))\
61103 + ___retval = NULL; \
61104 + else \
61105 + ___retval = vmalloc_32((unsigned long)___x); \
61106 + ___retval; \
61107 +})
61108 +
61109 +#define vmalloc_32_user(x) \
61110 +({ \
61111 +void *___retval; \
61112 + intoverflow_t ___x = (intoverflow_t)x; \
61113 + if (WARN(___x > ULONG_MAX, "vmalloc_32_user size overflow\n"))\
61114 + ___retval = NULL; \
61115 + else \
61116 + ___retval = vmalloc_32_user((unsigned long)___x);\
61117 + ___retval; \
61118 +})
61119 +
61120 #endif /* _LINUX_VMALLOC_H */
61121 diff -urNp linux-3.0.8/include/linux/vmstat.h linux-3.0.8/include/linux/vmstat.h
61122 --- linux-3.0.8/include/linux/vmstat.h 2011-07-21 22:17:23.000000000 -0400
61123 +++ linux-3.0.8/include/linux/vmstat.h 2011-08-23 21:47:56.000000000 -0400
61124 @@ -87,18 +87,18 @@ static inline void vm_events_fold_cpu(in
61125 /*
61126 * Zone based page accounting with per cpu differentials.
61127 */
61128 -extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
61129 +extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
61130
61131 static inline void zone_page_state_add(long x, struct zone *zone,
61132 enum zone_stat_item item)
61133 {
61134 - atomic_long_add(x, &zone->vm_stat[item]);
61135 - atomic_long_add(x, &vm_stat[item]);
61136 + atomic_long_add_unchecked(x, &zone->vm_stat[item]);
61137 + atomic_long_add_unchecked(x, &vm_stat[item]);
61138 }
61139
61140 static inline unsigned long global_page_state(enum zone_stat_item item)
61141 {
61142 - long x = atomic_long_read(&vm_stat[item]);
61143 + long x = atomic_long_read_unchecked(&vm_stat[item]);
61144 #ifdef CONFIG_SMP
61145 if (x < 0)
61146 x = 0;
61147 @@ -109,7 +109,7 @@ static inline unsigned long global_page_
61148 static inline unsigned long zone_page_state(struct zone *zone,
61149 enum zone_stat_item item)
61150 {
61151 - long x = atomic_long_read(&zone->vm_stat[item]);
61152 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
61153 #ifdef CONFIG_SMP
61154 if (x < 0)
61155 x = 0;
61156 @@ -126,7 +126,7 @@ static inline unsigned long zone_page_st
61157 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
61158 enum zone_stat_item item)
61159 {
61160 - long x = atomic_long_read(&zone->vm_stat[item]);
61161 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
61162
61163 #ifdef CONFIG_SMP
61164 int cpu;
61165 @@ -221,8 +221,8 @@ static inline void __mod_zone_page_state
61166
61167 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
61168 {
61169 - atomic_long_inc(&zone->vm_stat[item]);
61170 - atomic_long_inc(&vm_stat[item]);
61171 + atomic_long_inc_unchecked(&zone->vm_stat[item]);
61172 + atomic_long_inc_unchecked(&vm_stat[item]);
61173 }
61174
61175 static inline void __inc_zone_page_state(struct page *page,
61176 @@ -233,8 +233,8 @@ static inline void __inc_zone_page_state
61177
61178 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
61179 {
61180 - atomic_long_dec(&zone->vm_stat[item]);
61181 - atomic_long_dec(&vm_stat[item]);
61182 + atomic_long_dec_unchecked(&zone->vm_stat[item]);
61183 + atomic_long_dec_unchecked(&vm_stat[item]);
61184 }
61185
61186 static inline void __dec_zone_page_state(struct page *page,
61187 diff -urNp linux-3.0.8/include/media/saa7146_vv.h linux-3.0.8/include/media/saa7146_vv.h
61188 --- linux-3.0.8/include/media/saa7146_vv.h 2011-07-21 22:17:23.000000000 -0400
61189 +++ linux-3.0.8/include/media/saa7146_vv.h 2011-10-07 19:07:40.000000000 -0400
61190 @@ -163,7 +163,7 @@ struct saa7146_ext_vv
61191 int (*std_callback)(struct saa7146_dev*, struct saa7146_standard *);
61192
61193 /* the extension can override this */
61194 - struct v4l2_ioctl_ops ops;
61195 + v4l2_ioctl_ops_no_const ops;
61196 /* pointer to the saa7146 core ops */
61197 const struct v4l2_ioctl_ops *core_ops;
61198
61199 diff -urNp linux-3.0.8/include/media/v4l2-dev.h linux-3.0.8/include/media/v4l2-dev.h
61200 --- linux-3.0.8/include/media/v4l2-dev.h 2011-07-21 22:17:23.000000000 -0400
61201 +++ linux-3.0.8/include/media/v4l2-dev.h 2011-10-07 19:07:40.000000000 -0400
61202 @@ -56,7 +56,7 @@ int v4l2_prio_check(struct v4l2_prio_sta
61203
61204
61205 struct v4l2_file_operations {
61206 - struct module *owner;
61207 + struct module * const owner;
61208 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
61209 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
61210 unsigned int (*poll) (struct file *, struct poll_table_struct *);
61211 @@ -68,6 +68,7 @@ struct v4l2_file_operations {
61212 int (*open) (struct file *);
61213 int (*release) (struct file *);
61214 };
61215 +typedef struct v4l2_file_operations __no_const v4l2_file_operations_no_const;
61216
61217 /*
61218 * Newer version of video_device, handled by videodev2.c
61219 diff -urNp linux-3.0.8/include/media/v4l2-ioctl.h linux-3.0.8/include/media/v4l2-ioctl.h
61220 --- linux-3.0.8/include/media/v4l2-ioctl.h 2011-07-21 22:17:23.000000000 -0400
61221 +++ linux-3.0.8/include/media/v4l2-ioctl.h 2011-08-24 18:25:45.000000000 -0400
61222 @@ -272,6 +272,7 @@ struct v4l2_ioctl_ops {
61223 long (*vidioc_default) (struct file *file, void *fh,
61224 bool valid_prio, int cmd, void *arg);
61225 };
61226 +typedef struct v4l2_ioctl_ops __no_const v4l2_ioctl_ops_no_const;
61227
61228
61229 /* v4l debugging and diagnostics */
61230 diff -urNp linux-3.0.8/include/net/caif/cfctrl.h linux-3.0.8/include/net/caif/cfctrl.h
61231 --- linux-3.0.8/include/net/caif/cfctrl.h 2011-07-21 22:17:23.000000000 -0400
61232 +++ linux-3.0.8/include/net/caif/cfctrl.h 2011-08-23 21:47:56.000000000 -0400
61233 @@ -52,7 +52,7 @@ struct cfctrl_rsp {
61234 void (*radioset_rsp)(void);
61235 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
61236 struct cflayer *client_layer);
61237 -};
61238 +} __no_const;
61239
61240 /* Link Setup Parameters for CAIF-Links. */
61241 struct cfctrl_link_param {
61242 @@ -101,8 +101,8 @@ struct cfctrl_request_info {
61243 struct cfctrl {
61244 struct cfsrvl serv;
61245 struct cfctrl_rsp res;
61246 - atomic_t req_seq_no;
61247 - atomic_t rsp_seq_no;
61248 + atomic_unchecked_t req_seq_no;
61249 + atomic_unchecked_t rsp_seq_no;
61250 struct list_head list;
61251 /* Protects from simultaneous access to first_req list */
61252 spinlock_t info_list_lock;
61253 diff -urNp linux-3.0.8/include/net/flow.h linux-3.0.8/include/net/flow.h
61254 --- linux-3.0.8/include/net/flow.h 2011-07-21 22:17:23.000000000 -0400
61255 +++ linux-3.0.8/include/net/flow.h 2011-08-23 21:47:56.000000000 -0400
61256 @@ -188,6 +188,6 @@ extern struct flow_cache_object *flow_ca
61257 u8 dir, flow_resolve_t resolver, void *ctx);
61258
61259 extern void flow_cache_flush(void);
61260 -extern atomic_t flow_cache_genid;
61261 +extern atomic_unchecked_t flow_cache_genid;
61262
61263 #endif
61264 diff -urNp linux-3.0.8/include/net/inetpeer.h linux-3.0.8/include/net/inetpeer.h
61265 --- linux-3.0.8/include/net/inetpeer.h 2011-07-21 22:17:23.000000000 -0400
61266 +++ linux-3.0.8/include/net/inetpeer.h 2011-08-23 21:47:56.000000000 -0400
61267 @@ -43,8 +43,8 @@ struct inet_peer {
61268 */
61269 union {
61270 struct {
61271 - atomic_t rid; /* Frag reception counter */
61272 - atomic_t ip_id_count; /* IP ID for the next packet */
61273 + atomic_unchecked_t rid; /* Frag reception counter */
61274 + atomic_unchecked_t ip_id_count; /* IP ID for the next packet */
61275 __u32 tcp_ts;
61276 __u32 tcp_ts_stamp;
61277 u32 metrics[RTAX_MAX];
61278 @@ -108,7 +108,7 @@ static inline __u16 inet_getid(struct in
61279 {
61280 more++;
61281 inet_peer_refcheck(p);
61282 - return atomic_add_return(more, &p->ip_id_count) - more;
61283 + return atomic_add_return_unchecked(more, &p->ip_id_count) - more;
61284 }
61285
61286 #endif /* _NET_INETPEER_H */
61287 diff -urNp linux-3.0.8/include/net/ip_fib.h linux-3.0.8/include/net/ip_fib.h
61288 --- linux-3.0.8/include/net/ip_fib.h 2011-07-21 22:17:23.000000000 -0400
61289 +++ linux-3.0.8/include/net/ip_fib.h 2011-08-23 21:47:56.000000000 -0400
61290 @@ -146,7 +146,7 @@ extern __be32 fib_info_update_nh_saddr(s
61291
61292 #define FIB_RES_SADDR(net, res) \
61293 ((FIB_RES_NH(res).nh_saddr_genid == \
61294 - atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
61295 + atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
61296 FIB_RES_NH(res).nh_saddr : \
61297 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
61298 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
61299 diff -urNp linux-3.0.8/include/net/ip_vs.h linux-3.0.8/include/net/ip_vs.h
61300 --- linux-3.0.8/include/net/ip_vs.h 2011-07-21 22:17:23.000000000 -0400
61301 +++ linux-3.0.8/include/net/ip_vs.h 2011-08-23 21:47:56.000000000 -0400
61302 @@ -509,7 +509,7 @@ struct ip_vs_conn {
61303 struct ip_vs_conn *control; /* Master control connection */
61304 atomic_t n_control; /* Number of controlled ones */
61305 struct ip_vs_dest *dest; /* real server */
61306 - atomic_t in_pkts; /* incoming packet counter */
61307 + atomic_unchecked_t in_pkts; /* incoming packet counter */
61308
61309 /* packet transmitter for different forwarding methods. If it
61310 mangles the packet, it must return NF_DROP or better NF_STOLEN,
61311 @@ -647,7 +647,7 @@ struct ip_vs_dest {
61312 __be16 port; /* port number of the server */
61313 union nf_inet_addr addr; /* IP address of the server */
61314 volatile unsigned flags; /* dest status flags */
61315 - atomic_t conn_flags; /* flags to copy to conn */
61316 + atomic_unchecked_t conn_flags; /* flags to copy to conn */
61317 atomic_t weight; /* server weight */
61318
61319 atomic_t refcnt; /* reference counter */
61320 diff -urNp linux-3.0.8/include/net/irda/ircomm_core.h linux-3.0.8/include/net/irda/ircomm_core.h
61321 --- linux-3.0.8/include/net/irda/ircomm_core.h 2011-07-21 22:17:23.000000000 -0400
61322 +++ linux-3.0.8/include/net/irda/ircomm_core.h 2011-08-23 21:47:56.000000000 -0400
61323 @@ -51,7 +51,7 @@ typedef struct {
61324 int (*connect_response)(struct ircomm_cb *, struct sk_buff *);
61325 int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *,
61326 struct ircomm_info *);
61327 -} call_t;
61328 +} __no_const call_t;
61329
61330 struct ircomm_cb {
61331 irda_queue_t queue;
61332 diff -urNp linux-3.0.8/include/net/irda/ircomm_tty.h linux-3.0.8/include/net/irda/ircomm_tty.h
61333 --- linux-3.0.8/include/net/irda/ircomm_tty.h 2011-07-21 22:17:23.000000000 -0400
61334 +++ linux-3.0.8/include/net/irda/ircomm_tty.h 2011-08-23 21:47:56.000000000 -0400
61335 @@ -35,6 +35,7 @@
61336 #include <linux/termios.h>
61337 #include <linux/timer.h>
61338 #include <linux/tty.h> /* struct tty_struct */
61339 +#include <asm/local.h>
61340
61341 #include <net/irda/irias_object.h>
61342 #include <net/irda/ircomm_core.h>
61343 @@ -105,8 +106,8 @@ struct ircomm_tty_cb {
61344 unsigned short close_delay;
61345 unsigned short closing_wait; /* time to wait before closing */
61346
61347 - int open_count;
61348 - int blocked_open; /* # of blocked opens */
61349 + local_t open_count;
61350 + local_t blocked_open; /* # of blocked opens */
61351
61352 /* Protect concurent access to :
61353 * o self->open_count
61354 diff -urNp linux-3.0.8/include/net/iucv/af_iucv.h linux-3.0.8/include/net/iucv/af_iucv.h
61355 --- linux-3.0.8/include/net/iucv/af_iucv.h 2011-07-21 22:17:23.000000000 -0400
61356 +++ linux-3.0.8/include/net/iucv/af_iucv.h 2011-08-23 21:47:56.000000000 -0400
61357 @@ -87,7 +87,7 @@ struct iucv_sock {
61358 struct iucv_sock_list {
61359 struct hlist_head head;
61360 rwlock_t lock;
61361 - atomic_t autobind_name;
61362 + atomic_unchecked_t autobind_name;
61363 };
61364
61365 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
61366 diff -urNp linux-3.0.8/include/net/lapb.h linux-3.0.8/include/net/lapb.h
61367 --- linux-3.0.8/include/net/lapb.h 2011-07-21 22:17:23.000000000 -0400
61368 +++ linux-3.0.8/include/net/lapb.h 2011-08-23 21:47:56.000000000 -0400
61369 @@ -95,7 +95,7 @@ struct lapb_cb {
61370 struct sk_buff_head write_queue;
61371 struct sk_buff_head ack_queue;
61372 unsigned char window;
61373 - struct lapb_register_struct callbacks;
61374 + struct lapb_register_struct *callbacks;
61375
61376 /* FRMR control information */
61377 struct lapb_frame frmr_data;
61378 diff -urNp linux-3.0.8/include/net/neighbour.h linux-3.0.8/include/net/neighbour.h
61379 --- linux-3.0.8/include/net/neighbour.h 2011-07-21 22:17:23.000000000 -0400
61380 +++ linux-3.0.8/include/net/neighbour.h 2011-08-31 18:39:25.000000000 -0400
61381 @@ -124,7 +124,7 @@ struct neigh_ops {
61382 int (*connected_output)(struct sk_buff*);
61383 int (*hh_output)(struct sk_buff*);
61384 int (*queue_xmit)(struct sk_buff*);
61385 -};
61386 +} __do_const;
61387
61388 struct pneigh_entry {
61389 struct pneigh_entry *next;
61390 diff -urNp linux-3.0.8/include/net/netlink.h linux-3.0.8/include/net/netlink.h
61391 --- linux-3.0.8/include/net/netlink.h 2011-07-21 22:17:23.000000000 -0400
61392 +++ linux-3.0.8/include/net/netlink.h 2011-08-23 21:47:56.000000000 -0400
61393 @@ -562,7 +562,7 @@ static inline void *nlmsg_get_pos(struct
61394 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
61395 {
61396 if (mark)
61397 - skb_trim(skb, (unsigned char *) mark - skb->data);
61398 + skb_trim(skb, (const unsigned char *) mark - skb->data);
61399 }
61400
61401 /**
61402 diff -urNp linux-3.0.8/include/net/netns/ipv4.h linux-3.0.8/include/net/netns/ipv4.h
61403 --- linux-3.0.8/include/net/netns/ipv4.h 2011-07-21 22:17:23.000000000 -0400
61404 +++ linux-3.0.8/include/net/netns/ipv4.h 2011-08-23 21:47:56.000000000 -0400
61405 @@ -56,8 +56,8 @@ struct netns_ipv4 {
61406
61407 unsigned int sysctl_ping_group_range[2];
61408
61409 - atomic_t rt_genid;
61410 - atomic_t dev_addr_genid;
61411 + atomic_unchecked_t rt_genid;
61412 + atomic_unchecked_t dev_addr_genid;
61413
61414 #ifdef CONFIG_IP_MROUTE
61415 #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
61416 diff -urNp linux-3.0.8/include/net/sctp/sctp.h linux-3.0.8/include/net/sctp/sctp.h
61417 --- linux-3.0.8/include/net/sctp/sctp.h 2011-07-21 22:17:23.000000000 -0400
61418 +++ linux-3.0.8/include/net/sctp/sctp.h 2011-08-23 21:47:56.000000000 -0400
61419 @@ -315,9 +315,9 @@ do { \
61420
61421 #else /* SCTP_DEBUG */
61422
61423 -#define SCTP_DEBUG_PRINTK(whatever...)
61424 -#define SCTP_DEBUG_PRINTK_CONT(fmt, args...)
61425 -#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
61426 +#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
61427 +#define SCTP_DEBUG_PRINTK_CONT(fmt, args...) do {} while (0)
61428 +#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
61429 #define SCTP_ENABLE_DEBUG
61430 #define SCTP_DISABLE_DEBUG
61431 #define SCTP_ASSERT(expr, str, func)
61432 diff -urNp linux-3.0.8/include/net/sock.h linux-3.0.8/include/net/sock.h
61433 --- linux-3.0.8/include/net/sock.h 2011-07-21 22:17:23.000000000 -0400
61434 +++ linux-3.0.8/include/net/sock.h 2011-08-23 21:47:56.000000000 -0400
61435 @@ -277,7 +277,7 @@ struct sock {
61436 #ifdef CONFIG_RPS
61437 __u32 sk_rxhash;
61438 #endif
61439 - atomic_t sk_drops;
61440 + atomic_unchecked_t sk_drops;
61441 int sk_rcvbuf;
61442
61443 struct sk_filter __rcu *sk_filter;
61444 @@ -1390,7 +1390,7 @@ static inline void sk_nocaps_add(struct
61445 }
61446
61447 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
61448 - char __user *from, char *to,
61449 + char __user *from, unsigned char *to,
61450 int copy, int offset)
61451 {
61452 if (skb->ip_summed == CHECKSUM_NONE) {
61453 diff -urNp linux-3.0.8/include/net/tcp.h linux-3.0.8/include/net/tcp.h
61454 --- linux-3.0.8/include/net/tcp.h 2011-07-21 22:17:23.000000000 -0400
61455 +++ linux-3.0.8/include/net/tcp.h 2011-08-23 21:47:56.000000000 -0400
61456 @@ -1374,8 +1374,8 @@ enum tcp_seq_states {
61457 struct tcp_seq_afinfo {
61458 char *name;
61459 sa_family_t family;
61460 - struct file_operations seq_fops;
61461 - struct seq_operations seq_ops;
61462 + file_operations_no_const seq_fops;
61463 + seq_operations_no_const seq_ops;
61464 };
61465
61466 struct tcp_iter_state {
61467 diff -urNp linux-3.0.8/include/net/udp.h linux-3.0.8/include/net/udp.h
61468 --- linux-3.0.8/include/net/udp.h 2011-07-21 22:17:23.000000000 -0400
61469 +++ linux-3.0.8/include/net/udp.h 2011-08-23 21:47:56.000000000 -0400
61470 @@ -234,8 +234,8 @@ struct udp_seq_afinfo {
61471 char *name;
61472 sa_family_t family;
61473 struct udp_table *udp_table;
61474 - struct file_operations seq_fops;
61475 - struct seq_operations seq_ops;
61476 + file_operations_no_const seq_fops;
61477 + seq_operations_no_const seq_ops;
61478 };
61479
61480 struct udp_iter_state {
61481 diff -urNp linux-3.0.8/include/net/xfrm.h linux-3.0.8/include/net/xfrm.h
61482 --- linux-3.0.8/include/net/xfrm.h 2011-07-21 22:17:23.000000000 -0400
61483 +++ linux-3.0.8/include/net/xfrm.h 2011-08-23 21:47:56.000000000 -0400
61484 @@ -505,7 +505,7 @@ struct xfrm_policy {
61485 struct timer_list timer;
61486
61487 struct flow_cache_object flo;
61488 - atomic_t genid;
61489 + atomic_unchecked_t genid;
61490 u32 priority;
61491 u32 index;
61492 struct xfrm_mark mark;
61493 diff -urNp linux-3.0.8/include/rdma/iw_cm.h linux-3.0.8/include/rdma/iw_cm.h
61494 --- linux-3.0.8/include/rdma/iw_cm.h 2011-07-21 22:17:23.000000000 -0400
61495 +++ linux-3.0.8/include/rdma/iw_cm.h 2011-08-23 21:47:56.000000000 -0400
61496 @@ -120,7 +120,7 @@ struct iw_cm_verbs {
61497 int backlog);
61498
61499 int (*destroy_listen)(struct iw_cm_id *cm_id);
61500 -};
61501 +} __no_const;
61502
61503 /**
61504 * iw_create_cm_id - Create an IW CM identifier.
61505 diff -urNp linux-3.0.8/include/scsi/libfc.h linux-3.0.8/include/scsi/libfc.h
61506 --- linux-3.0.8/include/scsi/libfc.h 2011-07-21 22:17:23.000000000 -0400
61507 +++ linux-3.0.8/include/scsi/libfc.h 2011-08-23 21:47:56.000000000 -0400
61508 @@ -750,6 +750,7 @@ struct libfc_function_template {
61509 */
61510 void (*disc_stop_final) (struct fc_lport *);
61511 };
61512 +typedef struct libfc_function_template __no_const libfc_function_template_no_const;
61513
61514 /**
61515 * struct fc_disc - Discovery context
61516 @@ -853,7 +854,7 @@ struct fc_lport {
61517 struct fc_vport *vport;
61518
61519 /* Operational Information */
61520 - struct libfc_function_template tt;
61521 + libfc_function_template_no_const tt;
61522 u8 link_up;
61523 u8 qfull;
61524 enum fc_lport_state state;
61525 diff -urNp linux-3.0.8/include/scsi/scsi_device.h linux-3.0.8/include/scsi/scsi_device.h
61526 --- linux-3.0.8/include/scsi/scsi_device.h 2011-07-21 22:17:23.000000000 -0400
61527 +++ linux-3.0.8/include/scsi/scsi_device.h 2011-08-23 21:47:56.000000000 -0400
61528 @@ -161,9 +161,9 @@ struct scsi_device {
61529 unsigned int max_device_blocked; /* what device_blocked counts down from */
61530 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
61531
61532 - atomic_t iorequest_cnt;
61533 - atomic_t iodone_cnt;
61534 - atomic_t ioerr_cnt;
61535 + atomic_unchecked_t iorequest_cnt;
61536 + atomic_unchecked_t iodone_cnt;
61537 + atomic_unchecked_t ioerr_cnt;
61538
61539 struct device sdev_gendev,
61540 sdev_dev;
61541 diff -urNp linux-3.0.8/include/scsi/scsi_transport_fc.h linux-3.0.8/include/scsi/scsi_transport_fc.h
61542 --- linux-3.0.8/include/scsi/scsi_transport_fc.h 2011-07-21 22:17:23.000000000 -0400
61543 +++ linux-3.0.8/include/scsi/scsi_transport_fc.h 2011-08-26 19:49:56.000000000 -0400
61544 @@ -711,7 +711,7 @@ struct fc_function_template {
61545 unsigned long show_host_system_hostname:1;
61546
61547 unsigned long disable_target_scan:1;
61548 -};
61549 +} __do_const;
61550
61551
61552 /**
61553 diff -urNp linux-3.0.8/include/sound/ak4xxx-adda.h linux-3.0.8/include/sound/ak4xxx-adda.h
61554 --- linux-3.0.8/include/sound/ak4xxx-adda.h 2011-07-21 22:17:23.000000000 -0400
61555 +++ linux-3.0.8/include/sound/ak4xxx-adda.h 2011-08-23 21:47:56.000000000 -0400
61556 @@ -35,7 +35,7 @@ struct snd_ak4xxx_ops {
61557 void (*write)(struct snd_akm4xxx *ak, int chip, unsigned char reg,
61558 unsigned char val);
61559 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
61560 -};
61561 +} __no_const;
61562
61563 #define AK4XXX_IMAGE_SIZE (AK4XXX_MAX_CHIPS * 16) /* 64 bytes */
61564
61565 diff -urNp linux-3.0.8/include/sound/hwdep.h linux-3.0.8/include/sound/hwdep.h
61566 --- linux-3.0.8/include/sound/hwdep.h 2011-07-21 22:17:23.000000000 -0400
61567 +++ linux-3.0.8/include/sound/hwdep.h 2011-08-23 21:47:56.000000000 -0400
61568 @@ -49,7 +49,7 @@ struct snd_hwdep_ops {
61569 struct snd_hwdep_dsp_status *status);
61570 int (*dsp_load)(struct snd_hwdep *hw,
61571 struct snd_hwdep_dsp_image *image);
61572 -};
61573 +} __no_const;
61574
61575 struct snd_hwdep {
61576 struct snd_card *card;
61577 diff -urNp linux-3.0.8/include/sound/info.h linux-3.0.8/include/sound/info.h
61578 --- linux-3.0.8/include/sound/info.h 2011-07-21 22:17:23.000000000 -0400
61579 +++ linux-3.0.8/include/sound/info.h 2011-08-23 21:47:56.000000000 -0400
61580 @@ -44,7 +44,7 @@ struct snd_info_entry_text {
61581 struct snd_info_buffer *buffer);
61582 void (*write)(struct snd_info_entry *entry,
61583 struct snd_info_buffer *buffer);
61584 -};
61585 +} __no_const;
61586
61587 struct snd_info_entry_ops {
61588 int (*open)(struct snd_info_entry *entry,
61589 diff -urNp linux-3.0.8/include/sound/pcm.h linux-3.0.8/include/sound/pcm.h
61590 --- linux-3.0.8/include/sound/pcm.h 2011-07-21 22:17:23.000000000 -0400
61591 +++ linux-3.0.8/include/sound/pcm.h 2011-08-23 21:47:56.000000000 -0400
61592 @@ -81,6 +81,7 @@ struct snd_pcm_ops {
61593 int (*mmap)(struct snd_pcm_substream *substream, struct vm_area_struct *vma);
61594 int (*ack)(struct snd_pcm_substream *substream);
61595 };
61596 +typedef struct snd_pcm_ops __no_const snd_pcm_ops_no_const;
61597
61598 /*
61599 *
61600 diff -urNp linux-3.0.8/include/sound/sb16_csp.h linux-3.0.8/include/sound/sb16_csp.h
61601 --- linux-3.0.8/include/sound/sb16_csp.h 2011-07-21 22:17:23.000000000 -0400
61602 +++ linux-3.0.8/include/sound/sb16_csp.h 2011-08-23 21:47:56.000000000 -0400
61603 @@ -146,7 +146,7 @@ struct snd_sb_csp_ops {
61604 int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
61605 int (*csp_stop) (struct snd_sb_csp * p);
61606 int (*csp_qsound_transfer) (struct snd_sb_csp * p);
61607 -};
61608 +} __no_const;
61609
61610 /*
61611 * CSP private data
61612 diff -urNp linux-3.0.8/include/sound/soc.h linux-3.0.8/include/sound/soc.h
61613 --- linux-3.0.8/include/sound/soc.h 2011-07-21 22:17:23.000000000 -0400
61614 +++ linux-3.0.8/include/sound/soc.h 2011-08-26 19:49:56.000000000 -0400
61615 @@ -636,7 +636,7 @@ struct snd_soc_platform_driver {
61616
61617 /* platform stream ops */
61618 struct snd_pcm_ops *ops;
61619 -};
61620 +} __do_const;
61621
61622 struct snd_soc_platform {
61623 const char *name;
61624 diff -urNp linux-3.0.8/include/sound/ymfpci.h linux-3.0.8/include/sound/ymfpci.h
61625 --- linux-3.0.8/include/sound/ymfpci.h 2011-07-21 22:17:23.000000000 -0400
61626 +++ linux-3.0.8/include/sound/ymfpci.h 2011-08-23 21:47:56.000000000 -0400
61627 @@ -358,7 +358,7 @@ struct snd_ymfpci {
61628 spinlock_t reg_lock;
61629 spinlock_t voice_lock;
61630 wait_queue_head_t interrupt_sleep;
61631 - atomic_t interrupt_sleep_count;
61632 + atomic_unchecked_t interrupt_sleep_count;
61633 struct snd_info_entry *proc_entry;
61634 const struct firmware *dsp_microcode;
61635 const struct firmware *controller_microcode;
61636 diff -urNp linux-3.0.8/include/target/target_core_base.h linux-3.0.8/include/target/target_core_base.h
61637 --- linux-3.0.8/include/target/target_core_base.h 2011-07-21 22:17:23.000000000 -0400
61638 +++ linux-3.0.8/include/target/target_core_base.h 2011-08-23 21:47:56.000000000 -0400
61639 @@ -364,7 +364,7 @@ struct t10_reservation_ops {
61640 int (*t10_seq_non_holder)(struct se_cmd *, unsigned char *, u32);
61641 int (*t10_pr_register)(struct se_cmd *);
61642 int (*t10_pr_clear)(struct se_cmd *);
61643 -};
61644 +} __no_const;
61645
61646 struct t10_reservation_template {
61647 /* Reservation effects all target ports */
61648 @@ -432,8 +432,8 @@ struct se_transport_task {
61649 atomic_t t_task_cdbs_left;
61650 atomic_t t_task_cdbs_ex_left;
61651 atomic_t t_task_cdbs_timeout_left;
61652 - atomic_t t_task_cdbs_sent;
61653 - atomic_t t_transport_aborted;
61654 + atomic_unchecked_t t_task_cdbs_sent;
61655 + atomic_unchecked_t t_transport_aborted;
61656 atomic_t t_transport_active;
61657 atomic_t t_transport_complete;
61658 atomic_t t_transport_queue_active;
61659 @@ -774,7 +774,7 @@ struct se_device {
61660 atomic_t active_cmds;
61661 atomic_t simple_cmds;
61662 atomic_t depth_left;
61663 - atomic_t dev_ordered_id;
61664 + atomic_unchecked_t dev_ordered_id;
61665 atomic_t dev_tur_active;
61666 atomic_t execute_tasks;
61667 atomic_t dev_status_thr_count;
61668 diff -urNp linux-3.0.8/include/trace/events/irq.h linux-3.0.8/include/trace/events/irq.h
61669 --- linux-3.0.8/include/trace/events/irq.h 2011-07-21 22:17:23.000000000 -0400
61670 +++ linux-3.0.8/include/trace/events/irq.h 2011-08-23 21:47:56.000000000 -0400
61671 @@ -36,7 +36,7 @@ struct softirq_action;
61672 */
61673 TRACE_EVENT(irq_handler_entry,
61674
61675 - TP_PROTO(int irq, struct irqaction *action),
61676 + TP_PROTO(int irq, const struct irqaction *action),
61677
61678 TP_ARGS(irq, action),
61679
61680 @@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
61681 */
61682 TRACE_EVENT(irq_handler_exit,
61683
61684 - TP_PROTO(int irq, struct irqaction *action, int ret),
61685 + TP_PROTO(int irq, const struct irqaction *action, int ret),
61686
61687 TP_ARGS(irq, action, ret),
61688
61689 diff -urNp linux-3.0.8/include/video/udlfb.h linux-3.0.8/include/video/udlfb.h
61690 --- linux-3.0.8/include/video/udlfb.h 2011-07-21 22:17:23.000000000 -0400
61691 +++ linux-3.0.8/include/video/udlfb.h 2011-08-23 21:47:56.000000000 -0400
61692 @@ -51,10 +51,10 @@ struct dlfb_data {
61693 int base8;
61694 u32 pseudo_palette[256];
61695 /* blit-only rendering path metrics, exposed through sysfs */
61696 - atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
61697 - atomic_t bytes_identical; /* saved effort with backbuffer comparison */
61698 - atomic_t bytes_sent; /* to usb, after compression including overhead */
61699 - atomic_t cpu_kcycles_used; /* transpired during pixel processing */
61700 + atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
61701 + atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
61702 + atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
61703 + atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
61704 };
61705
61706 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
61707 diff -urNp linux-3.0.8/include/video/uvesafb.h linux-3.0.8/include/video/uvesafb.h
61708 --- linux-3.0.8/include/video/uvesafb.h 2011-07-21 22:17:23.000000000 -0400
61709 +++ linux-3.0.8/include/video/uvesafb.h 2011-08-23 21:47:56.000000000 -0400
61710 @@ -177,6 +177,7 @@ struct uvesafb_par {
61711 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
61712 u8 pmi_setpal; /* PMI for palette changes */
61713 u16 *pmi_base; /* protected mode interface location */
61714 + u8 *pmi_code; /* protected mode code location */
61715 void *pmi_start;
61716 void *pmi_pal;
61717 u8 *vbe_state_orig; /*
61718 diff -urNp linux-3.0.8/init/do_mounts.c linux-3.0.8/init/do_mounts.c
61719 --- linux-3.0.8/init/do_mounts.c 2011-07-21 22:17:23.000000000 -0400
61720 +++ linux-3.0.8/init/do_mounts.c 2011-10-06 04:17:55.000000000 -0400
61721 @@ -287,11 +287,11 @@ static void __init get_fs_names(char *pa
61722
61723 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
61724 {
61725 - int err = sys_mount(name, "/root", fs, flags, data);
61726 + int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data);
61727 if (err)
61728 return err;
61729
61730 - sys_chdir((const char __user __force *)"/root");
61731 + sys_chdir((const char __force_user*)"/root");
61732 ROOT_DEV = current->fs->pwd.mnt->mnt_sb->s_dev;
61733 printk(KERN_INFO
61734 "VFS: Mounted root (%s filesystem)%s on device %u:%u.\n",
61735 @@ -383,18 +383,18 @@ void __init change_floppy(char *fmt, ...
61736 va_start(args, fmt);
61737 vsprintf(buf, fmt, args);
61738 va_end(args);
61739 - fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
61740 + fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
61741 if (fd >= 0) {
61742 sys_ioctl(fd, FDEJECT, 0);
61743 sys_close(fd);
61744 }
61745 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
61746 - fd = sys_open("/dev/console", O_RDWR, 0);
61747 + fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
61748 if (fd >= 0) {
61749 sys_ioctl(fd, TCGETS, (long)&termios);
61750 termios.c_lflag &= ~ICANON;
61751 sys_ioctl(fd, TCSETSF, (long)&termios);
61752 - sys_read(fd, &c, 1);
61753 + sys_read(fd, (char __user *)&c, 1);
61754 termios.c_lflag |= ICANON;
61755 sys_ioctl(fd, TCSETSF, (long)&termios);
61756 sys_close(fd);
61757 @@ -488,6 +488,6 @@ void __init prepare_namespace(void)
61758 mount_root();
61759 out:
61760 devtmpfs_mount("dev");
61761 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
61762 - sys_chroot((const char __user __force *)".");
61763 + sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
61764 + sys_chroot((const char __force_user *)".");
61765 }
61766 diff -urNp linux-3.0.8/init/do_mounts.h linux-3.0.8/init/do_mounts.h
61767 --- linux-3.0.8/init/do_mounts.h 2011-07-21 22:17:23.000000000 -0400
61768 +++ linux-3.0.8/init/do_mounts.h 2011-10-06 04:17:55.000000000 -0400
61769 @@ -15,15 +15,15 @@ extern int root_mountflags;
61770
61771 static inline int create_dev(char *name, dev_t dev)
61772 {
61773 - sys_unlink(name);
61774 - return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
61775 + sys_unlink((char __force_user *)name);
61776 + return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
61777 }
61778
61779 #if BITS_PER_LONG == 32
61780 static inline u32 bstat(char *name)
61781 {
61782 struct stat64 stat;
61783 - if (sys_stat64(name, &stat) != 0)
61784 + if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
61785 return 0;
61786 if (!S_ISBLK(stat.st_mode))
61787 return 0;
61788 @@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
61789 static inline u32 bstat(char *name)
61790 {
61791 struct stat stat;
61792 - if (sys_newstat(name, &stat) != 0)
61793 + if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
61794 return 0;
61795 if (!S_ISBLK(stat.st_mode))
61796 return 0;
61797 diff -urNp linux-3.0.8/init/do_mounts_initrd.c linux-3.0.8/init/do_mounts_initrd.c
61798 --- linux-3.0.8/init/do_mounts_initrd.c 2011-07-21 22:17:23.000000000 -0400
61799 +++ linux-3.0.8/init/do_mounts_initrd.c 2011-10-06 04:17:55.000000000 -0400
61800 @@ -44,13 +44,13 @@ static void __init handle_initrd(void)
61801 create_dev("/dev/root.old", Root_RAM0);
61802 /* mount initrd on rootfs' /root */
61803 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
61804 - sys_mkdir("/old", 0700);
61805 - root_fd = sys_open("/", 0, 0);
61806 - old_fd = sys_open("/old", 0, 0);
61807 + sys_mkdir((const char __force_user *)"/old", 0700);
61808 + root_fd = sys_open((const char __force_user *)"/", 0, 0);
61809 + old_fd = sys_open((const char __force_user *)"/old", 0, 0);
61810 /* move initrd over / and chdir/chroot in initrd root */
61811 - sys_chdir("/root");
61812 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
61813 - sys_chroot(".");
61814 + sys_chdir((const char __force_user *)"/root");
61815 + sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
61816 + sys_chroot((const char __force_user *)".");
61817
61818 /*
61819 * In case that a resume from disk is carried out by linuxrc or one of
61820 @@ -67,15 +67,15 @@ static void __init handle_initrd(void)
61821
61822 /* move initrd to rootfs' /old */
61823 sys_fchdir(old_fd);
61824 - sys_mount("/", ".", NULL, MS_MOVE, NULL);
61825 + sys_mount((char __force_user *)"/", (char __force_user *)".", NULL, MS_MOVE, NULL);
61826 /* switch root and cwd back to / of rootfs */
61827 sys_fchdir(root_fd);
61828 - sys_chroot(".");
61829 + sys_chroot((const char __force_user *)".");
61830 sys_close(old_fd);
61831 sys_close(root_fd);
61832
61833 if (new_decode_dev(real_root_dev) == Root_RAM0) {
61834 - sys_chdir("/old");
61835 + sys_chdir((const char __force_user *)"/old");
61836 return;
61837 }
61838
61839 @@ -83,17 +83,17 @@ static void __init handle_initrd(void)
61840 mount_root();
61841
61842 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
61843 - error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
61844 + error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
61845 if (!error)
61846 printk("okay\n");
61847 else {
61848 - int fd = sys_open("/dev/root.old", O_RDWR, 0);
61849 + int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
61850 if (error == -ENOENT)
61851 printk("/initrd does not exist. Ignored.\n");
61852 else
61853 printk("failed\n");
61854 printk(KERN_NOTICE "Unmounting old root\n");
61855 - sys_umount("/old", MNT_DETACH);
61856 + sys_umount((char __force_user *)"/old", MNT_DETACH);
61857 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
61858 if (fd < 0) {
61859 error = fd;
61860 @@ -116,11 +116,11 @@ int __init initrd_load(void)
61861 * mounted in the normal path.
61862 */
61863 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
61864 - sys_unlink("/initrd.image");
61865 + sys_unlink((const char __force_user *)"/initrd.image");
61866 handle_initrd();
61867 return 1;
61868 }
61869 }
61870 - sys_unlink("/initrd.image");
61871 + sys_unlink((const char __force_user *)"/initrd.image");
61872 return 0;
61873 }
61874 diff -urNp linux-3.0.8/init/do_mounts_md.c linux-3.0.8/init/do_mounts_md.c
61875 --- linux-3.0.8/init/do_mounts_md.c 2011-07-21 22:17:23.000000000 -0400
61876 +++ linux-3.0.8/init/do_mounts_md.c 2011-10-06 04:17:55.000000000 -0400
61877 @@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
61878 partitioned ? "_d" : "", minor,
61879 md_setup_args[ent].device_names);
61880
61881 - fd = sys_open(name, 0, 0);
61882 + fd = sys_open((char __force_user *)name, 0, 0);
61883 if (fd < 0) {
61884 printk(KERN_ERR "md: open failed - cannot start "
61885 "array %s\n", name);
61886 @@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
61887 * array without it
61888 */
61889 sys_close(fd);
61890 - fd = sys_open(name, 0, 0);
61891 + fd = sys_open((char __force_user *)name, 0, 0);
61892 sys_ioctl(fd, BLKRRPART, 0);
61893 }
61894 sys_close(fd);
61895 @@ -283,7 +283,7 @@ static void __init autodetect_raid(void)
61896
61897 wait_for_device_probe();
61898
61899 - fd = sys_open((const char __user __force *) "/dev/md0", 0, 0);
61900 + fd = sys_open((const char __force_user *) "/dev/md0", 0, 0);
61901 if (fd >= 0) {
61902 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
61903 sys_close(fd);
61904 diff -urNp linux-3.0.8/init/initramfs.c linux-3.0.8/init/initramfs.c
61905 --- linux-3.0.8/init/initramfs.c 2011-07-21 22:17:23.000000000 -0400
61906 +++ linux-3.0.8/init/initramfs.c 2011-10-06 04:17:55.000000000 -0400
61907 @@ -74,7 +74,7 @@ static void __init free_hash(void)
61908 }
61909 }
61910
61911 -static long __init do_utime(char __user *filename, time_t mtime)
61912 +static long __init do_utime(__force char __user *filename, time_t mtime)
61913 {
61914 struct timespec t[2];
61915
61916 @@ -109,7 +109,7 @@ static void __init dir_utime(void)
61917 struct dir_entry *de, *tmp;
61918 list_for_each_entry_safe(de, tmp, &dir_list, list) {
61919 list_del(&de->list);
61920 - do_utime(de->name, de->mtime);
61921 + do_utime((char __force_user *)de->name, de->mtime);
61922 kfree(de->name);
61923 kfree(de);
61924 }
61925 @@ -271,7 +271,7 @@ static int __init maybe_link(void)
61926 if (nlink >= 2) {
61927 char *old = find_link(major, minor, ino, mode, collected);
61928 if (old)
61929 - return (sys_link(old, collected) < 0) ? -1 : 1;
61930 + return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
61931 }
61932 return 0;
61933 }
61934 @@ -280,11 +280,11 @@ static void __init clean_path(char *path
61935 {
61936 struct stat st;
61937
61938 - if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
61939 + if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
61940 if (S_ISDIR(st.st_mode))
61941 - sys_rmdir(path);
61942 + sys_rmdir((char __force_user *)path);
61943 else
61944 - sys_unlink(path);
61945 + sys_unlink((char __force_user *)path);
61946 }
61947 }
61948
61949 @@ -305,7 +305,7 @@ static int __init do_name(void)
61950 int openflags = O_WRONLY|O_CREAT;
61951 if (ml != 1)
61952 openflags |= O_TRUNC;
61953 - wfd = sys_open(collected, openflags, mode);
61954 + wfd = sys_open((char __force_user *)collected, openflags, mode);
61955
61956 if (wfd >= 0) {
61957 sys_fchown(wfd, uid, gid);
61958 @@ -317,17 +317,17 @@ static int __init do_name(void)
61959 }
61960 }
61961 } else if (S_ISDIR(mode)) {
61962 - sys_mkdir(collected, mode);
61963 - sys_chown(collected, uid, gid);
61964 - sys_chmod(collected, mode);
61965 + sys_mkdir((char __force_user *)collected, mode);
61966 + sys_chown((char __force_user *)collected, uid, gid);
61967 + sys_chmod((char __force_user *)collected, mode);
61968 dir_add(collected, mtime);
61969 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
61970 S_ISFIFO(mode) || S_ISSOCK(mode)) {
61971 if (maybe_link() == 0) {
61972 - sys_mknod(collected, mode, rdev);
61973 - sys_chown(collected, uid, gid);
61974 - sys_chmod(collected, mode);
61975 - do_utime(collected, mtime);
61976 + sys_mknod((char __force_user *)collected, mode, rdev);
61977 + sys_chown((char __force_user *)collected, uid, gid);
61978 + sys_chmod((char __force_user *)collected, mode);
61979 + do_utime((char __force_user *)collected, mtime);
61980 }
61981 }
61982 return 0;
61983 @@ -336,15 +336,15 @@ static int __init do_name(void)
61984 static int __init do_copy(void)
61985 {
61986 if (count >= body_len) {
61987 - sys_write(wfd, victim, body_len);
61988 + sys_write(wfd, (char __force_user *)victim, body_len);
61989 sys_close(wfd);
61990 - do_utime(vcollected, mtime);
61991 + do_utime((char __force_user *)vcollected, mtime);
61992 kfree(vcollected);
61993 eat(body_len);
61994 state = SkipIt;
61995 return 0;
61996 } else {
61997 - sys_write(wfd, victim, count);
61998 + sys_write(wfd, (char __force_user *)victim, count);
61999 body_len -= count;
62000 eat(count);
62001 return 1;
62002 @@ -355,9 +355,9 @@ static int __init do_symlink(void)
62003 {
62004 collected[N_ALIGN(name_len) + body_len] = '\0';
62005 clean_path(collected, 0);
62006 - sys_symlink(collected + N_ALIGN(name_len), collected);
62007 - sys_lchown(collected, uid, gid);
62008 - do_utime(collected, mtime);
62009 + sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
62010 + sys_lchown((char __force_user *)collected, uid, gid);
62011 + do_utime((char __force_user *)collected, mtime);
62012 state = SkipIt;
62013 next_state = Reset;
62014 return 0;
62015 diff -urNp linux-3.0.8/init/Kconfig linux-3.0.8/init/Kconfig
62016 --- linux-3.0.8/init/Kconfig 2011-07-21 22:17:23.000000000 -0400
62017 +++ linux-3.0.8/init/Kconfig 2011-08-23 21:47:56.000000000 -0400
62018 @@ -1195,7 +1195,7 @@ config SLUB_DEBUG
62019
62020 config COMPAT_BRK
62021 bool "Disable heap randomization"
62022 - default y
62023 + default n
62024 help
62025 Randomizing heap placement makes heap exploits harder, but it
62026 also breaks ancient binaries (including anything libc5 based).
62027 diff -urNp linux-3.0.8/init/main.c linux-3.0.8/init/main.c
62028 --- linux-3.0.8/init/main.c 2011-07-21 22:17:23.000000000 -0400
62029 +++ linux-3.0.8/init/main.c 2011-10-06 04:17:55.000000000 -0400
62030 @@ -96,6 +96,8 @@ static inline void mark_rodata_ro(void)
62031 extern void tc_init(void);
62032 #endif
62033
62034 +extern void grsecurity_init(void);
62035 +
62036 /*
62037 * Debug helper: via this flag we know that we are in 'early bootup code'
62038 * where only the boot processor is running with IRQ disabled. This means
62039 @@ -149,6 +151,49 @@ static int __init set_reset_devices(char
62040
62041 __setup("reset_devices", set_reset_devices);
62042
62043 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
62044 +extern char pax_enter_kernel_user[];
62045 +extern char pax_exit_kernel_user[];
62046 +extern pgdval_t clone_pgd_mask;
62047 +#endif
62048 +
62049 +#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
62050 +static int __init setup_pax_nouderef(char *str)
62051 +{
62052 +#ifdef CONFIG_X86_32
62053 + unsigned int cpu;
62054 + struct desc_struct *gdt;
62055 +
62056 + for (cpu = 0; cpu < NR_CPUS; cpu++) {
62057 + gdt = get_cpu_gdt_table(cpu);
62058 + gdt[GDT_ENTRY_KERNEL_DS].type = 3;
62059 + gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
62060 + gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
62061 + gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
62062 + }
62063 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
62064 +#else
62065 + memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
62066 + memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
62067 + clone_pgd_mask = ~(pgdval_t)0UL;
62068 +#endif
62069 +
62070 + return 0;
62071 +}
62072 +early_param("pax_nouderef", setup_pax_nouderef);
62073 +#endif
62074 +
62075 +#ifdef CONFIG_PAX_SOFTMODE
62076 +int pax_softmode;
62077 +
62078 +static int __init setup_pax_softmode(char *str)
62079 +{
62080 + get_option(&str, &pax_softmode);
62081 + return 1;
62082 +}
62083 +__setup("pax_softmode=", setup_pax_softmode);
62084 +#endif
62085 +
62086 static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
62087 const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
62088 static const char *panic_later, *panic_param;
62089 @@ -667,6 +712,7 @@ int __init_or_module do_one_initcall(ini
62090 {
62091 int count = preempt_count();
62092 int ret;
62093 + const char *msg1 = "", *msg2 = "";
62094
62095 if (initcall_debug)
62096 ret = do_one_initcall_debug(fn);
62097 @@ -679,15 +725,15 @@ int __init_or_module do_one_initcall(ini
62098 sprintf(msgbuf, "error code %d ", ret);
62099
62100 if (preempt_count() != count) {
62101 - strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
62102 + msg1 = " preemption imbalance";
62103 preempt_count() = count;
62104 }
62105 if (irqs_disabled()) {
62106 - strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
62107 + msg2 = " disabled interrupts";
62108 local_irq_enable();
62109 }
62110 - if (msgbuf[0]) {
62111 - printk("initcall %pF returned with %s\n", fn, msgbuf);
62112 + if (msgbuf[0] || *msg1 || *msg2) {
62113 + printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
62114 }
62115
62116 return ret;
62117 @@ -805,7 +851,7 @@ static int __init kernel_init(void * unu
62118 do_basic_setup();
62119
62120 /* Open the /dev/console on the rootfs, this should never fail */
62121 - if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
62122 + if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0)
62123 printk(KERN_WARNING "Warning: unable to open an initial console.\n");
62124
62125 (void) sys_dup(0);
62126 @@ -818,11 +864,13 @@ static int __init kernel_init(void * unu
62127 if (!ramdisk_execute_command)
62128 ramdisk_execute_command = "/init";
62129
62130 - if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
62131 + if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
62132 ramdisk_execute_command = NULL;
62133 prepare_namespace();
62134 }
62135
62136 + grsecurity_init();
62137 +
62138 /*
62139 * Ok, we have completed the initial bootup, and
62140 * we're essentially up and running. Get rid of the
62141 diff -urNp linux-3.0.8/ipc/mqueue.c linux-3.0.8/ipc/mqueue.c
62142 --- linux-3.0.8/ipc/mqueue.c 2011-10-24 08:05:30.000000000 -0400
62143 +++ linux-3.0.8/ipc/mqueue.c 2011-10-16 21:59:31.000000000 -0400
62144 @@ -156,6 +156,7 @@ static struct inode *mqueue_get_inode(st
62145 mq_bytes = (mq_msg_tblsz +
62146 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
62147
62148 + gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
62149 spin_lock(&mq_lock);
62150 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
62151 u->mq_bytes + mq_bytes > task_rlimit(p, RLIMIT_MSGQUEUE)) {
62152 diff -urNp linux-3.0.8/ipc/msg.c linux-3.0.8/ipc/msg.c
62153 --- linux-3.0.8/ipc/msg.c 2011-07-21 22:17:23.000000000 -0400
62154 +++ linux-3.0.8/ipc/msg.c 2011-08-23 21:47:56.000000000 -0400
62155 @@ -309,18 +309,19 @@ static inline int msg_security(struct ke
62156 return security_msg_queue_associate(msq, msgflg);
62157 }
62158
62159 +static struct ipc_ops msg_ops = {
62160 + .getnew = newque,
62161 + .associate = msg_security,
62162 + .more_checks = NULL
62163 +};
62164 +
62165 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
62166 {
62167 struct ipc_namespace *ns;
62168 - struct ipc_ops msg_ops;
62169 struct ipc_params msg_params;
62170
62171 ns = current->nsproxy->ipc_ns;
62172
62173 - msg_ops.getnew = newque;
62174 - msg_ops.associate = msg_security;
62175 - msg_ops.more_checks = NULL;
62176 -
62177 msg_params.key = key;
62178 msg_params.flg = msgflg;
62179
62180 diff -urNp linux-3.0.8/ipc/sem.c linux-3.0.8/ipc/sem.c
62181 --- linux-3.0.8/ipc/sem.c 2011-10-24 08:05:21.000000000 -0400
62182 +++ linux-3.0.8/ipc/sem.c 2011-08-23 21:48:14.000000000 -0400
62183 @@ -318,10 +318,15 @@ static inline int sem_more_checks(struct
62184 return 0;
62185 }
62186
62187 +static struct ipc_ops sem_ops = {
62188 + .getnew = newary,
62189 + .associate = sem_security,
62190 + .more_checks = sem_more_checks
62191 +};
62192 +
62193 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
62194 {
62195 struct ipc_namespace *ns;
62196 - struct ipc_ops sem_ops;
62197 struct ipc_params sem_params;
62198
62199 ns = current->nsproxy->ipc_ns;
62200 @@ -329,10 +334,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int,
62201 if (nsems < 0 || nsems > ns->sc_semmsl)
62202 return -EINVAL;
62203
62204 - sem_ops.getnew = newary;
62205 - sem_ops.associate = sem_security;
62206 - sem_ops.more_checks = sem_more_checks;
62207 -
62208 sem_params.key = key;
62209 sem_params.flg = semflg;
62210 sem_params.u.nsems = nsems;
62211 @@ -854,6 +855,8 @@ static int semctl_main(struct ipc_namesp
62212 int nsems;
62213 struct list_head tasks;
62214
62215 + pax_track_stack();
62216 +
62217 sma = sem_lock_check(ns, semid);
62218 if (IS_ERR(sma))
62219 return PTR_ERR(sma);
62220 @@ -1301,6 +1304,8 @@ SYSCALL_DEFINE4(semtimedop, int, semid,
62221 struct ipc_namespace *ns;
62222 struct list_head tasks;
62223
62224 + pax_track_stack();
62225 +
62226 ns = current->nsproxy->ipc_ns;
62227
62228 if (nsops < 1 || semid < 0)
62229 diff -urNp linux-3.0.8/ipc/shm.c linux-3.0.8/ipc/shm.c
62230 --- linux-3.0.8/ipc/shm.c 2011-07-21 22:17:23.000000000 -0400
62231 +++ linux-3.0.8/ipc/shm.c 2011-08-23 21:48:14.000000000 -0400
62232 @@ -69,6 +69,14 @@ static void shm_destroy (struct ipc_name
62233 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
62234 #endif
62235
62236 +#ifdef CONFIG_GRKERNSEC
62237 +extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
62238 + const time_t shm_createtime, const uid_t cuid,
62239 + const int shmid);
62240 +extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
62241 + const time_t shm_createtime);
62242 +#endif
62243 +
62244 void shm_init_ns(struct ipc_namespace *ns)
62245 {
62246 ns->shm_ctlmax = SHMMAX;
62247 @@ -401,6 +409,14 @@ static int newseg(struct ipc_namespace *
62248 shp->shm_lprid = 0;
62249 shp->shm_atim = shp->shm_dtim = 0;
62250 shp->shm_ctim = get_seconds();
62251 +#ifdef CONFIG_GRKERNSEC
62252 + {
62253 + struct timespec timeval;
62254 + do_posix_clock_monotonic_gettime(&timeval);
62255 +
62256 + shp->shm_createtime = timeval.tv_sec;
62257 + }
62258 +#endif
62259 shp->shm_segsz = size;
62260 shp->shm_nattch = 0;
62261 shp->shm_file = file;
62262 @@ -451,18 +467,19 @@ static inline int shm_more_checks(struct
62263 return 0;
62264 }
62265
62266 +static struct ipc_ops shm_ops = {
62267 + .getnew = newseg,
62268 + .associate = shm_security,
62269 + .more_checks = shm_more_checks
62270 +};
62271 +
62272 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
62273 {
62274 struct ipc_namespace *ns;
62275 - struct ipc_ops shm_ops;
62276 struct ipc_params shm_params;
62277
62278 ns = current->nsproxy->ipc_ns;
62279
62280 - shm_ops.getnew = newseg;
62281 - shm_ops.associate = shm_security;
62282 - shm_ops.more_checks = shm_more_checks;
62283 -
62284 shm_params.key = key;
62285 shm_params.flg = shmflg;
62286 shm_params.u.size = size;
62287 @@ -762,8 +779,6 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int,
62288 case SHM_LOCK:
62289 case SHM_UNLOCK:
62290 {
62291 - struct file *uninitialized_var(shm_file);
62292 -
62293 lru_add_drain_all(); /* drain pagevecs to lru lists */
62294
62295 shp = shm_lock_check(ns, shmid);
62296 @@ -896,9 +911,21 @@ long do_shmat(int shmid, char __user *sh
62297 if (err)
62298 goto out_unlock;
62299
62300 +#ifdef CONFIG_GRKERNSEC
62301 + if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
62302 + shp->shm_perm.cuid, shmid) ||
62303 + !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
62304 + err = -EACCES;
62305 + goto out_unlock;
62306 + }
62307 +#endif
62308 +
62309 path = shp->shm_file->f_path;
62310 path_get(&path);
62311 shp->shm_nattch++;
62312 +#ifdef CONFIG_GRKERNSEC
62313 + shp->shm_lapid = current->pid;
62314 +#endif
62315 size = i_size_read(path.dentry->d_inode);
62316 shm_unlock(shp);
62317
62318 diff -urNp linux-3.0.8/kernel/acct.c linux-3.0.8/kernel/acct.c
62319 --- linux-3.0.8/kernel/acct.c 2011-07-21 22:17:23.000000000 -0400
62320 +++ linux-3.0.8/kernel/acct.c 2011-10-06 04:17:55.000000000 -0400
62321 @@ -570,7 +570,7 @@ static void do_acct_process(struct bsd_a
62322 */
62323 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
62324 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
62325 - file->f_op->write(file, (char *)&ac,
62326 + file->f_op->write(file, (char __force_user *)&ac,
62327 sizeof(acct_t), &file->f_pos);
62328 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
62329 set_fs(fs);
62330 diff -urNp linux-3.0.8/kernel/audit.c linux-3.0.8/kernel/audit.c
62331 --- linux-3.0.8/kernel/audit.c 2011-07-21 22:17:23.000000000 -0400
62332 +++ linux-3.0.8/kernel/audit.c 2011-08-23 21:47:56.000000000 -0400
62333 @@ -112,7 +112,7 @@ u32 audit_sig_sid = 0;
62334 3) suppressed due to audit_rate_limit
62335 4) suppressed due to audit_backlog_limit
62336 */
62337 -static atomic_t audit_lost = ATOMIC_INIT(0);
62338 +static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
62339
62340 /* The netlink socket. */
62341 static struct sock *audit_sock;
62342 @@ -234,7 +234,7 @@ void audit_log_lost(const char *message)
62343 unsigned long now;
62344 int print;
62345
62346 - atomic_inc(&audit_lost);
62347 + atomic_inc_unchecked(&audit_lost);
62348
62349 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
62350
62351 @@ -253,7 +253,7 @@ void audit_log_lost(const char *message)
62352 printk(KERN_WARNING
62353 "audit: audit_lost=%d audit_rate_limit=%d "
62354 "audit_backlog_limit=%d\n",
62355 - atomic_read(&audit_lost),
62356 + atomic_read_unchecked(&audit_lost),
62357 audit_rate_limit,
62358 audit_backlog_limit);
62359 audit_panic(message);
62360 @@ -686,7 +686,7 @@ static int audit_receive_msg(struct sk_b
62361 status_set.pid = audit_pid;
62362 status_set.rate_limit = audit_rate_limit;
62363 status_set.backlog_limit = audit_backlog_limit;
62364 - status_set.lost = atomic_read(&audit_lost);
62365 + status_set.lost = atomic_read_unchecked(&audit_lost);
62366 status_set.backlog = skb_queue_len(&audit_skb_queue);
62367 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
62368 &status_set, sizeof(status_set));
62369 diff -urNp linux-3.0.8/kernel/auditsc.c linux-3.0.8/kernel/auditsc.c
62370 --- linux-3.0.8/kernel/auditsc.c 2011-07-21 22:17:23.000000000 -0400
62371 +++ linux-3.0.8/kernel/auditsc.c 2011-08-23 21:47:56.000000000 -0400
62372 @@ -2118,7 +2118,7 @@ int auditsc_get_stamp(struct audit_conte
62373 }
62374
62375 /* global counter which is incremented every time something logs in */
62376 -static atomic_t session_id = ATOMIC_INIT(0);
62377 +static atomic_unchecked_t session_id = ATOMIC_INIT(0);
62378
62379 /**
62380 * audit_set_loginuid - set a task's audit_context loginuid
62381 @@ -2131,7 +2131,7 @@ static atomic_t session_id = ATOMIC_INIT
62382 */
62383 int audit_set_loginuid(struct task_struct *task, uid_t loginuid)
62384 {
62385 - unsigned int sessionid = atomic_inc_return(&session_id);
62386 + unsigned int sessionid = atomic_inc_return_unchecked(&session_id);
62387 struct audit_context *context = task->audit_context;
62388
62389 if (context && context->in_syscall) {
62390 diff -urNp linux-3.0.8/kernel/capability.c linux-3.0.8/kernel/capability.c
62391 --- linux-3.0.8/kernel/capability.c 2011-07-21 22:17:23.000000000 -0400
62392 +++ linux-3.0.8/kernel/capability.c 2011-08-23 21:48:14.000000000 -0400
62393 @@ -202,6 +202,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_
62394 * before modification is attempted and the application
62395 * fails.
62396 */
62397 + if (tocopy > ARRAY_SIZE(kdata))
62398 + return -EFAULT;
62399 +
62400 if (copy_to_user(dataptr, kdata, tocopy
62401 * sizeof(struct __user_cap_data_struct))) {
62402 return -EFAULT;
62403 @@ -374,7 +377,7 @@ bool ns_capable(struct user_namespace *n
62404 BUG();
62405 }
62406
62407 - if (security_capable(ns, current_cred(), cap) == 0) {
62408 + if (security_capable(ns, current_cred(), cap) == 0 && gr_is_capable(cap)) {
62409 current->flags |= PF_SUPERPRIV;
62410 return true;
62411 }
62412 @@ -382,6 +385,27 @@ bool ns_capable(struct user_namespace *n
62413 }
62414 EXPORT_SYMBOL(ns_capable);
62415
62416 +bool ns_capable_nolog(struct user_namespace *ns, int cap)
62417 +{
62418 + if (unlikely(!cap_valid(cap))) {
62419 + printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
62420 + BUG();
62421 + }
62422 +
62423 + if (security_capable(ns, current_cred(), cap) == 0 && gr_is_capable_nolog(cap)) {
62424 + current->flags |= PF_SUPERPRIV;
62425 + return true;
62426 + }
62427 + return false;
62428 +}
62429 +EXPORT_SYMBOL(ns_capable_nolog);
62430 +
62431 +bool capable_nolog(int cap)
62432 +{
62433 + return ns_capable_nolog(&init_user_ns, cap);
62434 +}
62435 +EXPORT_SYMBOL(capable_nolog);
62436 +
62437 /**
62438 * task_ns_capable - Determine whether current task has a superior
62439 * capability targeted at a specific task's user namespace.
62440 @@ -396,6 +420,12 @@ bool task_ns_capable(struct task_struct
62441 }
62442 EXPORT_SYMBOL(task_ns_capable);
62443
62444 +bool task_ns_capable_nolog(struct task_struct *t, int cap)
62445 +{
62446 + return ns_capable_nolog(task_cred_xxx(t, user)->user_ns, cap);
62447 +}
62448 +EXPORT_SYMBOL(task_ns_capable_nolog);
62449 +
62450 /**
62451 * nsown_capable - Check superior capability to one's own user_ns
62452 * @cap: The capability in question
62453 diff -urNp linux-3.0.8/kernel/cgroup.c linux-3.0.8/kernel/cgroup.c
62454 --- linux-3.0.8/kernel/cgroup.c 2011-07-21 22:17:23.000000000 -0400
62455 +++ linux-3.0.8/kernel/cgroup.c 2011-08-23 21:48:14.000000000 -0400
62456 @@ -593,6 +593,8 @@ static struct css_set *find_css_set(
62457 struct hlist_head *hhead;
62458 struct cg_cgroup_link *link;
62459
62460 + pax_track_stack();
62461 +
62462 /* First see if we already have a cgroup group that matches
62463 * the desired set */
62464 read_lock(&css_set_lock);
62465 diff -urNp linux-3.0.8/kernel/compat.c linux-3.0.8/kernel/compat.c
62466 --- linux-3.0.8/kernel/compat.c 2011-07-21 22:17:23.000000000 -0400
62467 +++ linux-3.0.8/kernel/compat.c 2011-10-06 04:17:55.000000000 -0400
62468 @@ -13,6 +13,7 @@
62469
62470 #include <linux/linkage.h>
62471 #include <linux/compat.h>
62472 +#include <linux/module.h>
62473 #include <linux/errno.h>
62474 #include <linux/time.h>
62475 #include <linux/signal.h>
62476 @@ -166,7 +167,7 @@ static long compat_nanosleep_restart(str
62477 mm_segment_t oldfs;
62478 long ret;
62479
62480 - restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
62481 + restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
62482 oldfs = get_fs();
62483 set_fs(KERNEL_DS);
62484 ret = hrtimer_nanosleep_restart(restart);
62485 @@ -198,7 +199,7 @@ asmlinkage long compat_sys_nanosleep(str
62486 oldfs = get_fs();
62487 set_fs(KERNEL_DS);
62488 ret = hrtimer_nanosleep(&tu,
62489 - rmtp ? (struct timespec __user *)&rmt : NULL,
62490 + rmtp ? (struct timespec __force_user *)&rmt : NULL,
62491 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
62492 set_fs(oldfs);
62493
62494 @@ -307,7 +308,7 @@ asmlinkage long compat_sys_sigpending(co
62495 mm_segment_t old_fs = get_fs();
62496
62497 set_fs(KERNEL_DS);
62498 - ret = sys_sigpending((old_sigset_t __user *) &s);
62499 + ret = sys_sigpending((old_sigset_t __force_user *) &s);
62500 set_fs(old_fs);
62501 if (ret == 0)
62502 ret = put_user(s, set);
62503 @@ -330,8 +331,8 @@ asmlinkage long compat_sys_sigprocmask(i
62504 old_fs = get_fs();
62505 set_fs(KERNEL_DS);
62506 ret = sys_sigprocmask(how,
62507 - set ? (old_sigset_t __user *) &s : NULL,
62508 - oset ? (old_sigset_t __user *) &s : NULL);
62509 + set ? (old_sigset_t __force_user *) &s : NULL,
62510 + oset ? (old_sigset_t __force_user *) &s : NULL);
62511 set_fs(old_fs);
62512 if (ret == 0)
62513 if (oset)
62514 @@ -368,7 +369,7 @@ asmlinkage long compat_sys_old_getrlimit
62515 mm_segment_t old_fs = get_fs();
62516
62517 set_fs(KERNEL_DS);
62518 - ret = sys_old_getrlimit(resource, &r);
62519 + ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
62520 set_fs(old_fs);
62521
62522 if (!ret) {
62523 @@ -440,7 +441,7 @@ asmlinkage long compat_sys_getrusage(int
62524 mm_segment_t old_fs = get_fs();
62525
62526 set_fs(KERNEL_DS);
62527 - ret = sys_getrusage(who, (struct rusage __user *) &r);
62528 + ret = sys_getrusage(who, (struct rusage __force_user *) &r);
62529 set_fs(old_fs);
62530
62531 if (ret)
62532 @@ -467,8 +468,8 @@ compat_sys_wait4(compat_pid_t pid, compa
62533 set_fs (KERNEL_DS);
62534 ret = sys_wait4(pid,
62535 (stat_addr ?
62536 - (unsigned int __user *) &status : NULL),
62537 - options, (struct rusage __user *) &r);
62538 + (unsigned int __force_user *) &status : NULL),
62539 + options, (struct rusage __force_user *) &r);
62540 set_fs (old_fs);
62541
62542 if (ret > 0) {
62543 @@ -493,8 +494,8 @@ asmlinkage long compat_sys_waitid(int wh
62544 memset(&info, 0, sizeof(info));
62545
62546 set_fs(KERNEL_DS);
62547 - ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
62548 - uru ? (struct rusage __user *)&ru : NULL);
62549 + ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
62550 + uru ? (struct rusage __force_user *)&ru : NULL);
62551 set_fs(old_fs);
62552
62553 if ((ret < 0) || (info.si_signo == 0))
62554 @@ -624,8 +625,8 @@ long compat_sys_timer_settime(timer_t ti
62555 oldfs = get_fs();
62556 set_fs(KERNEL_DS);
62557 err = sys_timer_settime(timer_id, flags,
62558 - (struct itimerspec __user *) &newts,
62559 - (struct itimerspec __user *) &oldts);
62560 + (struct itimerspec __force_user *) &newts,
62561 + (struct itimerspec __force_user *) &oldts);
62562 set_fs(oldfs);
62563 if (!err && old && put_compat_itimerspec(old, &oldts))
62564 return -EFAULT;
62565 @@ -642,7 +643,7 @@ long compat_sys_timer_gettime(timer_t ti
62566 oldfs = get_fs();
62567 set_fs(KERNEL_DS);
62568 err = sys_timer_gettime(timer_id,
62569 - (struct itimerspec __user *) &ts);
62570 + (struct itimerspec __force_user *) &ts);
62571 set_fs(oldfs);
62572 if (!err && put_compat_itimerspec(setting, &ts))
62573 return -EFAULT;
62574 @@ -661,7 +662,7 @@ long compat_sys_clock_settime(clockid_t
62575 oldfs = get_fs();
62576 set_fs(KERNEL_DS);
62577 err = sys_clock_settime(which_clock,
62578 - (struct timespec __user *) &ts);
62579 + (struct timespec __force_user *) &ts);
62580 set_fs(oldfs);
62581 return err;
62582 }
62583 @@ -676,7 +677,7 @@ long compat_sys_clock_gettime(clockid_t
62584 oldfs = get_fs();
62585 set_fs(KERNEL_DS);
62586 err = sys_clock_gettime(which_clock,
62587 - (struct timespec __user *) &ts);
62588 + (struct timespec __force_user *) &ts);
62589 set_fs(oldfs);
62590 if (!err && put_compat_timespec(&ts, tp))
62591 return -EFAULT;
62592 @@ -696,7 +697,7 @@ long compat_sys_clock_adjtime(clockid_t
62593
62594 oldfs = get_fs();
62595 set_fs(KERNEL_DS);
62596 - ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
62597 + ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc);
62598 set_fs(oldfs);
62599
62600 err = compat_put_timex(utp, &txc);
62601 @@ -716,7 +717,7 @@ long compat_sys_clock_getres(clockid_t w
62602 oldfs = get_fs();
62603 set_fs(KERNEL_DS);
62604 err = sys_clock_getres(which_clock,
62605 - (struct timespec __user *) &ts);
62606 + (struct timespec __force_user *) &ts);
62607 set_fs(oldfs);
62608 if (!err && tp && put_compat_timespec(&ts, tp))
62609 return -EFAULT;
62610 @@ -728,9 +729,9 @@ static long compat_clock_nanosleep_resta
62611 long err;
62612 mm_segment_t oldfs;
62613 struct timespec tu;
62614 - struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;
62615 + struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
62616
62617 - restart->nanosleep.rmtp = (struct timespec __user *) &tu;
62618 + restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
62619 oldfs = get_fs();
62620 set_fs(KERNEL_DS);
62621 err = clock_nanosleep_restart(restart);
62622 @@ -762,8 +763,8 @@ long compat_sys_clock_nanosleep(clockid_
62623 oldfs = get_fs();
62624 set_fs(KERNEL_DS);
62625 err = sys_clock_nanosleep(which_clock, flags,
62626 - (struct timespec __user *) &in,
62627 - (struct timespec __user *) &out);
62628 + (struct timespec __force_user *) &in,
62629 + (struct timespec __force_user *) &out);
62630 set_fs(oldfs);
62631
62632 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
62633 diff -urNp linux-3.0.8/kernel/configs.c linux-3.0.8/kernel/configs.c
62634 --- linux-3.0.8/kernel/configs.c 2011-07-21 22:17:23.000000000 -0400
62635 +++ linux-3.0.8/kernel/configs.c 2011-08-23 21:48:14.000000000 -0400
62636 @@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
62637 struct proc_dir_entry *entry;
62638
62639 /* create the current config file */
62640 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
62641 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
62642 + entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
62643 + &ikconfig_file_ops);
62644 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
62645 + entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
62646 + &ikconfig_file_ops);
62647 +#endif
62648 +#else
62649 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
62650 &ikconfig_file_ops);
62651 +#endif
62652 +
62653 if (!entry)
62654 return -ENOMEM;
62655
62656 diff -urNp linux-3.0.8/kernel/cred.c linux-3.0.8/kernel/cred.c
62657 --- linux-3.0.8/kernel/cred.c 2011-07-21 22:17:23.000000000 -0400
62658 +++ linux-3.0.8/kernel/cred.c 2011-08-25 17:23:03.000000000 -0400
62659 @@ -158,6 +158,8 @@ static void put_cred_rcu(struct rcu_head
62660 */
62661 void __put_cred(struct cred *cred)
62662 {
62663 + pax_track_stack();
62664 +
62665 kdebug("__put_cred(%p{%d,%d})", cred,
62666 atomic_read(&cred->usage),
62667 read_cred_subscribers(cred));
62668 @@ -182,6 +184,8 @@ void exit_creds(struct task_struct *tsk)
62669 {
62670 struct cred *cred;
62671
62672 + pax_track_stack();
62673 +
62674 kdebug("exit_creds(%u,%p,%p,{%d,%d})", tsk->pid, tsk->real_cred, tsk->cred,
62675 atomic_read(&tsk->cred->usage),
62676 read_cred_subscribers(tsk->cred));
62677 @@ -220,6 +224,8 @@ const struct cred *get_task_cred(struct
62678 {
62679 const struct cred *cred;
62680
62681 + pax_track_stack();
62682 +
62683 rcu_read_lock();
62684
62685 do {
62686 @@ -239,6 +245,8 @@ struct cred *cred_alloc_blank(void)
62687 {
62688 struct cred *new;
62689
62690 + pax_track_stack();
62691 +
62692 new = kmem_cache_zalloc(cred_jar, GFP_KERNEL);
62693 if (!new)
62694 return NULL;
62695 @@ -287,6 +295,8 @@ struct cred *prepare_creds(void)
62696 const struct cred *old;
62697 struct cred *new;
62698
62699 + pax_track_stack();
62700 +
62701 validate_process_creds();
62702
62703 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
62704 @@ -333,6 +343,8 @@ struct cred *prepare_exec_creds(void)
62705 struct thread_group_cred *tgcred = NULL;
62706 struct cred *new;
62707
62708 + pax_track_stack();
62709 +
62710 #ifdef CONFIG_KEYS
62711 tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL);
62712 if (!tgcred)
62713 @@ -385,6 +397,8 @@ int copy_creds(struct task_struct *p, un
62714 struct cred *new;
62715 int ret;
62716
62717 + pax_track_stack();
62718 +
62719 if (
62720 #ifdef CONFIG_KEYS
62721 !p->cred->thread_keyring &&
62722 @@ -475,6 +489,8 @@ int commit_creds(struct cred *new)
62723 struct task_struct *task = current;
62724 const struct cred *old = task->real_cred;
62725
62726 + pax_track_stack();
62727 +
62728 kdebug("commit_creds(%p{%d,%d})", new,
62729 atomic_read(&new->usage),
62730 read_cred_subscribers(new));
62731 @@ -489,6 +505,8 @@ int commit_creds(struct cred *new)
62732
62733 get_cred(new); /* we will require a ref for the subj creds too */
62734
62735 + gr_set_role_label(task, new->uid, new->gid);
62736 +
62737 /* dumpability changes */
62738 if (old->euid != new->euid ||
62739 old->egid != new->egid ||
62740 @@ -508,10 +526,8 @@ int commit_creds(struct cred *new)
62741 key_fsgid_changed(task);
62742
62743 /* do it
62744 - * - What if a process setreuid()'s and this brings the
62745 - * new uid over his NPROC rlimit? We can check this now
62746 - * cheaply with the new uid cache, so if it matters
62747 - * we should be checking for it. -DaveM
62748 + * RLIMIT_NPROC limits on user->processes have already been checked
62749 + * in set_user().
62750 */
62751 alter_cred_subscribers(new, 2);
62752 if (new->user != old->user)
62753 @@ -551,6 +567,8 @@ EXPORT_SYMBOL(commit_creds);
62754 */
62755 void abort_creds(struct cred *new)
62756 {
62757 + pax_track_stack();
62758 +
62759 kdebug("abort_creds(%p{%d,%d})", new,
62760 atomic_read(&new->usage),
62761 read_cred_subscribers(new));
62762 @@ -574,6 +592,8 @@ const struct cred *override_creds(const
62763 {
62764 const struct cred *old = current->cred;
62765
62766 + pax_track_stack();
62767 +
62768 kdebug("override_creds(%p{%d,%d})", new,
62769 atomic_read(&new->usage),
62770 read_cred_subscribers(new));
62771 @@ -603,6 +623,8 @@ void revert_creds(const struct cred *old
62772 {
62773 const struct cred *override = current->cred;
62774
62775 + pax_track_stack();
62776 +
62777 kdebug("revert_creds(%p{%d,%d})", old,
62778 atomic_read(&old->usage),
62779 read_cred_subscribers(old));
62780 @@ -649,6 +671,8 @@ struct cred *prepare_kernel_cred(struct
62781 const struct cred *old;
62782 struct cred *new;
62783
62784 + pax_track_stack();
62785 +
62786 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
62787 if (!new)
62788 return NULL;
62789 @@ -703,6 +727,8 @@ EXPORT_SYMBOL(prepare_kernel_cred);
62790 */
62791 int set_security_override(struct cred *new, u32 secid)
62792 {
62793 + pax_track_stack();
62794 +
62795 return security_kernel_act_as(new, secid);
62796 }
62797 EXPORT_SYMBOL(set_security_override);
62798 @@ -722,6 +748,8 @@ int set_security_override_from_ctx(struc
62799 u32 secid;
62800 int ret;
62801
62802 + pax_track_stack();
62803 +
62804 ret = security_secctx_to_secid(secctx, strlen(secctx), &secid);
62805 if (ret < 0)
62806 return ret;
62807 diff -urNp linux-3.0.8/kernel/debug/debug_core.c linux-3.0.8/kernel/debug/debug_core.c
62808 --- linux-3.0.8/kernel/debug/debug_core.c 2011-07-21 22:17:23.000000000 -0400
62809 +++ linux-3.0.8/kernel/debug/debug_core.c 2011-08-23 21:47:56.000000000 -0400
62810 @@ -119,7 +119,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_loc
62811 */
62812 static atomic_t masters_in_kgdb;
62813 static atomic_t slaves_in_kgdb;
62814 -static atomic_t kgdb_break_tasklet_var;
62815 +static atomic_unchecked_t kgdb_break_tasklet_var;
62816 atomic_t kgdb_setting_breakpoint;
62817
62818 struct task_struct *kgdb_usethread;
62819 @@ -129,7 +129,7 @@ int kgdb_single_step;
62820 static pid_t kgdb_sstep_pid;
62821
62822 /* to keep track of the CPU which is doing the single stepping*/
62823 -atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
62824 +atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
62825
62826 /*
62827 * If you are debugging a problem where roundup (the collection of
62828 @@ -542,7 +542,7 @@ return_normal:
62829 * kernel will only try for the value of sstep_tries before
62830 * giving up and continuing on.
62831 */
62832 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
62833 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
62834 (kgdb_info[cpu].task &&
62835 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
62836 atomic_set(&kgdb_active, -1);
62837 @@ -636,8 +636,8 @@ cpu_master_loop:
62838 }
62839
62840 kgdb_restore:
62841 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
62842 - int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
62843 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
62844 + int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
62845 if (kgdb_info[sstep_cpu].task)
62846 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
62847 else
62848 @@ -834,18 +834,18 @@ static void kgdb_unregister_callbacks(vo
62849 static void kgdb_tasklet_bpt(unsigned long ing)
62850 {
62851 kgdb_breakpoint();
62852 - atomic_set(&kgdb_break_tasklet_var, 0);
62853 + atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
62854 }
62855
62856 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
62857
62858 void kgdb_schedule_breakpoint(void)
62859 {
62860 - if (atomic_read(&kgdb_break_tasklet_var) ||
62861 + if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
62862 atomic_read(&kgdb_active) != -1 ||
62863 atomic_read(&kgdb_setting_breakpoint))
62864 return;
62865 - atomic_inc(&kgdb_break_tasklet_var);
62866 + atomic_inc_unchecked(&kgdb_break_tasklet_var);
62867 tasklet_schedule(&kgdb_tasklet_breakpoint);
62868 }
62869 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
62870 diff -urNp linux-3.0.8/kernel/debug/kdb/kdb_main.c linux-3.0.8/kernel/debug/kdb/kdb_main.c
62871 --- linux-3.0.8/kernel/debug/kdb/kdb_main.c 2011-07-21 22:17:23.000000000 -0400
62872 +++ linux-3.0.8/kernel/debug/kdb/kdb_main.c 2011-08-23 21:47:56.000000000 -0400
62873 @@ -1980,7 +1980,7 @@ static int kdb_lsmod(int argc, const cha
62874 list_for_each_entry(mod, kdb_modules, list) {
62875
62876 kdb_printf("%-20s%8u 0x%p ", mod->name,
62877 - mod->core_size, (void *)mod);
62878 + mod->core_size_rx + mod->core_size_rw, (void *)mod);
62879 #ifdef CONFIG_MODULE_UNLOAD
62880 kdb_printf("%4d ", module_refcount(mod));
62881 #endif
62882 @@ -1990,7 +1990,7 @@ static int kdb_lsmod(int argc, const cha
62883 kdb_printf(" (Loading)");
62884 else
62885 kdb_printf(" (Live)");
62886 - kdb_printf(" 0x%p", mod->module_core);
62887 + kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
62888
62889 #ifdef CONFIG_MODULE_UNLOAD
62890 {
62891 diff -urNp linux-3.0.8/kernel/events/core.c linux-3.0.8/kernel/events/core.c
62892 --- linux-3.0.8/kernel/events/core.c 2011-10-24 08:05:21.000000000 -0400
62893 +++ linux-3.0.8/kernel/events/core.c 2011-09-14 09:08:05.000000000 -0400
62894 @@ -170,7 +170,7 @@ int perf_proc_update_handler(struct ctl_
62895 return 0;
62896 }
62897
62898 -static atomic64_t perf_event_id;
62899 +static atomic64_unchecked_t perf_event_id;
62900
62901 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
62902 enum event_type_t event_type);
62903 @@ -2488,7 +2488,7 @@ static void __perf_event_read(void *info
62904
62905 static inline u64 perf_event_count(struct perf_event *event)
62906 {
62907 - return local64_read(&event->count) + atomic64_read(&event->child_count);
62908 + return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
62909 }
62910
62911 static u64 perf_event_read(struct perf_event *event)
62912 @@ -3023,9 +3023,9 @@ u64 perf_event_read_value(struct perf_ev
62913 mutex_lock(&event->child_mutex);
62914 total += perf_event_read(event);
62915 *enabled += event->total_time_enabled +
62916 - atomic64_read(&event->child_total_time_enabled);
62917 + atomic64_read_unchecked(&event->child_total_time_enabled);
62918 *running += event->total_time_running +
62919 - atomic64_read(&event->child_total_time_running);
62920 + atomic64_read_unchecked(&event->child_total_time_running);
62921
62922 list_for_each_entry(child, &event->child_list, child_list) {
62923 total += perf_event_read(child);
62924 @@ -3388,10 +3388,10 @@ void perf_event_update_userpage(struct p
62925 userpg->offset -= local64_read(&event->hw.prev_count);
62926
62927 userpg->time_enabled = event->total_time_enabled +
62928 - atomic64_read(&event->child_total_time_enabled);
62929 + atomic64_read_unchecked(&event->child_total_time_enabled);
62930
62931 userpg->time_running = event->total_time_running +
62932 - atomic64_read(&event->child_total_time_running);
62933 + atomic64_read_unchecked(&event->child_total_time_running);
62934
62935 barrier();
62936 ++userpg->lock;
62937 @@ -4188,11 +4188,11 @@ static void perf_output_read_one(struct
62938 values[n++] = perf_event_count(event);
62939 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
62940 values[n++] = enabled +
62941 - atomic64_read(&event->child_total_time_enabled);
62942 + atomic64_read_unchecked(&event->child_total_time_enabled);
62943 }
62944 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
62945 values[n++] = running +
62946 - atomic64_read(&event->child_total_time_running);
62947 + atomic64_read_unchecked(&event->child_total_time_running);
62948 }
62949 if (read_format & PERF_FORMAT_ID)
62950 values[n++] = primary_event_id(event);
62951 @@ -4833,12 +4833,12 @@ static void perf_event_mmap_event(struct
62952 * need to add enough zero bytes after the string to handle
62953 * the 64bit alignment we do later.
62954 */
62955 - buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
62956 + buf = kzalloc(PATH_MAX, GFP_KERNEL);
62957 if (!buf) {
62958 name = strncpy(tmp, "//enomem", sizeof(tmp));
62959 goto got_name;
62960 }
62961 - name = d_path(&file->f_path, buf, PATH_MAX);
62962 + name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
62963 if (IS_ERR(name)) {
62964 name = strncpy(tmp, "//toolong", sizeof(tmp));
62965 goto got_name;
62966 @@ -6190,7 +6190,7 @@ perf_event_alloc(struct perf_event_attr
62967 event->parent = parent_event;
62968
62969 event->ns = get_pid_ns(current->nsproxy->pid_ns);
62970 - event->id = atomic64_inc_return(&perf_event_id);
62971 + event->id = atomic64_inc_return_unchecked(&perf_event_id);
62972
62973 event->state = PERF_EVENT_STATE_INACTIVE;
62974
62975 @@ -6713,10 +6713,10 @@ static void sync_child_event(struct perf
62976 /*
62977 * Add back the child's count to the parent's count:
62978 */
62979 - atomic64_add(child_val, &parent_event->child_count);
62980 - atomic64_add(child_event->total_time_enabled,
62981 + atomic64_add_unchecked(child_val, &parent_event->child_count);
62982 + atomic64_add_unchecked(child_event->total_time_enabled,
62983 &parent_event->child_total_time_enabled);
62984 - atomic64_add(child_event->total_time_running,
62985 + atomic64_add_unchecked(child_event->total_time_running,
62986 &parent_event->child_total_time_running);
62987
62988 /*
62989 diff -urNp linux-3.0.8/kernel/exit.c linux-3.0.8/kernel/exit.c
62990 --- linux-3.0.8/kernel/exit.c 2011-07-21 22:17:23.000000000 -0400
62991 +++ linux-3.0.8/kernel/exit.c 2011-08-23 21:48:14.000000000 -0400
62992 @@ -57,6 +57,10 @@
62993 #include <asm/pgtable.h>
62994 #include <asm/mmu_context.h>
62995
62996 +#ifdef CONFIG_GRKERNSEC
62997 +extern rwlock_t grsec_exec_file_lock;
62998 +#endif
62999 +
63000 static void exit_mm(struct task_struct * tsk);
63001
63002 static void __unhash_process(struct task_struct *p, bool group_dead)
63003 @@ -169,6 +173,10 @@ void release_task(struct task_struct * p
63004 struct task_struct *leader;
63005 int zap_leader;
63006 repeat:
63007 +#ifdef CONFIG_NET
63008 + gr_del_task_from_ip_table(p);
63009 +#endif
63010 +
63011 tracehook_prepare_release_task(p);
63012 /* don't need to get the RCU readlock here - the process is dead and
63013 * can't be modifying its own credentials. But shut RCU-lockdep up */
63014 @@ -338,11 +346,22 @@ static void reparent_to_kthreadd(void)
63015 {
63016 write_lock_irq(&tasklist_lock);
63017
63018 +#ifdef CONFIG_GRKERNSEC
63019 + write_lock(&grsec_exec_file_lock);
63020 + if (current->exec_file) {
63021 + fput(current->exec_file);
63022 + current->exec_file = NULL;
63023 + }
63024 + write_unlock(&grsec_exec_file_lock);
63025 +#endif
63026 +
63027 ptrace_unlink(current);
63028 /* Reparent to init */
63029 current->real_parent = current->parent = kthreadd_task;
63030 list_move_tail(&current->sibling, &current->real_parent->children);
63031
63032 + gr_set_kernel_label(current);
63033 +
63034 /* Set the exit signal to SIGCHLD so we signal init on exit */
63035 current->exit_signal = SIGCHLD;
63036
63037 @@ -394,7 +413,7 @@ int allow_signal(int sig)
63038 * know it'll be handled, so that they don't get converted to
63039 * SIGKILL or just silently dropped.
63040 */
63041 - current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
63042 + current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
63043 recalc_sigpending();
63044 spin_unlock_irq(&current->sighand->siglock);
63045 return 0;
63046 @@ -430,6 +449,17 @@ void daemonize(const char *name, ...)
63047 vsnprintf(current->comm, sizeof(current->comm), name, args);
63048 va_end(args);
63049
63050 +#ifdef CONFIG_GRKERNSEC
63051 + write_lock(&grsec_exec_file_lock);
63052 + if (current->exec_file) {
63053 + fput(current->exec_file);
63054 + current->exec_file = NULL;
63055 + }
63056 + write_unlock(&grsec_exec_file_lock);
63057 +#endif
63058 +
63059 + gr_set_kernel_label(current);
63060 +
63061 /*
63062 * If we were started as result of loading a module, close all of the
63063 * user space pages. We don't need them, and if we didn't close them
63064 @@ -904,15 +934,8 @@ NORET_TYPE void do_exit(long code)
63065 struct task_struct *tsk = current;
63066 int group_dead;
63067
63068 - profile_task_exit(tsk);
63069 -
63070 - WARN_ON(atomic_read(&tsk->fs_excl));
63071 - WARN_ON(blk_needs_flush_plug(tsk));
63072 -
63073 if (unlikely(in_interrupt()))
63074 panic("Aiee, killing interrupt handler!");
63075 - if (unlikely(!tsk->pid))
63076 - panic("Attempted to kill the idle task!");
63077
63078 /*
63079 * If do_exit is called because this processes oopsed, it's possible
63080 @@ -923,6 +946,14 @@ NORET_TYPE void do_exit(long code)
63081 */
63082 set_fs(USER_DS);
63083
63084 + profile_task_exit(tsk);
63085 +
63086 + WARN_ON(atomic_read(&tsk->fs_excl));
63087 + WARN_ON(blk_needs_flush_plug(tsk));
63088 +
63089 + if (unlikely(!tsk->pid))
63090 + panic("Attempted to kill the idle task!");
63091 +
63092 tracehook_report_exit(&code);
63093
63094 validate_creds_for_do_exit(tsk);
63095 @@ -983,6 +1014,9 @@ NORET_TYPE void do_exit(long code)
63096 tsk->exit_code = code;
63097 taskstats_exit(tsk, group_dead);
63098
63099 + gr_acl_handle_psacct(tsk, code);
63100 + gr_acl_handle_exit();
63101 +
63102 exit_mm(tsk);
63103
63104 if (group_dead)
63105 diff -urNp linux-3.0.8/kernel/fork.c linux-3.0.8/kernel/fork.c
63106 --- linux-3.0.8/kernel/fork.c 2011-07-21 22:17:23.000000000 -0400
63107 +++ linux-3.0.8/kernel/fork.c 2011-08-25 17:23:36.000000000 -0400
63108 @@ -286,7 +286,7 @@ static struct task_struct *dup_task_stru
63109 *stackend = STACK_END_MAGIC; /* for overflow detection */
63110
63111 #ifdef CONFIG_CC_STACKPROTECTOR
63112 - tsk->stack_canary = get_random_int();
63113 + tsk->stack_canary = pax_get_random_long();
63114 #endif
63115
63116 /* One for us, one for whoever does the "release_task()" (usually parent) */
63117 @@ -308,13 +308,77 @@ out:
63118 }
63119
63120 #ifdef CONFIG_MMU
63121 +static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct vm_area_struct *mpnt)
63122 +{
63123 + struct vm_area_struct *tmp;
63124 + unsigned long charge;
63125 + struct mempolicy *pol;
63126 + struct file *file;
63127 +
63128 + charge = 0;
63129 + if (mpnt->vm_flags & VM_ACCOUNT) {
63130 + unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
63131 + if (security_vm_enough_memory(len))
63132 + goto fail_nomem;
63133 + charge = len;
63134 + }
63135 + tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
63136 + if (!tmp)
63137 + goto fail_nomem;
63138 + *tmp = *mpnt;
63139 + tmp->vm_mm = mm;
63140 + INIT_LIST_HEAD(&tmp->anon_vma_chain);
63141 + pol = mpol_dup(vma_policy(mpnt));
63142 + if (IS_ERR(pol))
63143 + goto fail_nomem_policy;
63144 + vma_set_policy(tmp, pol);
63145 + if (anon_vma_fork(tmp, mpnt))
63146 + goto fail_nomem_anon_vma_fork;
63147 + tmp->vm_flags &= ~VM_LOCKED;
63148 + tmp->vm_next = tmp->vm_prev = NULL;
63149 + tmp->vm_mirror = NULL;
63150 + file = tmp->vm_file;
63151 + if (file) {
63152 + struct inode *inode = file->f_path.dentry->d_inode;
63153 + struct address_space *mapping = file->f_mapping;
63154 +
63155 + get_file(file);
63156 + if (tmp->vm_flags & VM_DENYWRITE)
63157 + atomic_dec(&inode->i_writecount);
63158 + mutex_lock(&mapping->i_mmap_mutex);
63159 + if (tmp->vm_flags & VM_SHARED)
63160 + mapping->i_mmap_writable++;
63161 + flush_dcache_mmap_lock(mapping);
63162 + /* insert tmp into the share list, just after mpnt */
63163 + vma_prio_tree_add(tmp, mpnt);
63164 + flush_dcache_mmap_unlock(mapping);
63165 + mutex_unlock(&mapping->i_mmap_mutex);
63166 + }
63167 +
63168 + /*
63169 + * Clear hugetlb-related page reserves for children. This only
63170 + * affects MAP_PRIVATE mappings. Faults generated by the child
63171 + * are not guaranteed to succeed, even if read-only
63172 + */
63173 + if (is_vm_hugetlb_page(tmp))
63174 + reset_vma_resv_huge_pages(tmp);
63175 +
63176 + return tmp;
63177 +
63178 +fail_nomem_anon_vma_fork:
63179 + mpol_put(pol);
63180 +fail_nomem_policy:
63181 + kmem_cache_free(vm_area_cachep, tmp);
63182 +fail_nomem:
63183 + vm_unacct_memory(charge);
63184 + return NULL;
63185 +}
63186 +
63187 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
63188 {
63189 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
63190 struct rb_node **rb_link, *rb_parent;
63191 int retval;
63192 - unsigned long charge;
63193 - struct mempolicy *pol;
63194
63195 down_write(&oldmm->mmap_sem);
63196 flush_cache_dup_mm(oldmm);
63197 @@ -326,8 +390,8 @@ static int dup_mmap(struct mm_struct *mm
63198 mm->locked_vm = 0;
63199 mm->mmap = NULL;
63200 mm->mmap_cache = NULL;
63201 - mm->free_area_cache = oldmm->mmap_base;
63202 - mm->cached_hole_size = ~0UL;
63203 + mm->free_area_cache = oldmm->free_area_cache;
63204 + mm->cached_hole_size = oldmm->cached_hole_size;
63205 mm->map_count = 0;
63206 cpumask_clear(mm_cpumask(mm));
63207 mm->mm_rb = RB_ROOT;
63208 @@ -343,8 +407,6 @@ static int dup_mmap(struct mm_struct *mm
63209
63210 prev = NULL;
63211 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
63212 - struct file *file;
63213 -
63214 if (mpnt->vm_flags & VM_DONTCOPY) {
63215 long pages = vma_pages(mpnt);
63216 mm->total_vm -= pages;
63217 @@ -352,55 +414,13 @@ static int dup_mmap(struct mm_struct *mm
63218 -pages);
63219 continue;
63220 }
63221 - charge = 0;
63222 - if (mpnt->vm_flags & VM_ACCOUNT) {
63223 - unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
63224 - if (security_vm_enough_memory(len))
63225 - goto fail_nomem;
63226 - charge = len;
63227 - }
63228 - tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
63229 - if (!tmp)
63230 - goto fail_nomem;
63231 - *tmp = *mpnt;
63232 - INIT_LIST_HEAD(&tmp->anon_vma_chain);
63233 - pol = mpol_dup(vma_policy(mpnt));
63234 - retval = PTR_ERR(pol);
63235 - if (IS_ERR(pol))
63236 - goto fail_nomem_policy;
63237 - vma_set_policy(tmp, pol);
63238 - tmp->vm_mm = mm;
63239 - if (anon_vma_fork(tmp, mpnt))
63240 - goto fail_nomem_anon_vma_fork;
63241 - tmp->vm_flags &= ~VM_LOCKED;
63242 - tmp->vm_next = tmp->vm_prev = NULL;
63243 - file = tmp->vm_file;
63244 - if (file) {
63245 - struct inode *inode = file->f_path.dentry->d_inode;
63246 - struct address_space *mapping = file->f_mapping;
63247 -
63248 - get_file(file);
63249 - if (tmp->vm_flags & VM_DENYWRITE)
63250 - atomic_dec(&inode->i_writecount);
63251 - mutex_lock(&mapping->i_mmap_mutex);
63252 - if (tmp->vm_flags & VM_SHARED)
63253 - mapping->i_mmap_writable++;
63254 - flush_dcache_mmap_lock(mapping);
63255 - /* insert tmp into the share list, just after mpnt */
63256 - vma_prio_tree_add(tmp, mpnt);
63257 - flush_dcache_mmap_unlock(mapping);
63258 - mutex_unlock(&mapping->i_mmap_mutex);
63259 + tmp = dup_vma(mm, mpnt);
63260 + if (!tmp) {
63261 + retval = -ENOMEM;
63262 + goto out;
63263 }
63264
63265 /*
63266 - * Clear hugetlb-related page reserves for children. This only
63267 - * affects MAP_PRIVATE mappings. Faults generated by the child
63268 - * are not guaranteed to succeed, even if read-only
63269 - */
63270 - if (is_vm_hugetlb_page(tmp))
63271 - reset_vma_resv_huge_pages(tmp);
63272 -
63273 - /*
63274 * Link in the new vma and copy the page table entries.
63275 */
63276 *pprev = tmp;
63277 @@ -421,6 +441,31 @@ static int dup_mmap(struct mm_struct *mm
63278 if (retval)
63279 goto out;
63280 }
63281 +
63282 +#ifdef CONFIG_PAX_SEGMEXEC
63283 + if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
63284 + struct vm_area_struct *mpnt_m;
63285 +
63286 + for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
63287 + BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
63288 +
63289 + if (!mpnt->vm_mirror)
63290 + continue;
63291 +
63292 + if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
63293 + BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
63294 + mpnt->vm_mirror = mpnt_m;
63295 + } else {
63296 + BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
63297 + mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
63298 + mpnt_m->vm_mirror->vm_mirror = mpnt_m;
63299 + mpnt->vm_mirror->vm_mirror = mpnt;
63300 + }
63301 + }
63302 + BUG_ON(mpnt_m);
63303 + }
63304 +#endif
63305 +
63306 /* a new mm has just been created */
63307 arch_dup_mmap(oldmm, mm);
63308 retval = 0;
63309 @@ -429,14 +474,6 @@ out:
63310 flush_tlb_mm(oldmm);
63311 up_write(&oldmm->mmap_sem);
63312 return retval;
63313 -fail_nomem_anon_vma_fork:
63314 - mpol_put(pol);
63315 -fail_nomem_policy:
63316 - kmem_cache_free(vm_area_cachep, tmp);
63317 -fail_nomem:
63318 - retval = -ENOMEM;
63319 - vm_unacct_memory(charge);
63320 - goto out;
63321 }
63322
63323 static inline int mm_alloc_pgd(struct mm_struct * mm)
63324 @@ -836,13 +873,14 @@ static int copy_fs(unsigned long clone_f
63325 spin_unlock(&fs->lock);
63326 return -EAGAIN;
63327 }
63328 - fs->users++;
63329 + atomic_inc(&fs->users);
63330 spin_unlock(&fs->lock);
63331 return 0;
63332 }
63333 tsk->fs = copy_fs_struct(fs);
63334 if (!tsk->fs)
63335 return -ENOMEM;
63336 + gr_set_chroot_entries(tsk, &tsk->fs->root);
63337 return 0;
63338 }
63339
63340 @@ -1104,12 +1142,16 @@ static struct task_struct *copy_process(
63341 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
63342 #endif
63343 retval = -EAGAIN;
63344 +
63345 + gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
63346 +
63347 if (atomic_read(&p->real_cred->user->processes) >=
63348 task_rlimit(p, RLIMIT_NPROC)) {
63349 - if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
63350 - p->real_cred->user != INIT_USER)
63351 + if (p->real_cred->user != INIT_USER &&
63352 + !capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE))
63353 goto bad_fork_free;
63354 }
63355 + current->flags &= ~PF_NPROC_EXCEEDED;
63356
63357 retval = copy_creds(p, clone_flags);
63358 if (retval < 0)
63359 @@ -1250,6 +1292,8 @@ static struct task_struct *copy_process(
63360 if (clone_flags & CLONE_THREAD)
63361 p->tgid = current->tgid;
63362
63363 + gr_copy_label(p);
63364 +
63365 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
63366 /*
63367 * Clear TID on mm_release()?
63368 @@ -1414,6 +1458,8 @@ bad_fork_cleanup_count:
63369 bad_fork_free:
63370 free_task(p);
63371 fork_out:
63372 + gr_log_forkfail(retval);
63373 +
63374 return ERR_PTR(retval);
63375 }
63376
63377 @@ -1502,6 +1548,8 @@ long do_fork(unsigned long clone_flags,
63378 if (clone_flags & CLONE_PARENT_SETTID)
63379 put_user(nr, parent_tidptr);
63380
63381 + gr_handle_brute_check();
63382 +
63383 if (clone_flags & CLONE_VFORK) {
63384 p->vfork_done = &vfork;
63385 init_completion(&vfork);
63386 @@ -1610,7 +1658,7 @@ static int unshare_fs(unsigned long unsh
63387 return 0;
63388
63389 /* don't need lock here; in the worst case we'll do useless copy */
63390 - if (fs->users == 1)
63391 + if (atomic_read(&fs->users) == 1)
63392 return 0;
63393
63394 *new_fsp = copy_fs_struct(fs);
63395 @@ -1697,7 +1745,8 @@ SYSCALL_DEFINE1(unshare, unsigned long,
63396 fs = current->fs;
63397 spin_lock(&fs->lock);
63398 current->fs = new_fs;
63399 - if (--fs->users)
63400 + gr_set_chroot_entries(current, &current->fs->root);
63401 + if (atomic_dec_return(&fs->users))
63402 new_fs = NULL;
63403 else
63404 new_fs = fs;
63405 diff -urNp linux-3.0.8/kernel/futex.c linux-3.0.8/kernel/futex.c
63406 --- linux-3.0.8/kernel/futex.c 2011-10-24 08:05:21.000000000 -0400
63407 +++ linux-3.0.8/kernel/futex.c 2011-08-23 21:48:14.000000000 -0400
63408 @@ -54,6 +54,7 @@
63409 #include <linux/mount.h>
63410 #include <linux/pagemap.h>
63411 #include <linux/syscalls.h>
63412 +#include <linux/ptrace.h>
63413 #include <linux/signal.h>
63414 #include <linux/module.h>
63415 #include <linux/magic.h>
63416 @@ -238,6 +239,11 @@ get_futex_key(u32 __user *uaddr, int fsh
63417 struct page *page, *page_head;
63418 int err, ro = 0;
63419
63420 +#ifdef CONFIG_PAX_SEGMEXEC
63421 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
63422 + return -EFAULT;
63423 +#endif
63424 +
63425 /*
63426 * The futex address must be "naturally" aligned.
63427 */
63428 @@ -1863,6 +1869,8 @@ static int futex_wait(u32 __user *uaddr,
63429 struct futex_q q = futex_q_init;
63430 int ret;
63431
63432 + pax_track_stack();
63433 +
63434 if (!bitset)
63435 return -EINVAL;
63436 q.bitset = bitset;
63437 @@ -2259,6 +2267,8 @@ static int futex_wait_requeue_pi(u32 __u
63438 struct futex_q q = futex_q_init;
63439 int res, ret;
63440
63441 + pax_track_stack();
63442 +
63443 if (!bitset)
63444 return -EINVAL;
63445
63446 @@ -2431,7 +2441,9 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
63447 {
63448 struct robust_list_head __user *head;
63449 unsigned long ret;
63450 +#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
63451 const struct cred *cred = current_cred(), *pcred;
63452 +#endif
63453
63454 if (!futex_cmpxchg_enabled)
63455 return -ENOSYS;
63456 @@ -2447,6 +2459,10 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
63457 if (!p)
63458 goto err_unlock;
63459 ret = -EPERM;
63460 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63461 + if (!ptrace_may_access(p, PTRACE_MODE_READ))
63462 + goto err_unlock;
63463 +#else
63464 pcred = __task_cred(p);
63465 /* If victim is in different user_ns, then uids are not
63466 comparable, so we must have CAP_SYS_PTRACE */
63467 @@ -2461,6 +2477,7 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
63468 !ns_capable(pcred->user->user_ns, CAP_SYS_PTRACE))
63469 goto err_unlock;
63470 ok:
63471 +#endif
63472 head = p->robust_list;
63473 rcu_read_unlock();
63474 }
63475 @@ -2712,6 +2729,7 @@ static int __init futex_init(void)
63476 {
63477 u32 curval;
63478 int i;
63479 + mm_segment_t oldfs;
63480
63481 /*
63482 * This will fail and we want it. Some arch implementations do
63483 @@ -2723,8 +2741,11 @@ static int __init futex_init(void)
63484 * implementation, the non-functional ones will return
63485 * -ENOSYS.
63486 */
63487 + oldfs = get_fs();
63488 + set_fs(USER_DS);
63489 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
63490 futex_cmpxchg_enabled = 1;
63491 + set_fs(oldfs);
63492
63493 for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
63494 plist_head_init(&futex_queues[i].chain, &futex_queues[i].lock);
63495 diff -urNp linux-3.0.8/kernel/futex_compat.c linux-3.0.8/kernel/futex_compat.c
63496 --- linux-3.0.8/kernel/futex_compat.c 2011-07-21 22:17:23.000000000 -0400
63497 +++ linux-3.0.8/kernel/futex_compat.c 2011-08-23 21:48:14.000000000 -0400
63498 @@ -10,6 +10,7 @@
63499 #include <linux/compat.h>
63500 #include <linux/nsproxy.h>
63501 #include <linux/futex.h>
63502 +#include <linux/ptrace.h>
63503
63504 #include <asm/uaccess.h>
63505
63506 @@ -136,7 +137,10 @@ compat_sys_get_robust_list(int pid, comp
63507 {
63508 struct compat_robust_list_head __user *head;
63509 unsigned long ret;
63510 - const struct cred *cred = current_cred(), *pcred;
63511 +#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
63512 + const struct cred *cred = current_cred();
63513 + const struct cred *pcred;
63514 +#endif
63515
63516 if (!futex_cmpxchg_enabled)
63517 return -ENOSYS;
63518 @@ -152,6 +156,10 @@ compat_sys_get_robust_list(int pid, comp
63519 if (!p)
63520 goto err_unlock;
63521 ret = -EPERM;
63522 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63523 + if (!ptrace_may_access(p, PTRACE_MODE_READ))
63524 + goto err_unlock;
63525 +#else
63526 pcred = __task_cred(p);
63527 /* If victim is in different user_ns, then uids are not
63528 comparable, so we must have CAP_SYS_PTRACE */
63529 @@ -166,6 +174,7 @@ compat_sys_get_robust_list(int pid, comp
63530 !ns_capable(pcred->user->user_ns, CAP_SYS_PTRACE))
63531 goto err_unlock;
63532 ok:
63533 +#endif
63534 head = p->compat_robust_list;
63535 rcu_read_unlock();
63536 }
63537 diff -urNp linux-3.0.8/kernel/gcov/base.c linux-3.0.8/kernel/gcov/base.c
63538 --- linux-3.0.8/kernel/gcov/base.c 2011-07-21 22:17:23.000000000 -0400
63539 +++ linux-3.0.8/kernel/gcov/base.c 2011-08-23 21:47:56.000000000 -0400
63540 @@ -102,11 +102,6 @@ void gcov_enable_events(void)
63541 }
63542
63543 #ifdef CONFIG_MODULES
63544 -static inline int within(void *addr, void *start, unsigned long size)
63545 -{
63546 - return ((addr >= start) && (addr < start + size));
63547 -}
63548 -
63549 /* Update list and generate events when modules are unloaded. */
63550 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
63551 void *data)
63552 @@ -121,7 +116,7 @@ static int gcov_module_notifier(struct n
63553 prev = NULL;
63554 /* Remove entries located in module from linked list. */
63555 for (info = gcov_info_head; info; info = info->next) {
63556 - if (within(info, mod->module_core, mod->core_size)) {
63557 + if (within_module_core_rw((unsigned long)info, mod)) {
63558 if (prev)
63559 prev->next = info->next;
63560 else
63561 diff -urNp linux-3.0.8/kernel/hrtimer.c linux-3.0.8/kernel/hrtimer.c
63562 --- linux-3.0.8/kernel/hrtimer.c 2011-07-21 22:17:23.000000000 -0400
63563 +++ linux-3.0.8/kernel/hrtimer.c 2011-08-23 21:47:56.000000000 -0400
63564 @@ -1391,7 +1391,7 @@ void hrtimer_peek_ahead_timers(void)
63565 local_irq_restore(flags);
63566 }
63567
63568 -static void run_hrtimer_softirq(struct softirq_action *h)
63569 +static void run_hrtimer_softirq(void)
63570 {
63571 hrtimer_peek_ahead_timers();
63572 }
63573 diff -urNp linux-3.0.8/kernel/jump_label.c linux-3.0.8/kernel/jump_label.c
63574 --- linux-3.0.8/kernel/jump_label.c 2011-07-21 22:17:23.000000000 -0400
63575 +++ linux-3.0.8/kernel/jump_label.c 2011-08-23 21:47:56.000000000 -0400
63576 @@ -55,7 +55,9 @@ jump_label_sort_entries(struct jump_entr
63577
63578 size = (((unsigned long)stop - (unsigned long)start)
63579 / sizeof(struct jump_entry));
63580 + pax_open_kernel();
63581 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
63582 + pax_close_kernel();
63583 }
63584
63585 static void jump_label_update(struct jump_label_key *key, int enable);
63586 @@ -297,10 +299,12 @@ static void jump_label_invalidate_module
63587 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
63588 struct jump_entry *iter;
63589
63590 + pax_open_kernel();
63591 for (iter = iter_start; iter < iter_stop; iter++) {
63592 if (within_module_init(iter->code, mod))
63593 iter->code = 0;
63594 }
63595 + pax_close_kernel();
63596 }
63597
63598 static int
63599 diff -urNp linux-3.0.8/kernel/kallsyms.c linux-3.0.8/kernel/kallsyms.c
63600 --- linux-3.0.8/kernel/kallsyms.c 2011-07-21 22:17:23.000000000 -0400
63601 +++ linux-3.0.8/kernel/kallsyms.c 2011-08-23 21:48:14.000000000 -0400
63602 @@ -11,6 +11,9 @@
63603 * Changed the compression method from stem compression to "table lookup"
63604 * compression (see scripts/kallsyms.c for a more complete description)
63605 */
63606 +#ifdef CONFIG_GRKERNSEC_HIDESYM
63607 +#define __INCLUDED_BY_HIDESYM 1
63608 +#endif
63609 #include <linux/kallsyms.h>
63610 #include <linux/module.h>
63611 #include <linux/init.h>
63612 @@ -53,12 +56,33 @@ extern const unsigned long kallsyms_mark
63613
63614 static inline int is_kernel_inittext(unsigned long addr)
63615 {
63616 + if (system_state != SYSTEM_BOOTING)
63617 + return 0;
63618 +
63619 if (addr >= (unsigned long)_sinittext
63620 && addr <= (unsigned long)_einittext)
63621 return 1;
63622 return 0;
63623 }
63624
63625 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
63626 +#ifdef CONFIG_MODULES
63627 +static inline int is_module_text(unsigned long addr)
63628 +{
63629 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
63630 + return 1;
63631 +
63632 + addr = ktla_ktva(addr);
63633 + return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
63634 +}
63635 +#else
63636 +static inline int is_module_text(unsigned long addr)
63637 +{
63638 + return 0;
63639 +}
63640 +#endif
63641 +#endif
63642 +
63643 static inline int is_kernel_text(unsigned long addr)
63644 {
63645 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
63646 @@ -69,13 +93,28 @@ static inline int is_kernel_text(unsigne
63647
63648 static inline int is_kernel(unsigned long addr)
63649 {
63650 +
63651 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
63652 + if (is_kernel_text(addr) || is_kernel_inittext(addr))
63653 + return 1;
63654 +
63655 + if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
63656 +#else
63657 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
63658 +#endif
63659 +
63660 return 1;
63661 return in_gate_area_no_mm(addr);
63662 }
63663
63664 static int is_ksym_addr(unsigned long addr)
63665 {
63666 +
63667 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
63668 + if (is_module_text(addr))
63669 + return 0;
63670 +#endif
63671 +
63672 if (all_var)
63673 return is_kernel(addr);
63674
63675 @@ -454,7 +493,6 @@ static unsigned long get_ksymbol_core(st
63676
63677 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
63678 {
63679 - iter->name[0] = '\0';
63680 iter->nameoff = get_symbol_offset(new_pos);
63681 iter->pos = new_pos;
63682 }
63683 @@ -502,6 +540,11 @@ static int s_show(struct seq_file *m, vo
63684 {
63685 struct kallsym_iter *iter = m->private;
63686
63687 +#ifdef CONFIG_GRKERNSEC_HIDESYM
63688 + if (current_uid())
63689 + return 0;
63690 +#endif
63691 +
63692 /* Some debugging symbols have no name. Ignore them. */
63693 if (!iter->name[0])
63694 return 0;
63695 @@ -540,7 +583,7 @@ static int kallsyms_open(struct inode *i
63696 struct kallsym_iter *iter;
63697 int ret;
63698
63699 - iter = kmalloc(sizeof(*iter), GFP_KERNEL);
63700 + iter = kzalloc(sizeof(*iter), GFP_KERNEL);
63701 if (!iter)
63702 return -ENOMEM;
63703 reset_iter(iter, 0);
63704 diff -urNp linux-3.0.8/kernel/kexec.c linux-3.0.8/kernel/kexec.c
63705 --- linux-3.0.8/kernel/kexec.c 2011-07-21 22:17:23.000000000 -0400
63706 +++ linux-3.0.8/kernel/kexec.c 2011-10-06 04:17:55.000000000 -0400
63707 @@ -1033,7 +1033,8 @@ asmlinkage long compat_sys_kexec_load(un
63708 unsigned long flags)
63709 {
63710 struct compat_kexec_segment in;
63711 - struct kexec_segment out, __user *ksegments;
63712 + struct kexec_segment out;
63713 + struct kexec_segment __user *ksegments;
63714 unsigned long i, result;
63715
63716 /* Don't allow clients that don't understand the native
63717 diff -urNp linux-3.0.8/kernel/kmod.c linux-3.0.8/kernel/kmod.c
63718 --- linux-3.0.8/kernel/kmod.c 2011-07-21 22:17:23.000000000 -0400
63719 +++ linux-3.0.8/kernel/kmod.c 2011-10-06 04:17:55.000000000 -0400
63720 @@ -73,13 +73,12 @@ char modprobe_path[KMOD_PATH_LEN] = "/sb
63721 * If module auto-loading support is disabled then this function
63722 * becomes a no-operation.
63723 */
63724 -int __request_module(bool wait, const char *fmt, ...)
63725 +static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
63726 {
63727 - va_list args;
63728 char module_name[MODULE_NAME_LEN];
63729 unsigned int max_modprobes;
63730 int ret;
63731 - char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
63732 + char *argv[] = { modprobe_path, "-q", "--", module_name, module_param, NULL };
63733 static char *envp[] = { "HOME=/",
63734 "TERM=linux",
63735 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
63736 @@ -88,9 +87,7 @@ int __request_module(bool wait, const ch
63737 #define MAX_KMOD_CONCURRENT 50 /* Completely arbitrary value - KAO */
63738 static int kmod_loop_msg;
63739
63740 - va_start(args, fmt);
63741 - ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
63742 - va_end(args);
63743 + ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
63744 if (ret >= MODULE_NAME_LEN)
63745 return -ENAMETOOLONG;
63746
63747 @@ -98,6 +95,20 @@ int __request_module(bool wait, const ch
63748 if (ret)
63749 return ret;
63750
63751 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
63752 + if (!current_uid()) {
63753 + /* hack to workaround consolekit/udisks stupidity */
63754 + read_lock(&tasklist_lock);
63755 + if (!strcmp(current->comm, "mount") &&
63756 + current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
63757 + read_unlock(&tasklist_lock);
63758 + printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
63759 + return -EPERM;
63760 + }
63761 + read_unlock(&tasklist_lock);
63762 + }
63763 +#endif
63764 +
63765 /* If modprobe needs a service that is in a module, we get a recursive
63766 * loop. Limit the number of running kmod threads to max_threads/2 or
63767 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
63768 @@ -131,6 +142,47 @@ int __request_module(bool wait, const ch
63769 atomic_dec(&kmod_concurrent);
63770 return ret;
63771 }
63772 +
63773 +int ___request_module(bool wait, char *module_param, const char *fmt, ...)
63774 +{
63775 + va_list args;
63776 + int ret;
63777 +
63778 + va_start(args, fmt);
63779 + ret = ____request_module(wait, module_param, fmt, args);
63780 + va_end(args);
63781 +
63782 + return ret;
63783 +}
63784 +
63785 +int __request_module(bool wait, const char *fmt, ...)
63786 +{
63787 + va_list args;
63788 + int ret;
63789 +
63790 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
63791 + if (current_uid()) {
63792 + char module_param[MODULE_NAME_LEN];
63793 +
63794 + memset(module_param, 0, sizeof(module_param));
63795 +
63796 + snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
63797 +
63798 + va_start(args, fmt);
63799 + ret = ____request_module(wait, module_param, fmt, args);
63800 + va_end(args);
63801 +
63802 + return ret;
63803 + }
63804 +#endif
63805 +
63806 + va_start(args, fmt);
63807 + ret = ____request_module(wait, NULL, fmt, args);
63808 + va_end(args);
63809 +
63810 + return ret;
63811 +}
63812 +
63813 EXPORT_SYMBOL(__request_module);
63814 #endif /* CONFIG_MODULES */
63815
63816 @@ -220,7 +272,7 @@ static int wait_for_helper(void *data)
63817 *
63818 * Thus the __user pointer cast is valid here.
63819 */
63820 - sys_wait4(pid, (int __user *)&ret, 0, NULL);
63821 + sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
63822
63823 /*
63824 * If ret is 0, either ____call_usermodehelper failed and the
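The kmod.c changes above split __request_module() into a va_list-based helper, ____request_module(), that can append one extra token to modprobe's argv; under CONFIG_GRKERNSEC_MODHARDEN a request from a non-root process is tagged with a grsec_modharden_normal<uid>_ marker, which the module loader later uses to refuse the load. Below is a hedged userspace sketch of the variadic-forwarding pattern only; request_module_demo and request_module_core are invented names and the usermode-helper call is replaced by a printf.

#include <stdarg.h>
#include <stdio.h>

#define MODULE_NAME_LEN 64

/* Core helper: formats the module name from a va_list and builds the
 * modprobe argument vector, optionally appending one extra token. */
static int request_module_core(const char *extra, const char *fmt, va_list ap)
{
        char name[MODULE_NAME_LEN];
        const char *argv[6];
        int n = 0, ret;

        ret = vsnprintf(name, sizeof(name), fmt, ap);
        if (ret >= (int)sizeof(name))
                return -1;              /* name too long (-ENAMETOOLONG) */

        argv[n++] = "/sbin/modprobe";
        argv[n++] = "-q";
        argv[n++] = "--";
        argv[n++] = name;
        if (extra)
                argv[n++] = extra;      /* e.g. a per-uid hardening marker */
        argv[n] = NULL;

        for (int i = 0; argv[i]; i++)   /* stand-in for call_usermodehelper() */
                printf("argv[%d] = %s\n", i, argv[i]);
        return 0;
}

/* Printf-like front end, mirroring how the patched __request_module()
 * forwards its variadic arguments to the va_list helper. */
static int request_module_demo(const char *extra, const char *fmt, ...)
{
        va_list ap;
        int ret;

        va_start(ap, fmt);
        ret = request_module_core(extra, fmt, ap);
        va_end(ap);
        return ret;
}

int main(void)
{
        request_module_demo(NULL, "fs-%s", "ext4");
        request_module_demo("grsec_modharden_normal1000_", "fs-%s", "ext4");
        return 0;
}

Running it prints the argv that would be handed to the usermode helper, once without and once with the extra hardening token.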
63825 diff -urNp linux-3.0.8/kernel/kprobes.c linux-3.0.8/kernel/kprobes.c
63826 --- linux-3.0.8/kernel/kprobes.c 2011-07-21 22:17:23.000000000 -0400
63827 +++ linux-3.0.8/kernel/kprobes.c 2011-08-23 21:47:56.000000000 -0400
63828 @@ -185,7 +185,7 @@ static kprobe_opcode_t __kprobes *__get_
63829 * kernel image and loaded module images reside. This is required
63830 * so x86_64 can correctly handle the %rip-relative fixups.
63831 */
63832 - kip->insns = module_alloc(PAGE_SIZE);
63833 + kip->insns = module_alloc_exec(PAGE_SIZE);
63834 if (!kip->insns) {
63835 kfree(kip);
63836 return NULL;
63837 @@ -225,7 +225,7 @@ static int __kprobes collect_one_slot(st
63838 */
63839 if (!list_is_singular(&kip->list)) {
63840 list_del(&kip->list);
63841 - module_free(NULL, kip->insns);
63842 + module_free_exec(NULL, kip->insns);
63843 kfree(kip);
63844 }
63845 return 1;
63846 @@ -1936,7 +1936,7 @@ static int __init init_kprobes(void)
63847 {
63848 int i, err = 0;
63849 unsigned long offset = 0, size = 0;
63850 - char *modname, namebuf[128];
63851 + char *modname, namebuf[KSYM_NAME_LEN];
63852 const char *symbol_name;
63853 void *addr;
63854 struct kprobe_blackpoint *kb;
63855 @@ -2062,7 +2062,7 @@ static int __kprobes show_kprobe_addr(st
63856 const char *sym = NULL;
63857 unsigned int i = *(loff_t *) v;
63858 unsigned long offset = 0;
63859 - char *modname, namebuf[128];
63860 + char *modname, namebuf[KSYM_NAME_LEN];
63861
63862 head = &kprobe_table[i];
63863 preempt_disable();
63864 diff -urNp linux-3.0.8/kernel/lockdep.c linux-3.0.8/kernel/lockdep.c
63865 --- linux-3.0.8/kernel/lockdep.c 2011-07-21 22:17:23.000000000 -0400
63866 +++ linux-3.0.8/kernel/lockdep.c 2011-08-23 21:47:56.000000000 -0400
63867 @@ -583,6 +583,10 @@ static int static_obj(void *obj)
63868 end = (unsigned long) &_end,
63869 addr = (unsigned long) obj;
63870
63871 +#ifdef CONFIG_PAX_KERNEXEC
63872 + start = ktla_ktva(start);
63873 +#endif
63874 +
63875 /*
63876 * static variable?
63877 */
63878 @@ -718,6 +722,7 @@ register_lock_class(struct lockdep_map *
63879 if (!static_obj(lock->key)) {
63880 debug_locks_off();
63881 printk("INFO: trying to register non-static key.\n");
63882 + printk("lock:%pS key:%pS.\n", lock, lock->key);
63883 printk("the code is fine but needs lockdep annotation.\n");
63884 printk("turning off the locking correctness validator.\n");
63885 dump_stack();
63886 @@ -2936,7 +2941,7 @@ static int __lock_acquire(struct lockdep
63887 if (!class)
63888 return 0;
63889 }
63890 - atomic_inc((atomic_t *)&class->ops);
63891 + atomic_inc_unchecked((atomic_unchecked_t *)&class->ops);
63892 if (very_verbose(class)) {
63893 printk("\nacquire class [%p] %s", class->key, class->name);
63894 if (class->name_version > 1)
63895 diff -urNp linux-3.0.8/kernel/lockdep_proc.c linux-3.0.8/kernel/lockdep_proc.c
63896 --- linux-3.0.8/kernel/lockdep_proc.c 2011-07-21 22:17:23.000000000 -0400
63897 +++ linux-3.0.8/kernel/lockdep_proc.c 2011-08-23 21:47:56.000000000 -0400
63898 @@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, v
63899
63900 static void print_name(struct seq_file *m, struct lock_class *class)
63901 {
63902 - char str[128];
63903 + char str[KSYM_NAME_LEN];
63904 const char *name = class->name;
63905
63906 if (!name) {
63907 diff -urNp linux-3.0.8/kernel/module.c linux-3.0.8/kernel/module.c
63908 --- linux-3.0.8/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
63909 +++ linux-3.0.8/kernel/module.c 2011-08-23 21:48:14.000000000 -0400
63910 @@ -58,6 +58,7 @@
63911 #include <linux/jump_label.h>
63912 #include <linux/pfn.h>
63913 #include <linux/bsearch.h>
63914 +#include <linux/grsecurity.h>
63915
63916 #define CREATE_TRACE_POINTS
63917 #include <trace/events/module.h>
63918 @@ -119,7 +120,8 @@ static BLOCKING_NOTIFIER_HEAD(module_not
63919
63920 /* Bounds of module allocation, for speeding __module_address.
63921 * Protected by module_mutex. */
63922 -static unsigned long module_addr_min = -1UL, module_addr_max = 0;
63923 +static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
63924 +static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
63925
63926 int register_module_notifier(struct notifier_block * nb)
63927 {
63928 @@ -284,7 +286,7 @@ bool each_symbol_section(bool (*fn)(cons
63929 return true;
63930
63931 list_for_each_entry_rcu(mod, &modules, list) {
63932 - struct symsearch arr[] = {
63933 + struct symsearch modarr[] = {
63934 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
63935 NOT_GPL_ONLY, false },
63936 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
63937 @@ -306,7 +308,7 @@ bool each_symbol_section(bool (*fn)(cons
63938 #endif
63939 };
63940
63941 - if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
63942 + if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
63943 return true;
63944 }
63945 return false;
63946 @@ -438,7 +440,7 @@ static inline void __percpu *mod_percpu(
63947 static int percpu_modalloc(struct module *mod,
63948 unsigned long size, unsigned long align)
63949 {
63950 - if (align > PAGE_SIZE) {
63951 + if (align-1 >= PAGE_SIZE) {
63952 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
63953 mod->name, align, PAGE_SIZE);
63954 align = PAGE_SIZE;
63955 @@ -1166,7 +1168,7 @@ resolve_symbol_wait(struct module *mod,
63956 */
63957 #ifdef CONFIG_SYSFS
63958
63959 -#ifdef CONFIG_KALLSYMS
63960 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
63961 static inline bool sect_empty(const Elf_Shdr *sect)
63962 {
63963 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
63964 @@ -1632,21 +1634,21 @@ static void set_section_ro_nx(void *base
63965
63966 static void unset_module_core_ro_nx(struct module *mod)
63967 {
63968 - set_page_attributes(mod->module_core + mod->core_text_size,
63969 - mod->module_core + mod->core_size,
63970 + set_page_attributes(mod->module_core_rw,
63971 + mod->module_core_rw + mod->core_size_rw,
63972 set_memory_x);
63973 - set_page_attributes(mod->module_core,
63974 - mod->module_core + mod->core_ro_size,
63975 + set_page_attributes(mod->module_core_rx,
63976 + mod->module_core_rx + mod->core_size_rx,
63977 set_memory_rw);
63978 }
63979
63980 static void unset_module_init_ro_nx(struct module *mod)
63981 {
63982 - set_page_attributes(mod->module_init + mod->init_text_size,
63983 - mod->module_init + mod->init_size,
63984 + set_page_attributes(mod->module_init_rw,
63985 + mod->module_init_rw + mod->init_size_rw,
63986 set_memory_x);
63987 - set_page_attributes(mod->module_init,
63988 - mod->module_init + mod->init_ro_size,
63989 + set_page_attributes(mod->module_init_rx,
63990 + mod->module_init_rx + mod->init_size_rx,
63991 set_memory_rw);
63992 }
63993
63994 @@ -1657,14 +1659,14 @@ void set_all_modules_text_rw(void)
63995
63996 mutex_lock(&module_mutex);
63997 list_for_each_entry_rcu(mod, &modules, list) {
63998 - if ((mod->module_core) && (mod->core_text_size)) {
63999 - set_page_attributes(mod->module_core,
64000 - mod->module_core + mod->core_text_size,
64001 + if ((mod->module_core_rx) && (mod->core_size_rx)) {
64002 + set_page_attributes(mod->module_core_rx,
64003 + mod->module_core_rx + mod->core_size_rx,
64004 set_memory_rw);
64005 }
64006 - if ((mod->module_init) && (mod->init_text_size)) {
64007 - set_page_attributes(mod->module_init,
64008 - mod->module_init + mod->init_text_size,
64009 + if ((mod->module_init_rx) && (mod->init_size_rx)) {
64010 + set_page_attributes(mod->module_init_rx,
64011 + mod->module_init_rx + mod->init_size_rx,
64012 set_memory_rw);
64013 }
64014 }
64015 @@ -1678,14 +1680,14 @@ void set_all_modules_text_ro(void)
64016
64017 mutex_lock(&module_mutex);
64018 list_for_each_entry_rcu(mod, &modules, list) {
64019 - if ((mod->module_core) && (mod->core_text_size)) {
64020 - set_page_attributes(mod->module_core,
64021 - mod->module_core + mod->core_text_size,
64022 + if ((mod->module_core_rx) && (mod->core_size_rx)) {
64023 + set_page_attributes(mod->module_core_rx,
64024 + mod->module_core_rx + mod->core_size_rx,
64025 set_memory_ro);
64026 }
64027 - if ((mod->module_init) && (mod->init_text_size)) {
64028 - set_page_attributes(mod->module_init,
64029 - mod->module_init + mod->init_text_size,
64030 + if ((mod->module_init_rx) && (mod->init_size_rx)) {
64031 + set_page_attributes(mod->module_init_rx,
64032 + mod->module_init_rx + mod->init_size_rx,
64033 set_memory_ro);
64034 }
64035 }
64036 @@ -1722,16 +1724,19 @@ static void free_module(struct module *m
64037
64038 /* This may be NULL, but that's OK */
64039 unset_module_init_ro_nx(mod);
64040 - module_free(mod, mod->module_init);
64041 + module_free(mod, mod->module_init_rw);
64042 + module_free_exec(mod, mod->module_init_rx);
64043 kfree(mod->args);
64044 percpu_modfree(mod);
64045
64046 /* Free lock-classes: */
64047 - lockdep_free_key_range(mod->module_core, mod->core_size);
64048 + lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
64049 + lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
64050
64051 /* Finally, free the core (containing the module structure) */
64052 unset_module_core_ro_nx(mod);
64053 - module_free(mod, mod->module_core);
64054 + module_free_exec(mod, mod->module_core_rx);
64055 + module_free(mod, mod->module_core_rw);
64056
64057 #ifdef CONFIG_MPU
64058 update_protections(current->mm);
64059 @@ -1800,10 +1805,31 @@ static int simplify_symbols(struct modul
64060 unsigned int i;
64061 int ret = 0;
64062 const struct kernel_symbol *ksym;
64063 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
64064 + int is_fs_load = 0;
64065 + int register_filesystem_found = 0;
64066 + char *p;
64067 +
64068 + p = strstr(mod->args, "grsec_modharden_fs");
64069 + if (p) {
64070 + char *endptr = p + strlen("grsec_modharden_fs");
64071 + /* copy \0 as well */
64072 + memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
64073 + is_fs_load = 1;
64074 + }
64075 +#endif
64076
64077 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
64078 const char *name = info->strtab + sym[i].st_name;
64079
64080 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
64081 + /* it's a real shame this will never get ripped and copied
64082 + upstream! ;(
64083 + */
64084 + if (is_fs_load && !strcmp(name, "register_filesystem"))
64085 + register_filesystem_found = 1;
64086 +#endif
64087 +
64088 switch (sym[i].st_shndx) {
64089 case SHN_COMMON:
64090 /* We compiled with -fno-common. These are not
64091 @@ -1824,7 +1850,9 @@ static int simplify_symbols(struct modul
64092 ksym = resolve_symbol_wait(mod, info, name);
64093 /* Ok if resolved. */
64094 if (ksym && !IS_ERR(ksym)) {
64095 + pax_open_kernel();
64096 sym[i].st_value = ksym->value;
64097 + pax_close_kernel();
64098 break;
64099 }
64100
64101 @@ -1843,11 +1871,20 @@ static int simplify_symbols(struct modul
64102 secbase = (unsigned long)mod_percpu(mod);
64103 else
64104 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
64105 + pax_open_kernel();
64106 sym[i].st_value += secbase;
64107 + pax_close_kernel();
64108 break;
64109 }
64110 }
64111
64112 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
64113 + if (is_fs_load && !register_filesystem_found) {
64114 + printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
64115 + ret = -EPERM;
64116 + }
64117 +#endif
64118 +
64119 return ret;
64120 }
64121
64122 @@ -1931,22 +1968,12 @@ static void layout_sections(struct modul
64123 || s->sh_entsize != ~0UL
64124 || strstarts(sname, ".init"))
64125 continue;
64126 - s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
64127 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
64128 + s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
64129 + else
64130 + s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
64131 DEBUGP("\t%s\n", name);
64132 }
64133 - switch (m) {
64134 - case 0: /* executable */
64135 - mod->core_size = debug_align(mod->core_size);
64136 - mod->core_text_size = mod->core_size;
64137 - break;
64138 - case 1: /* RO: text and ro-data */
64139 - mod->core_size = debug_align(mod->core_size);
64140 - mod->core_ro_size = mod->core_size;
64141 - break;
64142 - case 3: /* whole core */
64143 - mod->core_size = debug_align(mod->core_size);
64144 - break;
64145 - }
64146 }
64147
64148 DEBUGP("Init section allocation order:\n");
64149 @@ -1960,23 +1987,13 @@ static void layout_sections(struct modul
64150 || s->sh_entsize != ~0UL
64151 || !strstarts(sname, ".init"))
64152 continue;
64153 - s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
64154 - | INIT_OFFSET_MASK);
64155 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
64156 + s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
64157 + else
64158 + s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
64159 + s->sh_entsize |= INIT_OFFSET_MASK;
64160 DEBUGP("\t%s\n", sname);
64161 }
64162 - switch (m) {
64163 - case 0: /* executable */
64164 - mod->init_size = debug_align(mod->init_size);
64165 - mod->init_text_size = mod->init_size;
64166 - break;
64167 - case 1: /* RO: text and ro-data */
64168 - mod->init_size = debug_align(mod->init_size);
64169 - mod->init_ro_size = mod->init_size;
64170 - break;
64171 - case 3: /* whole init */
64172 - mod->init_size = debug_align(mod->init_size);
64173 - break;
64174 - }
64175 }
64176 }
64177
64178 @@ -2141,7 +2158,7 @@ static void layout_symtab(struct module
64179
64180 /* Put symbol section at end of init part of module. */
64181 symsect->sh_flags |= SHF_ALLOC;
64182 - symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
64183 + symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
64184 info->index.sym) | INIT_OFFSET_MASK;
64185 DEBUGP("\t%s\n", info->secstrings + symsect->sh_name);
64186
64187 @@ -2158,19 +2175,19 @@ static void layout_symtab(struct module
64188 }
64189
64190 /* Append room for core symbols at end of core part. */
64191 - info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
64192 - mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
64193 + info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
64194 + mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
64195
64196 /* Put string table section at end of init part of module. */
64197 strsect->sh_flags |= SHF_ALLOC;
64198 - strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
64199 + strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
64200 info->index.str) | INIT_OFFSET_MASK;
64201 DEBUGP("\t%s\n", info->secstrings + strsect->sh_name);
64202
64203 /* Append room for core symbols' strings at end of core part. */
64204 - info->stroffs = mod->core_size;
64205 + info->stroffs = mod->core_size_rx;
64206 __set_bit(0, info->strmap);
64207 - mod->core_size += bitmap_weight(info->strmap, strsect->sh_size);
64208 + mod->core_size_rx += bitmap_weight(info->strmap, strsect->sh_size);
64209 }
64210
64211 static void add_kallsyms(struct module *mod, const struct load_info *info)
64212 @@ -2186,11 +2203,13 @@ static void add_kallsyms(struct module *
64213 /* Make sure we get permanent strtab: don't use info->strtab. */
64214 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
64215
64216 + pax_open_kernel();
64217 +
64218 /* Set types up while we still have access to sections. */
64219 for (i = 0; i < mod->num_symtab; i++)
64220 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
64221
64222 - mod->core_symtab = dst = mod->module_core + info->symoffs;
64223 + mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
64224 src = mod->symtab;
64225 *dst = *src;
64226 for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) {
64227 @@ -2203,10 +2222,12 @@ static void add_kallsyms(struct module *
64228 }
64229 mod->core_num_syms = ndst;
64230
64231 - mod->core_strtab = s = mod->module_core + info->stroffs;
64232 + mod->core_strtab = s = mod->module_core_rx + info->stroffs;
64233 for (*s = 0, i = 1; i < info->sechdrs[info->index.str].sh_size; ++i)
64234 if (test_bit(i, info->strmap))
64235 *++s = mod->strtab[i];
64236 +
64237 + pax_close_kernel();
64238 }
64239 #else
64240 static inline void layout_symtab(struct module *mod, struct load_info *info)
64241 @@ -2235,17 +2256,33 @@ static void dynamic_debug_remove(struct
64242 ddebug_remove_module(debug->modname);
64243 }
64244
64245 -static void *module_alloc_update_bounds(unsigned long size)
64246 +static void *module_alloc_update_bounds_rw(unsigned long size)
64247 {
64248 void *ret = module_alloc(size);
64249
64250 if (ret) {
64251 mutex_lock(&module_mutex);
64252 /* Update module bounds. */
64253 - if ((unsigned long)ret < module_addr_min)
64254 - module_addr_min = (unsigned long)ret;
64255 - if ((unsigned long)ret + size > module_addr_max)
64256 - module_addr_max = (unsigned long)ret + size;
64257 + if ((unsigned long)ret < module_addr_min_rw)
64258 + module_addr_min_rw = (unsigned long)ret;
64259 + if ((unsigned long)ret + size > module_addr_max_rw)
64260 + module_addr_max_rw = (unsigned long)ret + size;
64261 + mutex_unlock(&module_mutex);
64262 + }
64263 + return ret;
64264 +}
64265 +
64266 +static void *module_alloc_update_bounds_rx(unsigned long size)
64267 +{
64268 + void *ret = module_alloc_exec(size);
64269 +
64270 + if (ret) {
64271 + mutex_lock(&module_mutex);
64272 + /* Update module bounds. */
64273 + if ((unsigned long)ret < module_addr_min_rx)
64274 + module_addr_min_rx = (unsigned long)ret;
64275 + if ((unsigned long)ret + size > module_addr_max_rx)
64276 + module_addr_max_rx = (unsigned long)ret + size;
64277 mutex_unlock(&module_mutex);
64278 }
64279 return ret;
64280 @@ -2538,7 +2575,7 @@ static int move_module(struct module *mo
64281 void *ptr;
64282
64283 /* Do the allocs. */
64284 - ptr = module_alloc_update_bounds(mod->core_size);
64285 + ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
64286 /*
64287 * The pointer to this block is stored in the module structure
64288 * which is inside the block. Just mark it as not being a
64289 @@ -2548,23 +2585,50 @@ static int move_module(struct module *mo
64290 if (!ptr)
64291 return -ENOMEM;
64292
64293 - memset(ptr, 0, mod->core_size);
64294 - mod->module_core = ptr;
64295 + memset(ptr, 0, mod->core_size_rw);
64296 + mod->module_core_rw = ptr;
64297
64298 - ptr = module_alloc_update_bounds(mod->init_size);
64299 + ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
64300 /*
64301 * The pointer to this block is stored in the module structure
64302 * which is inside the block. This block doesn't need to be
64303 * scanned as it contains data and code that will be freed
64304 * after the module is initialized.
64305 */
64306 - kmemleak_ignore(ptr);
64307 - if (!ptr && mod->init_size) {
64308 - module_free(mod, mod->module_core);
64309 + kmemleak_not_leak(ptr);
64310 + if (!ptr && mod->init_size_rw) {
64311 + module_free(mod, mod->module_core_rw);
64312 return -ENOMEM;
64313 }
64314 - memset(ptr, 0, mod->init_size);
64315 - mod->module_init = ptr;
64316 + memset(ptr, 0, mod->init_size_rw);
64317 + mod->module_init_rw = ptr;
64318 +
64319 + ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
64320 + kmemleak_not_leak(ptr);
64321 + if (!ptr) {
64322 + module_free(mod, mod->module_init_rw);
64323 + module_free(mod, mod->module_core_rw);
64324 + return -ENOMEM;
64325 + }
64326 +
64327 + pax_open_kernel();
64328 + memset(ptr, 0, mod->core_size_rx);
64329 + pax_close_kernel();
64330 + mod->module_core_rx = ptr;
64331 +
64332 + ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
64333 + kmemleak_not_leak(ptr);
64334 + if (!ptr && mod->init_size_rx) {
64335 + module_free_exec(mod, mod->module_core_rx);
64336 + module_free(mod, mod->module_init_rw);
64337 + module_free(mod, mod->module_core_rw);
64338 + return -ENOMEM;
64339 + }
64340 +
64341 + pax_open_kernel();
64342 + memset(ptr, 0, mod->init_size_rx);
64343 + pax_close_kernel();
64344 + mod->module_init_rx = ptr;
64345
64346 /* Transfer each section which specifies SHF_ALLOC */
64347 DEBUGP("final section addresses:\n");
64348 @@ -2575,16 +2639,45 @@ static int move_module(struct module *mo
64349 if (!(shdr->sh_flags & SHF_ALLOC))
64350 continue;
64351
64352 - if (shdr->sh_entsize & INIT_OFFSET_MASK)
64353 - dest = mod->module_init
64354 - + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
64355 - else
64356 - dest = mod->module_core + shdr->sh_entsize;
64357 + if (shdr->sh_entsize & INIT_OFFSET_MASK) {
64358 + if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
64359 + dest = mod->module_init_rw
64360 + + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
64361 + else
64362 + dest = mod->module_init_rx
64363 + + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
64364 + } else {
64365 + if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
64366 + dest = mod->module_core_rw + shdr->sh_entsize;
64367 + else
64368 + dest = mod->module_core_rx + shdr->sh_entsize;
64369 + }
64370 +
64371 + if (shdr->sh_type != SHT_NOBITS) {
64372 +
64373 +#ifdef CONFIG_PAX_KERNEXEC
64374 +#ifdef CONFIG_X86_64
64375 + if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
64376 + set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
64377 +#endif
64378 + if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
64379 + pax_open_kernel();
64380 + memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
64381 + pax_close_kernel();
64382 + } else
64383 +#endif
64384
64385 - if (shdr->sh_type != SHT_NOBITS)
64386 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
64387 + }
64388 /* Update sh_addr to point to copy in image. */
64389 - shdr->sh_addr = (unsigned long)dest;
64390 +
64391 +#ifdef CONFIG_PAX_KERNEXEC
64392 + if (shdr->sh_flags & SHF_EXECINSTR)
64393 + shdr->sh_addr = ktva_ktla((unsigned long)dest);
64394 + else
64395 +#endif
64396 +
64397 + shdr->sh_addr = (unsigned long)dest;
64398 DEBUGP("\t0x%lx %s\n",
64399 shdr->sh_addr, info->secstrings + shdr->sh_name);
64400 }
64401 @@ -2635,12 +2728,12 @@ static void flush_module_icache(const st
64402 * Do it before processing of module parameters, so the module
64403 * can provide parameter accessor functions of its own.
64404 */
64405 - if (mod->module_init)
64406 - flush_icache_range((unsigned long)mod->module_init,
64407 - (unsigned long)mod->module_init
64408 - + mod->init_size);
64409 - flush_icache_range((unsigned long)mod->module_core,
64410 - (unsigned long)mod->module_core + mod->core_size);
64411 + if (mod->module_init_rx)
64412 + flush_icache_range((unsigned long)mod->module_init_rx,
64413 + (unsigned long)mod->module_init_rx
64414 + + mod->init_size_rx);
64415 + flush_icache_range((unsigned long)mod->module_core_rx,
64416 + (unsigned long)mod->module_core_rx + mod->core_size_rx);
64417
64418 set_fs(old_fs);
64419 }
64420 @@ -2712,8 +2805,10 @@ static void module_deallocate(struct mod
64421 {
64422 kfree(info->strmap);
64423 percpu_modfree(mod);
64424 - module_free(mod, mod->module_init);
64425 - module_free(mod, mod->module_core);
64426 + module_free_exec(mod, mod->module_init_rx);
64427 + module_free_exec(mod, mod->module_core_rx);
64428 + module_free(mod, mod->module_init_rw);
64429 + module_free(mod, mod->module_core_rw);
64430 }
64431
64432 static int post_relocation(struct module *mod, const struct load_info *info)
64433 @@ -2770,9 +2865,38 @@ static struct module *load_module(void _
64434 if (err)
64435 goto free_unload;
64436
64437 + /* Now copy in args */
64438 + mod->args = strndup_user(uargs, ~0UL >> 1);
64439 + if (IS_ERR(mod->args)) {
64440 + err = PTR_ERR(mod->args);
64441 + goto free_unload;
64442 + }
64443 +
64444 /* Set up MODINFO_ATTR fields */
64445 setup_modinfo(mod, &info);
64446
64447 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
64448 + {
64449 + char *p, *p2;
64450 +
64451 + if (strstr(mod->args, "grsec_modharden_netdev")) {
64452 + printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
64453 + err = -EPERM;
64454 + goto free_modinfo;
64455 + } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
64456 + p += strlen("grsec_modharden_normal");
64457 + p2 = strstr(p, "_");
64458 + if (p2) {
64459 + *p2 = '\0';
64460 + printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
64461 + *p2 = '_';
64462 + }
64463 + err = -EPERM;
64464 + goto free_modinfo;
64465 + }
64466 + }
64467 +#endif
64468 +
64469 /* Fix up syms, so that st_value is a pointer to location. */
64470 err = simplify_symbols(mod, &info);
64471 if (err < 0)
64472 @@ -2788,13 +2912,6 @@ static struct module *load_module(void _
64473
64474 flush_module_icache(mod);
64475
64476 - /* Now copy in args */
64477 - mod->args = strndup_user(uargs, ~0UL >> 1);
64478 - if (IS_ERR(mod->args)) {
64479 - err = PTR_ERR(mod->args);
64480 - goto free_arch_cleanup;
64481 - }
64482 -
64483 /* Mark state as coming so strong_try_module_get() ignores us. */
64484 mod->state = MODULE_STATE_COMING;
64485
64486 @@ -2854,11 +2971,10 @@ static struct module *load_module(void _
64487 unlock:
64488 mutex_unlock(&module_mutex);
64489 synchronize_sched();
64490 - kfree(mod->args);
64491 - free_arch_cleanup:
64492 module_arch_cleanup(mod);
64493 free_modinfo:
64494 free_modinfo(mod);
64495 + kfree(mod->args);
64496 free_unload:
64497 module_unload_free(mod);
64498 free_module:
64499 @@ -2899,16 +3015,16 @@ SYSCALL_DEFINE3(init_module, void __user
64500 MODULE_STATE_COMING, mod);
64501
64502 /* Set RO and NX regions for core */
64503 - set_section_ro_nx(mod->module_core,
64504 - mod->core_text_size,
64505 - mod->core_ro_size,
64506 - mod->core_size);
64507 + set_section_ro_nx(mod->module_core_rx,
64508 + mod->core_size_rx,
64509 + mod->core_size_rx,
64510 + mod->core_size_rx);
64511
64512 /* Set RO and NX regions for init */
64513 - set_section_ro_nx(mod->module_init,
64514 - mod->init_text_size,
64515 - mod->init_ro_size,
64516 - mod->init_size);
64517 + set_section_ro_nx(mod->module_init_rx,
64518 + mod->init_size_rx,
64519 + mod->init_size_rx,
64520 + mod->init_size_rx);
64521
64522 do_mod_ctors(mod);
64523 /* Start the module */
64524 @@ -2954,11 +3070,12 @@ SYSCALL_DEFINE3(init_module, void __user
64525 mod->strtab = mod->core_strtab;
64526 #endif
64527 unset_module_init_ro_nx(mod);
64528 - module_free(mod, mod->module_init);
64529 - mod->module_init = NULL;
64530 - mod->init_size = 0;
64531 - mod->init_ro_size = 0;
64532 - mod->init_text_size = 0;
64533 + module_free(mod, mod->module_init_rw);
64534 + module_free_exec(mod, mod->module_init_rx);
64535 + mod->module_init_rw = NULL;
64536 + mod->module_init_rx = NULL;
64537 + mod->init_size_rw = 0;
64538 + mod->init_size_rx = 0;
64539 mutex_unlock(&module_mutex);
64540
64541 return 0;
64542 @@ -2989,10 +3106,16 @@ static const char *get_ksymbol(struct mo
64543 unsigned long nextval;
64544
64545 /* At worse, next value is at end of module */
64546 - if (within_module_init(addr, mod))
64547 - nextval = (unsigned long)mod->module_init+mod->init_text_size;
64548 + if (within_module_init_rx(addr, mod))
64549 + nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
64550 + else if (within_module_init_rw(addr, mod))
64551 + nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
64552 + else if (within_module_core_rx(addr, mod))
64553 + nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
64554 + else if (within_module_core_rw(addr, mod))
64555 + nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
64556 else
64557 - nextval = (unsigned long)mod->module_core+mod->core_text_size;
64558 + return NULL;
64559
64560 /* Scan for closest preceding symbol, and next symbol. (ELF
64561 starts real symbols at 1). */
64562 @@ -3238,7 +3361,7 @@ static int m_show(struct seq_file *m, vo
64563 char buf[8];
64564
64565 seq_printf(m, "%s %u",
64566 - mod->name, mod->init_size + mod->core_size);
64567 + mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
64568 print_unload_info(m, mod);
64569
64570 /* Informative for users. */
64571 @@ -3247,7 +3370,7 @@ static int m_show(struct seq_file *m, vo
64572 mod->state == MODULE_STATE_COMING ? "Loading":
64573 "Live");
64574 /* Used by oprofile and other similar tools. */
64575 - seq_printf(m, " 0x%pK", mod->module_core);
64576 + seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
64577
64578 /* Taints info */
64579 if (mod->taints)
64580 @@ -3283,7 +3406,17 @@ static const struct file_operations proc
64581
64582 static int __init proc_modules_init(void)
64583 {
64584 +#ifndef CONFIG_GRKERNSEC_HIDESYM
64585 +#ifdef CONFIG_GRKERNSEC_PROC_USER
64586 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
64587 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
64588 + proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
64589 +#else
64590 proc_create("modules", 0, NULL, &proc_modules_operations);
64591 +#endif
64592 +#else
64593 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
64594 +#endif
64595 return 0;
64596 }
64597 module_init(proc_modules_init);
64598 @@ -3342,12 +3475,12 @@ struct module *__module_address(unsigned
64599 {
64600 struct module *mod;
64601
64602 - if (addr < module_addr_min || addr > module_addr_max)
64603 + if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
64604 + (addr < module_addr_min_rw || addr > module_addr_max_rw))
64605 return NULL;
64606
64607 list_for_each_entry_rcu(mod, &modules, list)
64608 - if (within_module_core(addr, mod)
64609 - || within_module_init(addr, mod))
64610 + if (within_module_init(addr, mod) || within_module_core(addr, mod))
64611 return mod;
64612 return NULL;
64613 }
64614 @@ -3381,11 +3514,20 @@ bool is_module_text_address(unsigned lon
64615 */
64616 struct module *__module_text_address(unsigned long addr)
64617 {
64618 - struct module *mod = __module_address(addr);
64619 + struct module *mod;
64620 +
64621 +#ifdef CONFIG_X86_32
64622 + addr = ktla_ktva(addr);
64623 +#endif
64624 +
64625 + if (addr < module_addr_min_rx || addr > module_addr_max_rx)
64626 + return NULL;
64627 +
64628 + mod = __module_address(addr);
64629 +
64630 if (mod) {
64631 /* Make sure it's within the text section. */
64632 - if (!within(addr, mod->module_init, mod->init_text_size)
64633 - && !within(addr, mod->module_core, mod->core_text_size))
64634 + if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
64635 mod = NULL;
64636 }
64637 return mod;
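The module.c hunks above replace the single module_core/core_size (and module_init/init_size) pair with split RW and RX regions: layout_sections() classifies every SHF_ALLOC section into core_size_rw or core_size_rx, move_module() allocates the two regions with module_alloc() and module_alloc_exec() and copies text through pax_open_kernel()/pax_close_kernel(), and the later users (lockdep key ranges, set_section_ro_nx(), /proc/modules, __module_address() and __module_text_address()) are converted to the split fields. The snippet below is a hedged standalone sketch of just the classification predicate; the flag values are the standard ELF ones, while the section list and region names are invented for illustration.

#include <stdio.h>

/* Standard ELF section flags (values from the ELF specification). */
#define SHF_WRITE     0x1
#define SHF_ALLOC     0x2
#define SHF_EXECINSTR 0x4

struct section { const char *name; unsigned long sh_flags; };

/* Mirrors the predicate used by the patched layout_sections()/move_module():
 * writable (or non-allocated) sections land in the RW region, text and
 * read-only data land in the RX region. */
static const char *region_for(unsigned long sh_flags)
{
        if ((sh_flags & SHF_WRITE) || !(sh_flags & SHF_ALLOC))
                return "core_rw";
        return "core_rx";
}

int main(void)
{
        const struct section sections[] = {
                { ".text",   SHF_ALLOC | SHF_EXECINSTR },
                { ".rodata", SHF_ALLOC },
                { ".data",   SHF_ALLOC | SHF_WRITE },
                { ".bss",    SHF_ALLOC | SHF_WRITE },
        };

        for (unsigned int i = 0; i < sizeof(sections) / sizeof(sections[0]); i++)
                printf("%-8s -> %s\n", sections[i].name,
                       region_for(sections[i].sh_flags));
        return 0;
}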
64638 diff -urNp linux-3.0.8/kernel/mutex.c linux-3.0.8/kernel/mutex.c
64639 --- linux-3.0.8/kernel/mutex.c 2011-07-21 22:17:23.000000000 -0400
64640 +++ linux-3.0.8/kernel/mutex.c 2011-08-23 21:47:56.000000000 -0400
64641 @@ -198,7 +198,7 @@ __mutex_lock_common(struct mutex *lock,
64642 spin_lock_mutex(&lock->wait_lock, flags);
64643
64644 debug_mutex_lock_common(lock, &waiter);
64645 - debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
64646 + debug_mutex_add_waiter(lock, &waiter, task);
64647
64648 /* add waiting tasks to the end of the waitqueue (FIFO): */
64649 list_add_tail(&waiter.list, &lock->wait_list);
64650 @@ -227,8 +227,7 @@ __mutex_lock_common(struct mutex *lock,
64651 * TASK_UNINTERRUPTIBLE case.)
64652 */
64653 if (unlikely(signal_pending_state(state, task))) {
64654 - mutex_remove_waiter(lock, &waiter,
64655 - task_thread_info(task));
64656 + mutex_remove_waiter(lock, &waiter, task);
64657 mutex_release(&lock->dep_map, 1, ip);
64658 spin_unlock_mutex(&lock->wait_lock, flags);
64659
64660 @@ -249,7 +248,7 @@ __mutex_lock_common(struct mutex *lock,
64661 done:
64662 lock_acquired(&lock->dep_map, ip);
64663 /* got the lock - rejoice! */
64664 - mutex_remove_waiter(lock, &waiter, current_thread_info());
64665 + mutex_remove_waiter(lock, &waiter, task);
64666 mutex_set_owner(lock);
64667
64668 /* set it to 0 if there are no waiters left: */
64669 diff -urNp linux-3.0.8/kernel/mutex-debug.c linux-3.0.8/kernel/mutex-debug.c
64670 --- linux-3.0.8/kernel/mutex-debug.c 2011-07-21 22:17:23.000000000 -0400
64671 +++ linux-3.0.8/kernel/mutex-debug.c 2011-08-23 21:47:56.000000000 -0400
64672 @@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mute
64673 }
64674
64675 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
64676 - struct thread_info *ti)
64677 + struct task_struct *task)
64678 {
64679 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
64680
64681 /* Mark the current thread as blocked on the lock: */
64682 - ti->task->blocked_on = waiter;
64683 + task->blocked_on = waiter;
64684 }
64685
64686 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
64687 - struct thread_info *ti)
64688 + struct task_struct *task)
64689 {
64690 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
64691 - DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
64692 - DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
64693 - ti->task->blocked_on = NULL;
64694 + DEBUG_LOCKS_WARN_ON(waiter->task != task);
64695 + DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
64696 + task->blocked_on = NULL;
64697
64698 list_del_init(&waiter->list);
64699 waiter->task = NULL;
64700 diff -urNp linux-3.0.8/kernel/mutex-debug.h linux-3.0.8/kernel/mutex-debug.h
64701 --- linux-3.0.8/kernel/mutex-debug.h 2011-07-21 22:17:23.000000000 -0400
64702 +++ linux-3.0.8/kernel/mutex-debug.h 2011-08-23 21:47:56.000000000 -0400
64703 @@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(stru
64704 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
64705 extern void debug_mutex_add_waiter(struct mutex *lock,
64706 struct mutex_waiter *waiter,
64707 - struct thread_info *ti);
64708 + struct task_struct *task);
64709 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
64710 - struct thread_info *ti);
64711 + struct task_struct *task);
64712 extern void debug_mutex_unlock(struct mutex *lock);
64713 extern void debug_mutex_init(struct mutex *lock, const char *name,
64714 struct lock_class_key *key);
64715 diff -urNp linux-3.0.8/kernel/padata.c linux-3.0.8/kernel/padata.c
64716 --- linux-3.0.8/kernel/padata.c 2011-07-21 22:17:23.000000000 -0400
64717 +++ linux-3.0.8/kernel/padata.c 2011-08-23 21:47:56.000000000 -0400
64718 @@ -132,10 +132,10 @@ int padata_do_parallel(struct padata_ins
64719 padata->pd = pd;
64720 padata->cb_cpu = cb_cpu;
64721
64722 - if (unlikely(atomic_read(&pd->seq_nr) == pd->max_seq_nr))
64723 - atomic_set(&pd->seq_nr, -1);
64724 + if (unlikely(atomic_read_unchecked(&pd->seq_nr) == pd->max_seq_nr))
64725 + atomic_set_unchecked(&pd->seq_nr, -1);
64726
64727 - padata->seq_nr = atomic_inc_return(&pd->seq_nr);
64728 + padata->seq_nr = atomic_inc_return_unchecked(&pd->seq_nr);
64729
64730 target_cpu = padata_cpu_hash(padata);
64731 queue = per_cpu_ptr(pd->pqueue, target_cpu);
64732 @@ -444,7 +444,7 @@ static struct parallel_data *padata_allo
64733 padata_init_pqueues(pd);
64734 padata_init_squeues(pd);
64735 setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
64736 - atomic_set(&pd->seq_nr, -1);
64737 + atomic_set_unchecked(&pd->seq_nr, -1);
64738 atomic_set(&pd->reorder_objects, 0);
64739 atomic_set(&pd->refcnt, 0);
64740 pd->pinst = pinst;
64741 diff -urNp linux-3.0.8/kernel/panic.c linux-3.0.8/kernel/panic.c
64742 --- linux-3.0.8/kernel/panic.c 2011-07-21 22:17:23.000000000 -0400
64743 +++ linux-3.0.8/kernel/panic.c 2011-08-23 21:48:14.000000000 -0400
64744 @@ -369,7 +369,7 @@ static void warn_slowpath_common(const c
64745 const char *board;
64746
64747 printk(KERN_WARNING "------------[ cut here ]------------\n");
64748 - printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
64749 + printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
64750 board = dmi_get_system_info(DMI_PRODUCT_NAME);
64751 if (board)
64752 printk(KERN_WARNING "Hardware name: %s\n", board);
64753 @@ -424,7 +424,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
64754 */
64755 void __stack_chk_fail(void)
64756 {
64757 - panic("stack-protector: Kernel stack is corrupted in: %p\n",
64758 + dump_stack();
64759 + panic("stack-protector: Kernel stack is corrupted in: %pA\n",
64760 __builtin_return_address(0));
64761 }
64762 EXPORT_SYMBOL(__stack_chk_fail);
64763 diff -urNp linux-3.0.8/kernel/pid.c linux-3.0.8/kernel/pid.c
64764 --- linux-3.0.8/kernel/pid.c 2011-07-21 22:17:23.000000000 -0400
64765 +++ linux-3.0.8/kernel/pid.c 2011-08-23 21:48:14.000000000 -0400
64766 @@ -33,6 +33,7 @@
64767 #include <linux/rculist.h>
64768 #include <linux/bootmem.h>
64769 #include <linux/hash.h>
64770 +#include <linux/security.h>
64771 #include <linux/pid_namespace.h>
64772 #include <linux/init_task.h>
64773 #include <linux/syscalls.h>
64774 @@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT
64775
64776 int pid_max = PID_MAX_DEFAULT;
64777
64778 -#define RESERVED_PIDS 300
64779 +#define RESERVED_PIDS 500
64780
64781 int pid_max_min = RESERVED_PIDS + 1;
64782 int pid_max_max = PID_MAX_LIMIT;
64783 @@ -419,8 +420,15 @@ EXPORT_SYMBOL(pid_task);
64784 */
64785 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
64786 {
64787 + struct task_struct *task;
64788 +
64789 rcu_lockdep_assert(rcu_read_lock_held());
64790 - return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
64791 + task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
64792 +
64793 + if (gr_pid_is_chrooted(task))
64794 + return NULL;
64795 +
64796 + return task;
64797 }
64798
64799 struct task_struct *find_task_by_vpid(pid_t vnr)
64800 @@ -428,6 +436,12 @@ struct task_struct *find_task_by_vpid(pi
64801 return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
64802 }
64803
64804 +struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
64805 +{
64806 + rcu_lockdep_assert(rcu_read_lock_held());
64807 + return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
64808 +}
64809 +
64810 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
64811 {
64812 struct pid *pid;
64813 diff -urNp linux-3.0.8/kernel/posix-cpu-timers.c linux-3.0.8/kernel/posix-cpu-timers.c
64814 --- linux-3.0.8/kernel/posix-cpu-timers.c 2011-10-25 09:10:33.000000000 -0400
64815 +++ linux-3.0.8/kernel/posix-cpu-timers.c 2011-10-25 09:10:41.000000000 -0400
64816 @@ -6,6 +6,7 @@
64817 #include <linux/posix-timers.h>
64818 #include <linux/errno.h>
64819 #include <linux/math64.h>
64820 +#include <linux/security.h>
64821 #include <asm/uaccess.h>
64822 #include <linux/kernel_stat.h>
64823 #include <trace/events/timer.h>
64824 @@ -1606,14 +1607,14 @@ struct k_clock clock_posix_cpu = {
64825
64826 static __init int init_posix_cpu_timers(void)
64827 {
64828 - struct k_clock process = {
64829 + static struct k_clock process = {
64830 .clock_getres = process_cpu_clock_getres,
64831 .clock_get = process_cpu_clock_get,
64832 .timer_create = process_cpu_timer_create,
64833 .nsleep = process_cpu_nsleep,
64834 .nsleep_restart = process_cpu_nsleep_restart,
64835 };
64836 - struct k_clock thread = {
64837 + static struct k_clock thread = {
64838 .clock_getres = thread_cpu_clock_getres,
64839 .clock_get = thread_cpu_clock_get,
64840 .timer_create = thread_cpu_timer_create,
64841 diff -urNp linux-3.0.8/kernel/posix-timers.c linux-3.0.8/kernel/posix-timers.c
64842 --- linux-3.0.8/kernel/posix-timers.c 2011-07-21 22:17:23.000000000 -0400
64843 +++ linux-3.0.8/kernel/posix-timers.c 2011-08-23 21:48:14.000000000 -0400
64844 @@ -43,6 +43,7 @@
64845 #include <linux/idr.h>
64846 #include <linux/posix-clock.h>
64847 #include <linux/posix-timers.h>
64848 +#include <linux/grsecurity.h>
64849 #include <linux/syscalls.h>
64850 #include <linux/wait.h>
64851 #include <linux/workqueue.h>
64852 @@ -129,7 +130,7 @@ static DEFINE_SPINLOCK(idr_lock);
64853 * which we beg off on and pass to do_sys_settimeofday().
64854 */
64855
64856 -static struct k_clock posix_clocks[MAX_CLOCKS];
64857 +static struct k_clock *posix_clocks[MAX_CLOCKS];
64858
64859 /*
64860 * These ones are defined below.
64861 @@ -227,7 +228,7 @@ static int posix_get_boottime(const cloc
64862 */
64863 static __init int init_posix_timers(void)
64864 {
64865 - struct k_clock clock_realtime = {
64866 + static struct k_clock clock_realtime = {
64867 .clock_getres = hrtimer_get_res,
64868 .clock_get = posix_clock_realtime_get,
64869 .clock_set = posix_clock_realtime_set,
64870 @@ -239,7 +240,7 @@ static __init int init_posix_timers(void
64871 .timer_get = common_timer_get,
64872 .timer_del = common_timer_del,
64873 };
64874 - struct k_clock clock_monotonic = {
64875 + static struct k_clock clock_monotonic = {
64876 .clock_getres = hrtimer_get_res,
64877 .clock_get = posix_ktime_get_ts,
64878 .nsleep = common_nsleep,
64879 @@ -249,19 +250,19 @@ static __init int init_posix_timers(void
64880 .timer_get = common_timer_get,
64881 .timer_del = common_timer_del,
64882 };
64883 - struct k_clock clock_monotonic_raw = {
64884 + static struct k_clock clock_monotonic_raw = {
64885 .clock_getres = hrtimer_get_res,
64886 .clock_get = posix_get_monotonic_raw,
64887 };
64888 - struct k_clock clock_realtime_coarse = {
64889 + static struct k_clock clock_realtime_coarse = {
64890 .clock_getres = posix_get_coarse_res,
64891 .clock_get = posix_get_realtime_coarse,
64892 };
64893 - struct k_clock clock_monotonic_coarse = {
64894 + static struct k_clock clock_monotonic_coarse = {
64895 .clock_getres = posix_get_coarse_res,
64896 .clock_get = posix_get_monotonic_coarse,
64897 };
64898 - struct k_clock clock_boottime = {
64899 + static struct k_clock clock_boottime = {
64900 .clock_getres = hrtimer_get_res,
64901 .clock_get = posix_get_boottime,
64902 .nsleep = common_nsleep,
64903 @@ -272,6 +273,8 @@ static __init int init_posix_timers(void
64904 .timer_del = common_timer_del,
64905 };
64906
64907 + pax_track_stack();
64908 +
64909 posix_timers_register_clock(CLOCK_REALTIME, &clock_realtime);
64910 posix_timers_register_clock(CLOCK_MONOTONIC, &clock_monotonic);
64911 posix_timers_register_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
64912 @@ -473,7 +476,7 @@ void posix_timers_register_clock(const c
64913 return;
64914 }
64915
64916 - posix_clocks[clock_id] = *new_clock;
64917 + posix_clocks[clock_id] = new_clock;
64918 }
64919 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
64920
64921 @@ -519,9 +522,9 @@ static struct k_clock *clockid_to_kclock
64922 return (id & CLOCKFD_MASK) == CLOCKFD ?
64923 &clock_posix_dynamic : &clock_posix_cpu;
64924
64925 - if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
64926 + if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
64927 return NULL;
64928 - return &posix_clocks[id];
64929 + return posix_clocks[id];
64930 }
64931
64932 static int common_timer_create(struct k_itimer *new_timer)
64933 @@ -959,6 +962,13 @@ SYSCALL_DEFINE2(clock_settime, const clo
64934 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
64935 return -EFAULT;
64936
64937 + /* only the CLOCK_REALTIME clock can be set, all other clocks
64938 + have their clock_set fptr set to a nosettime dummy function
64939 + CLOCK_REALTIME has a NULL clock_set fptr which causes it to
64940 + call common_clock_set, which calls do_sys_settimeofday, which
64941 + we hook
64942 + */
64943 +
64944 return kc->clock_set(which_clock, &new_tp);
64945 }
64946
64947 diff -urNp linux-3.0.8/kernel/power/poweroff.c linux-3.0.8/kernel/power/poweroff.c
64948 --- linux-3.0.8/kernel/power/poweroff.c 2011-07-21 22:17:23.000000000 -0400
64949 +++ linux-3.0.8/kernel/power/poweroff.c 2011-08-23 21:47:56.000000000 -0400
64950 @@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_powerof
64951 .enable_mask = SYSRQ_ENABLE_BOOT,
64952 };
64953
64954 -static int pm_sysrq_init(void)
64955 +static int __init pm_sysrq_init(void)
64956 {
64957 register_sysrq_key('o', &sysrq_poweroff_op);
64958 return 0;
64959 diff -urNp linux-3.0.8/kernel/power/process.c linux-3.0.8/kernel/power/process.c
64960 --- linux-3.0.8/kernel/power/process.c 2011-07-21 22:17:23.000000000 -0400
64961 +++ linux-3.0.8/kernel/power/process.c 2011-08-23 21:47:56.000000000 -0400
64962 @@ -41,6 +41,7 @@ static int try_to_freeze_tasks(bool sig_
64963 u64 elapsed_csecs64;
64964 unsigned int elapsed_csecs;
64965 bool wakeup = false;
64966 + bool timedout = false;
64967
64968 do_gettimeofday(&start);
64969
64970 @@ -51,6 +52,8 @@ static int try_to_freeze_tasks(bool sig_
64971
64972 while (true) {
64973 todo = 0;
64974 + if (time_after(jiffies, end_time))
64975 + timedout = true;
64976 read_lock(&tasklist_lock);
64977 do_each_thread(g, p) {
64978 if (frozen(p) || !freezable(p))
64979 @@ -71,9 +74,13 @@ static int try_to_freeze_tasks(bool sig_
64980 * try_to_stop() after schedule() in ptrace/signal
64981 * stop sees TIF_FREEZE.
64982 */
64983 - if (!task_is_stopped_or_traced(p) &&
64984 - !freezer_should_skip(p))
64985 + if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
64986 todo++;
64987 + if (timedout) {
64988 + printk(KERN_ERR "Task refusing to freeze:\n");
64989 + sched_show_task(p);
64990 + }
64991 + }
64992 } while_each_thread(g, p);
64993 read_unlock(&tasklist_lock);
64994
64995 @@ -82,7 +89,7 @@ static int try_to_freeze_tasks(bool sig_
64996 todo += wq_busy;
64997 }
64998
64999 - if (!todo || time_after(jiffies, end_time))
65000 + if (!todo || timedout)
65001 break;
65002
65003 if (pm_wakeup_pending()) {
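The power/process.c hunks above hoist the time_after() deadline test into a timedout flag computed at the top of each pass, so the pass that finally hits the deadline also reports (via sched_show_task()) every task still refusing to freeze, and the loop exit reuses the same flag. A hedged standalone sketch of that loop shape is below; the pass budget and the per-task behaviour are invented stand-ins for jiffies and the freezer state.

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-ins for the freezer loop: each "task" freezes after a given
 * number of polls; the last one never does. */
static int freezes_after[] = { 1, 3, -1 };
#define NTASKS (sizeof(freezes_after) / sizeof(freezes_after[0]))

int main(void)
{
        int pass = 0, max_passes = 5;   /* stand-in for the jiffies deadline */

        while (true) {
                int todo = 0;
                /* Evaluate the deadline once per pass, as the patched loop
                 * does, so the pass that times out can also name every task
                 * that is still not frozen before giving up. */
                bool timedout = ++pass > max_passes;

                for (unsigned int i = 0; i < NTASKS; i++) {
                        if (freezes_after[i] >= 0 && pass >= freezes_after[i])
                                continue;               /* already "frozen" */
                        todo++;
                        if (timedout)
                                printf("task %u refusing to freeze\n", i);
                }
                if (!todo || timedout)
                        break;
        }
        return 0;
}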
65004 diff -urNp linux-3.0.8/kernel/printk.c linux-3.0.8/kernel/printk.c
65005 --- linux-3.0.8/kernel/printk.c 2011-10-24 08:05:30.000000000 -0400
65006 +++ linux-3.0.8/kernel/printk.c 2011-10-16 21:55:28.000000000 -0400
65007 @@ -313,12 +313,17 @@ static int check_syslog_permissions(int
65008 if (from_file && type != SYSLOG_ACTION_OPEN)
65009 return 0;
65010
65011 +#ifdef CONFIG_GRKERNSEC_DMESG
65012 + if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
65013 + return -EPERM;
65014 +#endif
65015 +
65016 if (syslog_action_restricted(type)) {
65017 if (capable(CAP_SYSLOG))
65018 return 0;
65019 /* For historical reasons, accept CAP_SYS_ADMIN too, with a warning */
65020 if (capable(CAP_SYS_ADMIN)) {
65021 - WARN_ONCE(1, "Attempt to access syslog with CAP_SYS_ADMIN "
65022 + printk_once(KERN_WARNING "Attempt to access syslog with CAP_SYS_ADMIN "
65023 "but no CAP_SYSLOG (deprecated).\n");
65024 return 0;
65025 }
65026 diff -urNp linux-3.0.8/kernel/profile.c linux-3.0.8/kernel/profile.c
65027 --- linux-3.0.8/kernel/profile.c 2011-07-21 22:17:23.000000000 -0400
65028 +++ linux-3.0.8/kernel/profile.c 2011-08-23 21:47:56.000000000 -0400
65029 @@ -39,7 +39,7 @@ struct profile_hit {
65030 /* Oprofile timer tick hook */
65031 static int (*timer_hook)(struct pt_regs *) __read_mostly;
65032
65033 -static atomic_t *prof_buffer;
65034 +static atomic_unchecked_t *prof_buffer;
65035 static unsigned long prof_len, prof_shift;
65036
65037 int prof_on __read_mostly;
65038 @@ -281,7 +281,7 @@ static void profile_flip_buffers(void)
65039 hits[i].pc = 0;
65040 continue;
65041 }
65042 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
65043 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
65044 hits[i].hits = hits[i].pc = 0;
65045 }
65046 }
65047 @@ -342,9 +342,9 @@ static void do_profile_hits(int type, vo
65048 * Add the current hit(s) and flush the write-queue out
65049 * to the global buffer:
65050 */
65051 - atomic_add(nr_hits, &prof_buffer[pc]);
65052 + atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
65053 for (i = 0; i < NR_PROFILE_HIT; ++i) {
65054 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
65055 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
65056 hits[i].pc = hits[i].hits = 0;
65057 }
65058 out:
65059 @@ -419,7 +419,7 @@ static void do_profile_hits(int type, vo
65060 {
65061 unsigned long pc;
65062 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
65063 - atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
65064 + atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
65065 }
65066 #endif /* !CONFIG_SMP */
65067
65068 @@ -517,7 +517,7 @@ read_profile(struct file *file, char __u
65069 return -EFAULT;
65070 buf++; p++; count--; read++;
65071 }
65072 - pnt = (char *)prof_buffer + p - sizeof(atomic_t);
65073 + pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
65074 if (copy_to_user(buf, (void *)pnt, count))
65075 return -EFAULT;
65076 read += count;
65077 @@ -548,7 +548,7 @@ static ssize_t write_profile(struct file
65078 }
65079 #endif
65080 profile_discard_flip_buffers();
65081 - memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
65082 + memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
65083 return count;
65084 }
65085
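padata.c and profile.c above convert counters whose wrap-around is harmless (the padata sequence number and the profiling hit buffer) from atomic_t to atomic_unchecked_t, the PaX variant exempt from the REFCOUNT overflow check applied to ordinary atomics. The sketch below illustrates the distinction with C11 atomics under the assumption that a checked counter simply refuses to cross INT_MAX; the real PAX_REFCOUNT mechanism is architecture-specific and not shown here.

#include <stdatomic.h>
#include <limits.h>
#include <stdio.h>

/* "Unchecked" counter: wrap-around is acceptable (statistics, sequence
 * numbers), so the add is unconditional. */
static void atomic_add_unchecked_demo(_Atomic unsigned int *v, unsigned int n)
{
        atomic_fetch_add(v, n);
}

/* "Checked" counter: refuses to move past INT_MAX, standing in for the
 * overflow detection that checked atomics receive under PAX_REFCOUNT. */
static int atomic_add_checked_demo(_Atomic unsigned int *v, unsigned int n)
{
        unsigned int old = atomic_load(v);

        do {
                if (old > (unsigned int)INT_MAX - n)
                        return -1;              /* would overflow: reject */
        } while (!atomic_compare_exchange_weak(v, &old, old + n));
        return 0;
}

int main(void)
{
        _Atomic unsigned int stats = INT_MAX;   /* hit counter, may wrap   */
        _Atomic unsigned int refs  = INT_MAX;   /* refcount, must not wrap */

        atomic_add_unchecked_demo(&stats, 1);
        printf("stats counter is now %u (crossed INT_MAX)\n", atomic_load(&stats));
        printf("refcount increment %s\n",
               atomic_add_checked_demo(&refs, 1) ? "rejected" : "allowed");
        return 0;
}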
65086 diff -urNp linux-3.0.8/kernel/ptrace.c linux-3.0.8/kernel/ptrace.c
65087 --- linux-3.0.8/kernel/ptrace.c 2011-07-21 22:17:23.000000000 -0400
65088 +++ linux-3.0.8/kernel/ptrace.c 2011-08-23 21:48:14.000000000 -0400
65089 @@ -132,7 +132,8 @@ int ptrace_check_attach(struct task_stru
65090 return ret;
65091 }
65092
65093 -int __ptrace_may_access(struct task_struct *task, unsigned int mode)
65094 +static int __ptrace_may_access(struct task_struct *task, unsigned int mode,
65095 + unsigned int log)
65096 {
65097 const struct cred *cred = current_cred(), *tcred;
65098
65099 @@ -158,7 +159,8 @@ int __ptrace_may_access(struct task_stru
65100 cred->gid == tcred->sgid &&
65101 cred->gid == tcred->gid))
65102 goto ok;
65103 - if (ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE))
65104 + if ((!log && ns_capable_nolog(tcred->user->user_ns, CAP_SYS_PTRACE)) ||
65105 + (log && ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE)))
65106 goto ok;
65107 rcu_read_unlock();
65108 return -EPERM;
65109 @@ -167,7 +169,9 @@ ok:
65110 smp_rmb();
65111 if (task->mm)
65112 dumpable = get_dumpable(task->mm);
65113 - if (!dumpable && !task_ns_capable(task, CAP_SYS_PTRACE))
65114 + if (!dumpable &&
65115 + ((!log && !task_ns_capable_nolog(task, CAP_SYS_PTRACE)) ||
65116 + (log && !task_ns_capable(task, CAP_SYS_PTRACE))))
65117 return -EPERM;
65118
65119 return security_ptrace_access_check(task, mode);
65120 @@ -177,7 +181,16 @@ bool ptrace_may_access(struct task_struc
65121 {
65122 int err;
65123 task_lock(task);
65124 - err = __ptrace_may_access(task, mode);
65125 + err = __ptrace_may_access(task, mode, 0);
65126 + task_unlock(task);
65127 + return !err;
65128 +}
65129 +
65130 +bool ptrace_may_access_log(struct task_struct *task, unsigned int mode)
65131 +{
65132 + int err;
65133 + task_lock(task);
65134 + err = __ptrace_may_access(task, mode, 1);
65135 task_unlock(task);
65136 return !err;
65137 }
65138 @@ -205,7 +218,7 @@ static int ptrace_attach(struct task_str
65139 goto out;
65140
65141 task_lock(task);
65142 - retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
65143 + retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH, 1);
65144 task_unlock(task);
65145 if (retval)
65146 goto unlock_creds;
65147 @@ -218,7 +231,7 @@ static int ptrace_attach(struct task_str
65148 goto unlock_tasklist;
65149
65150 task->ptrace = PT_PTRACED;
65151 - if (task_ns_capable(task, CAP_SYS_PTRACE))
65152 + if (task_ns_capable_nolog(task, CAP_SYS_PTRACE))
65153 task->ptrace |= PT_PTRACE_CAP;
65154
65155 __ptrace_link(task, current);
65156 @@ -406,6 +419,8 @@ int ptrace_readdata(struct task_struct *
65157 {
65158 int copied = 0;
65159
65160 + pax_track_stack();
65161 +
65162 while (len > 0) {
65163 char buf[128];
65164 int this_len, retval;
65165 @@ -417,7 +432,7 @@ int ptrace_readdata(struct task_struct *
65166 break;
65167 return -EIO;
65168 }
65169 - if (copy_to_user(dst, buf, retval))
65170 + if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
65171 return -EFAULT;
65172 copied += retval;
65173 src += retval;
65174 @@ -431,6 +446,8 @@ int ptrace_writedata(struct task_struct
65175 {
65176 int copied = 0;
65177
65178 + pax_track_stack();
65179 +
65180 while (len > 0) {
65181 char buf[128];
65182 int this_len, retval;
65183 @@ -613,9 +630,11 @@ int ptrace_request(struct task_struct *c
65184 {
65185 int ret = -EIO;
65186 siginfo_t siginfo;
65187 - void __user *datavp = (void __user *) data;
65188 + void __user *datavp = (__force void __user *) data;
65189 unsigned long __user *datalp = datavp;
65190
65191 + pax_track_stack();
65192 +
65193 switch (request) {
65194 case PTRACE_PEEKTEXT:
65195 case PTRACE_PEEKDATA:
65196 @@ -761,14 +780,21 @@ SYSCALL_DEFINE4(ptrace, long, request, l
65197 goto out;
65198 }
65199
65200 + if (gr_handle_ptrace(child, request)) {
65201 + ret = -EPERM;
65202 + goto out_put_task_struct;
65203 + }
65204 +
65205 if (request == PTRACE_ATTACH) {
65206 ret = ptrace_attach(child);
65207 /*
65208 * Some architectures need to do book-keeping after
65209 * a ptrace attach.
65210 */
65211 - if (!ret)
65212 + if (!ret) {
65213 arch_ptrace_attach(child);
65214 + gr_audit_ptrace(child);
65215 + }
65216 goto out_put_task_struct;
65217 }
65218
65219 @@ -793,7 +819,7 @@ int generic_ptrace_peekdata(struct task_
65220 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
65221 if (copied != sizeof(tmp))
65222 return -EIO;
65223 - return put_user(tmp, (unsigned long __user *)data);
65224 + return put_user(tmp, (__force unsigned long __user *)data);
65225 }
65226
65227 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
65228 @@ -816,6 +842,8 @@ int compat_ptrace_request(struct task_st
65229 siginfo_t siginfo;
65230 int ret;
65231
65232 + pax_track_stack();
65233 +
65234 switch (request) {
65235 case PTRACE_PEEKTEXT:
65236 case PTRACE_PEEKDATA:
65237 @@ -903,14 +931,21 @@ asmlinkage long compat_sys_ptrace(compat
65238 goto out;
65239 }
65240
65241 + if (gr_handle_ptrace(child, request)) {
65242 + ret = -EPERM;
65243 + goto out_put_task_struct;
65244 + }
65245 +
65246 if (request == PTRACE_ATTACH) {
65247 ret = ptrace_attach(child);
65248 /*
65249 * Some architectures need to do book-keeping after
65250 * a ptrace attach.
65251 */
65252 - if (!ret)
65253 + if (!ret) {
65254 arch_ptrace_attach(child);
65255 + gr_audit_ptrace(child);
65256 + }
65257 goto out_put_task_struct;
65258 }
65259
65260 diff -urNp linux-3.0.8/kernel/rcutorture.c linux-3.0.8/kernel/rcutorture.c
65261 --- linux-3.0.8/kernel/rcutorture.c 2011-07-21 22:17:23.000000000 -0400
65262 +++ linux-3.0.8/kernel/rcutorture.c 2011-08-23 21:47:56.000000000 -0400
65263 @@ -138,12 +138,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_
65264 { 0 };
65265 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
65266 { 0 };
65267 -static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
65268 -static atomic_t n_rcu_torture_alloc;
65269 -static atomic_t n_rcu_torture_alloc_fail;
65270 -static atomic_t n_rcu_torture_free;
65271 -static atomic_t n_rcu_torture_mberror;
65272 -static atomic_t n_rcu_torture_error;
65273 +static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
65274 +static atomic_unchecked_t n_rcu_torture_alloc;
65275 +static atomic_unchecked_t n_rcu_torture_alloc_fail;
65276 +static atomic_unchecked_t n_rcu_torture_free;
65277 +static atomic_unchecked_t n_rcu_torture_mberror;
65278 +static atomic_unchecked_t n_rcu_torture_error;
65279 static long n_rcu_torture_boost_ktrerror;
65280 static long n_rcu_torture_boost_rterror;
65281 static long n_rcu_torture_boost_failure;
65282 @@ -223,11 +223,11 @@ rcu_torture_alloc(void)
65283
65284 spin_lock_bh(&rcu_torture_lock);
65285 if (list_empty(&rcu_torture_freelist)) {
65286 - atomic_inc(&n_rcu_torture_alloc_fail);
65287 + atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
65288 spin_unlock_bh(&rcu_torture_lock);
65289 return NULL;
65290 }
65291 - atomic_inc(&n_rcu_torture_alloc);
65292 + atomic_inc_unchecked(&n_rcu_torture_alloc);
65293 p = rcu_torture_freelist.next;
65294 list_del_init(p);
65295 spin_unlock_bh(&rcu_torture_lock);
65296 @@ -240,7 +240,7 @@ rcu_torture_alloc(void)
65297 static void
65298 rcu_torture_free(struct rcu_torture *p)
65299 {
65300 - atomic_inc(&n_rcu_torture_free);
65301 + atomic_inc_unchecked(&n_rcu_torture_free);
65302 spin_lock_bh(&rcu_torture_lock);
65303 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
65304 spin_unlock_bh(&rcu_torture_lock);
65305 @@ -360,7 +360,7 @@ rcu_torture_cb(struct rcu_head *p)
65306 i = rp->rtort_pipe_count;
65307 if (i > RCU_TORTURE_PIPE_LEN)
65308 i = RCU_TORTURE_PIPE_LEN;
65309 - atomic_inc(&rcu_torture_wcount[i]);
65310 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
65311 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
65312 rp->rtort_mbtest = 0;
65313 rcu_torture_free(rp);
65314 @@ -407,7 +407,7 @@ static void rcu_sync_torture_deferred_fr
65315 i = rp->rtort_pipe_count;
65316 if (i > RCU_TORTURE_PIPE_LEN)
65317 i = RCU_TORTURE_PIPE_LEN;
65318 - atomic_inc(&rcu_torture_wcount[i]);
65319 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
65320 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
65321 rp->rtort_mbtest = 0;
65322 list_del(&rp->rtort_free);
65323 @@ -882,7 +882,7 @@ rcu_torture_writer(void *arg)
65324 i = old_rp->rtort_pipe_count;
65325 if (i > RCU_TORTURE_PIPE_LEN)
65326 i = RCU_TORTURE_PIPE_LEN;
65327 - atomic_inc(&rcu_torture_wcount[i]);
65328 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
65329 old_rp->rtort_pipe_count++;
65330 cur_ops->deferred_free(old_rp);
65331 }
65332 @@ -951,7 +951,7 @@ static void rcu_torture_timer(unsigned l
65333 return;
65334 }
65335 if (p->rtort_mbtest == 0)
65336 - atomic_inc(&n_rcu_torture_mberror);
65337 + atomic_inc_unchecked(&n_rcu_torture_mberror);
65338 spin_lock(&rand_lock);
65339 cur_ops->read_delay(&rand);
65340 n_rcu_torture_timers++;
65341 @@ -1013,7 +1013,7 @@ rcu_torture_reader(void *arg)
65342 continue;
65343 }
65344 if (p->rtort_mbtest == 0)
65345 - atomic_inc(&n_rcu_torture_mberror);
65346 + atomic_inc_unchecked(&n_rcu_torture_mberror);
65347 cur_ops->read_delay(&rand);
65348 preempt_disable();
65349 pipe_count = p->rtort_pipe_count;
65350 @@ -1072,16 +1072,16 @@ rcu_torture_printk(char *page)
65351 rcu_torture_current,
65352 rcu_torture_current_version,
65353 list_empty(&rcu_torture_freelist),
65354 - atomic_read(&n_rcu_torture_alloc),
65355 - atomic_read(&n_rcu_torture_alloc_fail),
65356 - atomic_read(&n_rcu_torture_free),
65357 - atomic_read(&n_rcu_torture_mberror),
65358 + atomic_read_unchecked(&n_rcu_torture_alloc),
65359 + atomic_read_unchecked(&n_rcu_torture_alloc_fail),
65360 + atomic_read_unchecked(&n_rcu_torture_free),
65361 + atomic_read_unchecked(&n_rcu_torture_mberror),
65362 n_rcu_torture_boost_ktrerror,
65363 n_rcu_torture_boost_rterror,
65364 n_rcu_torture_boost_failure,
65365 n_rcu_torture_boosts,
65366 n_rcu_torture_timers);
65367 - if (atomic_read(&n_rcu_torture_mberror) != 0 ||
65368 + if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
65369 n_rcu_torture_boost_ktrerror != 0 ||
65370 n_rcu_torture_boost_rterror != 0 ||
65371 n_rcu_torture_boost_failure != 0)
65372 @@ -1089,7 +1089,7 @@ rcu_torture_printk(char *page)
65373 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
65374 if (i > 1) {
65375 cnt += sprintf(&page[cnt], "!!! ");
65376 - atomic_inc(&n_rcu_torture_error);
65377 + atomic_inc_unchecked(&n_rcu_torture_error);
65378 WARN_ON_ONCE(1);
65379 }
65380 cnt += sprintf(&page[cnt], "Reader Pipe: ");
65381 @@ -1103,7 +1103,7 @@ rcu_torture_printk(char *page)
65382 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
65383 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
65384 cnt += sprintf(&page[cnt], " %d",
65385 - atomic_read(&rcu_torture_wcount[i]));
65386 + atomic_read_unchecked(&rcu_torture_wcount[i]));
65387 }
65388 cnt += sprintf(&page[cnt], "\n");
65389 if (cur_ops->stats)
65390 @@ -1412,7 +1412,7 @@ rcu_torture_cleanup(void)
65391
65392 if (cur_ops->cleanup)
65393 cur_ops->cleanup();
65394 - if (atomic_read(&n_rcu_torture_error))
65395 + if (atomic_read_unchecked(&n_rcu_torture_error))
65396 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
65397 else
65398 rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
65399 @@ -1476,17 +1476,17 @@ rcu_torture_init(void)
65400
65401 rcu_torture_current = NULL;
65402 rcu_torture_current_version = 0;
65403 - atomic_set(&n_rcu_torture_alloc, 0);
65404 - atomic_set(&n_rcu_torture_alloc_fail, 0);
65405 - atomic_set(&n_rcu_torture_free, 0);
65406 - atomic_set(&n_rcu_torture_mberror, 0);
65407 - atomic_set(&n_rcu_torture_error, 0);
65408 + atomic_set_unchecked(&n_rcu_torture_alloc, 0);
65409 + atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
65410 + atomic_set_unchecked(&n_rcu_torture_free, 0);
65411 + atomic_set_unchecked(&n_rcu_torture_mberror, 0);
65412 + atomic_set_unchecked(&n_rcu_torture_error, 0);
65413 n_rcu_torture_boost_ktrerror = 0;
65414 n_rcu_torture_boost_rterror = 0;
65415 n_rcu_torture_boost_failure = 0;
65416 n_rcu_torture_boosts = 0;
65417 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
65418 - atomic_set(&rcu_torture_wcount[i], 0);
65419 + atomic_set_unchecked(&rcu_torture_wcount[i], 0);
65420 for_each_possible_cpu(cpu) {
65421 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
65422 per_cpu(rcu_torture_count, cpu)[i] = 0;
65423 diff -urNp linux-3.0.8/kernel/rcutree.c linux-3.0.8/kernel/rcutree.c
65424 --- linux-3.0.8/kernel/rcutree.c 2011-07-21 22:17:23.000000000 -0400
65425 +++ linux-3.0.8/kernel/rcutree.c 2011-09-14 09:08:05.000000000 -0400
65426 @@ -356,9 +356,9 @@ void rcu_enter_nohz(void)
65427 }
65428 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
65429 smp_mb__before_atomic_inc(); /* See above. */
65430 - atomic_inc(&rdtp->dynticks);
65431 + atomic_inc_unchecked(&rdtp->dynticks);
65432 smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */
65433 - WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
65434 + WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
65435 local_irq_restore(flags);
65436
65437 /* If the interrupt queued a callback, get out of dyntick mode. */
65438 @@ -387,10 +387,10 @@ void rcu_exit_nohz(void)
65439 return;
65440 }
65441 smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */
65442 - atomic_inc(&rdtp->dynticks);
65443 + atomic_inc_unchecked(&rdtp->dynticks);
65444 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
65445 smp_mb__after_atomic_inc(); /* See above. */
65446 - WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
65447 + WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
65448 local_irq_restore(flags);
65449 }
65450
65451 @@ -406,14 +406,14 @@ void rcu_nmi_enter(void)
65452 struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
65453
65454 if (rdtp->dynticks_nmi_nesting == 0 &&
65455 - (atomic_read(&rdtp->dynticks) & 0x1))
65456 + (atomic_read_unchecked(&rdtp->dynticks) & 0x1))
65457 return;
65458 rdtp->dynticks_nmi_nesting++;
65459 smp_mb__before_atomic_inc(); /* Force delay from prior write. */
65460 - atomic_inc(&rdtp->dynticks);
65461 + atomic_inc_unchecked(&rdtp->dynticks);
65462 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
65463 smp_mb__after_atomic_inc(); /* See above. */
65464 - WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
65465 + WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
65466 }
65467
65468 /**
65469 @@ -432,9 +432,9 @@ void rcu_nmi_exit(void)
65470 return;
65471 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
65472 smp_mb__before_atomic_inc(); /* See above. */
65473 - atomic_inc(&rdtp->dynticks);
65474 + atomic_inc_unchecked(&rdtp->dynticks);
65475 smp_mb__after_atomic_inc(); /* Force delay to next write. */
65476 - WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
65477 + WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
65478 }
65479
65480 /**
65481 @@ -469,7 +469,7 @@ void rcu_irq_exit(void)
65482 */
65483 static int dyntick_save_progress_counter(struct rcu_data *rdp)
65484 {
65485 - rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
65486 + rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
65487 return 0;
65488 }
65489
65490 @@ -484,7 +484,7 @@ static int rcu_implicit_dynticks_qs(stru
65491 unsigned long curr;
65492 unsigned long snap;
65493
65494 - curr = (unsigned long)atomic_add_return(0, &rdp->dynticks->dynticks);
65495 + curr = (unsigned long)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
65496 snap = (unsigned long)rdp->dynticks_snap;
65497
65498 /*
65499 @@ -1470,7 +1470,7 @@ __rcu_process_callbacks(struct rcu_state
65500 /*
65501 * Do softirq processing for the current CPU.
65502 */
65503 -static void rcu_process_callbacks(struct softirq_action *unused)
65504 +static void rcu_process_callbacks(void)
65505 {
65506 __rcu_process_callbacks(&rcu_sched_state,
65507 &__get_cpu_var(rcu_sched_data));
65508 diff -urNp linux-3.0.8/kernel/rcutree.h linux-3.0.8/kernel/rcutree.h
65509 --- linux-3.0.8/kernel/rcutree.h 2011-07-21 22:17:23.000000000 -0400
65510 +++ linux-3.0.8/kernel/rcutree.h 2011-09-14 09:08:05.000000000 -0400
65511 @@ -86,7 +86,7 @@
65512 struct rcu_dynticks {
65513 int dynticks_nesting; /* Track irq/process nesting level. */
65514 int dynticks_nmi_nesting; /* Track NMI nesting level. */
65515 - atomic_t dynticks; /* Even value for dynticks-idle, else odd. */
65516 + atomic_unchecked_t dynticks; /* Even value for dynticks-idle, else odd. */
65517 };
65518
65519 /* RCU's kthread states for tracing. */
65520 diff -urNp linux-3.0.8/kernel/rcutree_plugin.h linux-3.0.8/kernel/rcutree_plugin.h
65521 --- linux-3.0.8/kernel/rcutree_plugin.h 2011-07-21 22:17:23.000000000 -0400
65522 +++ linux-3.0.8/kernel/rcutree_plugin.h 2011-08-23 21:47:56.000000000 -0400
65523 @@ -822,7 +822,7 @@ void synchronize_rcu_expedited(void)
65524
65525 /* Clean up and exit. */
65526 smp_mb(); /* ensure expedited GP seen before counter increment. */
65527 - ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
65528 + ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
65529 unlock_mb_ret:
65530 mutex_unlock(&sync_rcu_preempt_exp_mutex);
65531 mb_ret:
65532 @@ -1774,8 +1774,8 @@ EXPORT_SYMBOL_GPL(synchronize_sched_expe
65533
65534 #else /* #ifndef CONFIG_SMP */
65535
65536 -static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
65537 -static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);
65538 +static atomic_unchecked_t sync_sched_expedited_started = ATOMIC_INIT(0);
65539 +static atomic_unchecked_t sync_sched_expedited_done = ATOMIC_INIT(0);
65540
65541 static int synchronize_sched_expedited_cpu_stop(void *data)
65542 {
65543 @@ -1830,7 +1830,7 @@ void synchronize_sched_expedited(void)
65544 int firstsnap, s, snap, trycount = 0;
65545
65546 /* Note that atomic_inc_return() implies full memory barrier. */
65547 - firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
65548 + firstsnap = snap = atomic_inc_return_unchecked(&sync_sched_expedited_started);
65549 get_online_cpus();
65550
65551 /*
65552 @@ -1851,7 +1851,7 @@ void synchronize_sched_expedited(void)
65553 }
65554
65555 /* Check to see if someone else did our work for us. */
65556 - s = atomic_read(&sync_sched_expedited_done);
65557 + s = atomic_read_unchecked(&sync_sched_expedited_done);
65558 if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) {
65559 smp_mb(); /* ensure test happens before caller kfree */
65560 return;
65561 @@ -1866,7 +1866,7 @@ void synchronize_sched_expedited(void)
65562 * grace period works for us.
65563 */
65564 get_online_cpus();
65565 - snap = atomic_read(&sync_sched_expedited_started) - 1;
65566 + snap = atomic_read_unchecked(&sync_sched_expedited_started) - 1;
65567 smp_mb(); /* ensure read is before try_stop_cpus(). */
65568 }
65569
65570 @@ -1877,12 +1877,12 @@ void synchronize_sched_expedited(void)
65571 * than we did beat us to the punch.
65572 */
65573 do {
65574 - s = atomic_read(&sync_sched_expedited_done);
65575 + s = atomic_read_unchecked(&sync_sched_expedited_done);
65576 if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) {
65577 smp_mb(); /* ensure test happens before caller kfree */
65578 break;
65579 }
65580 - } while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s);
65581 + } while (atomic_cmpxchg_unchecked(&sync_sched_expedited_done, s, snap) != s);
65582
65583 put_online_cpus();
65584 }
65585 diff -urNp linux-3.0.8/kernel/relay.c linux-3.0.8/kernel/relay.c
65586 --- linux-3.0.8/kernel/relay.c 2011-07-21 22:17:23.000000000 -0400
65587 +++ linux-3.0.8/kernel/relay.c 2011-08-23 21:48:14.000000000 -0400
65588 @@ -1236,6 +1236,8 @@ static ssize_t subbuf_splice_actor(struc
65589 };
65590 ssize_t ret;
65591
65592 + pax_track_stack();
65593 +
65594 if (rbuf->subbufs_produced == rbuf->subbufs_consumed)
65595 return 0;
65596 if (splice_grow_spd(pipe, &spd))
65597 diff -urNp linux-3.0.8/kernel/resource.c linux-3.0.8/kernel/resource.c
65598 --- linux-3.0.8/kernel/resource.c 2011-07-21 22:17:23.000000000 -0400
65599 +++ linux-3.0.8/kernel/resource.c 2011-08-23 21:48:14.000000000 -0400
65600 @@ -141,8 +141,18 @@ static const struct file_operations proc
65601
65602 static int __init ioresources_init(void)
65603 {
65604 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
65605 +#ifdef CONFIG_GRKERNSEC_PROC_USER
65606 + proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
65607 + proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
65608 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
65609 + proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
65610 + proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
65611 +#endif
65612 +#else
65613 proc_create("ioports", 0, NULL, &proc_ioports_operations);
65614 proc_create("iomem", 0, NULL, &proc_iomem_operations);
65615 +#endif
65616 return 0;
65617 }
65618 __initcall(ioresources_init);
65619 diff -urNp linux-3.0.8/kernel/rtmutex-tester.c linux-3.0.8/kernel/rtmutex-tester.c
65620 --- linux-3.0.8/kernel/rtmutex-tester.c 2011-07-21 22:17:23.000000000 -0400
65621 +++ linux-3.0.8/kernel/rtmutex-tester.c 2011-08-23 21:47:56.000000000 -0400
65622 @@ -20,7 +20,7 @@
65623 #define MAX_RT_TEST_MUTEXES 8
65624
65625 static spinlock_t rttest_lock;
65626 -static atomic_t rttest_event;
65627 +static atomic_unchecked_t rttest_event;
65628
65629 struct test_thread_data {
65630 int opcode;
65631 @@ -61,7 +61,7 @@ static int handle_op(struct test_thread_
65632
65633 case RTTEST_LOCKCONT:
65634 td->mutexes[td->opdata] = 1;
65635 - td->event = atomic_add_return(1, &rttest_event);
65636 + td->event = atomic_add_return_unchecked(1, &rttest_event);
65637 return 0;
65638
65639 case RTTEST_RESET:
65640 @@ -74,7 +74,7 @@ static int handle_op(struct test_thread_
65641 return 0;
65642
65643 case RTTEST_RESETEVENT:
65644 - atomic_set(&rttest_event, 0);
65645 + atomic_set_unchecked(&rttest_event, 0);
65646 return 0;
65647
65648 default:
65649 @@ -91,9 +91,9 @@ static int handle_op(struct test_thread_
65650 return ret;
65651
65652 td->mutexes[id] = 1;
65653 - td->event = atomic_add_return(1, &rttest_event);
65654 + td->event = atomic_add_return_unchecked(1, &rttest_event);
65655 rt_mutex_lock(&mutexes[id]);
65656 - td->event = atomic_add_return(1, &rttest_event);
65657 + td->event = atomic_add_return_unchecked(1, &rttest_event);
65658 td->mutexes[id] = 4;
65659 return 0;
65660
65661 @@ -104,9 +104,9 @@ static int handle_op(struct test_thread_
65662 return ret;
65663
65664 td->mutexes[id] = 1;
65665 - td->event = atomic_add_return(1, &rttest_event);
65666 + td->event = atomic_add_return_unchecked(1, &rttest_event);
65667 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
65668 - td->event = atomic_add_return(1, &rttest_event);
65669 + td->event = atomic_add_return_unchecked(1, &rttest_event);
65670 td->mutexes[id] = ret ? 0 : 4;
65671 return ret ? -EINTR : 0;
65672
65673 @@ -115,9 +115,9 @@ static int handle_op(struct test_thread_
65674 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
65675 return ret;
65676
65677 - td->event = atomic_add_return(1, &rttest_event);
65678 + td->event = atomic_add_return_unchecked(1, &rttest_event);
65679 rt_mutex_unlock(&mutexes[id]);
65680 - td->event = atomic_add_return(1, &rttest_event);
65681 + td->event = atomic_add_return_unchecked(1, &rttest_event);
65682 td->mutexes[id] = 0;
65683 return 0;
65684
65685 @@ -164,7 +164,7 @@ void schedule_rt_mutex_test(struct rt_mu
65686 break;
65687
65688 td->mutexes[dat] = 2;
65689 - td->event = atomic_add_return(1, &rttest_event);
65690 + td->event = atomic_add_return_unchecked(1, &rttest_event);
65691 break;
65692
65693 default:
65694 @@ -184,7 +184,7 @@ void schedule_rt_mutex_test(struct rt_mu
65695 return;
65696
65697 td->mutexes[dat] = 3;
65698 - td->event = atomic_add_return(1, &rttest_event);
65699 + td->event = atomic_add_return_unchecked(1, &rttest_event);
65700 break;
65701
65702 case RTTEST_LOCKNOWAIT:
65703 @@ -196,7 +196,7 @@ void schedule_rt_mutex_test(struct rt_mu
65704 return;
65705
65706 td->mutexes[dat] = 1;
65707 - td->event = atomic_add_return(1, &rttest_event);
65708 + td->event = atomic_add_return_unchecked(1, &rttest_event);
65709 return;
65710
65711 default:
65712 diff -urNp linux-3.0.8/kernel/sched_autogroup.c linux-3.0.8/kernel/sched_autogroup.c
65713 --- linux-3.0.8/kernel/sched_autogroup.c 2011-07-21 22:17:23.000000000 -0400
65714 +++ linux-3.0.8/kernel/sched_autogroup.c 2011-08-23 21:47:56.000000000 -0400
65715 @@ -7,7 +7,7 @@
65716
65717 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
65718 static struct autogroup autogroup_default;
65719 -static atomic_t autogroup_seq_nr;
65720 +static atomic_unchecked_t autogroup_seq_nr;
65721
65722 static void __init autogroup_init(struct task_struct *init_task)
65723 {
65724 @@ -78,7 +78,7 @@ static inline struct autogroup *autogrou
65725
65726 kref_init(&ag->kref);
65727 init_rwsem(&ag->lock);
65728 - ag->id = atomic_inc_return(&autogroup_seq_nr);
65729 + ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
65730 ag->tg = tg;
65731 #ifdef CONFIG_RT_GROUP_SCHED
65732 /*
65733 diff -urNp linux-3.0.8/kernel/sched.c linux-3.0.8/kernel/sched.c
65734 --- linux-3.0.8/kernel/sched.c 2011-10-24 08:05:32.000000000 -0400
65735 +++ linux-3.0.8/kernel/sched.c 2011-10-17 23:17:19.000000000 -0400
65736 @@ -4227,6 +4227,8 @@ static void __sched __schedule(void)
65737 struct rq *rq;
65738 int cpu;
65739
65740 + pax_track_stack();
65741 +
65742 need_resched:
65743 preempt_disable();
65744 cpu = smp_processor_id();
65745 @@ -4920,6 +4922,8 @@ int can_nice(const struct task_struct *p
65746 /* convert nice value [19,-20] to rlimit style value [1,40] */
65747 int nice_rlim = 20 - nice;
65748
65749 + gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
65750 +
65751 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
65752 capable(CAP_SYS_NICE));
65753 }
65754 @@ -4953,7 +4957,8 @@ SYSCALL_DEFINE1(nice, int, increment)
65755 if (nice > 19)
65756 nice = 19;
65757
65758 - if (increment < 0 && !can_nice(current, nice))
65759 + if (increment < 0 && (!can_nice(current, nice) ||
65760 + gr_handle_chroot_nice()))
65761 return -EPERM;
65762
65763 retval = security_task_setnice(current, nice);
65764 @@ -5097,6 +5102,7 @@ recheck:
65765 unsigned long rlim_rtprio =
65766 task_rlimit(p, RLIMIT_RTPRIO);
65767
65768 + gr_learn_resource(p, RLIMIT_RTPRIO, param->sched_priority, 1);
65769 /* can't set/change the rt policy */
65770 if (policy != p->policy && !rlim_rtprio)
65771 return -EPERM;
65772 diff -urNp linux-3.0.8/kernel/sched_fair.c linux-3.0.8/kernel/sched_fair.c
65773 --- linux-3.0.8/kernel/sched_fair.c 2011-07-21 22:17:23.000000000 -0400
65774 +++ linux-3.0.8/kernel/sched_fair.c 2011-08-23 21:47:56.000000000 -0400
65775 @@ -4050,7 +4050,7 @@ static void nohz_idle_balance(int this_c
65776 * run_rebalance_domains is triggered when needed from the scheduler tick.
65777 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
65778 */
65779 -static void run_rebalance_domains(struct softirq_action *h)
65780 +static void run_rebalance_domains(void)
65781 {
65782 int this_cpu = smp_processor_id();
65783 struct rq *this_rq = cpu_rq(this_cpu);
65784 diff -urNp linux-3.0.8/kernel/signal.c linux-3.0.8/kernel/signal.c
65785 --- linux-3.0.8/kernel/signal.c 2011-07-21 22:17:23.000000000 -0400
65786 +++ linux-3.0.8/kernel/signal.c 2011-08-23 21:48:14.000000000 -0400
65787 @@ -45,12 +45,12 @@ static struct kmem_cache *sigqueue_cache
65788
65789 int print_fatal_signals __read_mostly;
65790
65791 -static void __user *sig_handler(struct task_struct *t, int sig)
65792 +static __sighandler_t sig_handler(struct task_struct *t, int sig)
65793 {
65794 return t->sighand->action[sig - 1].sa.sa_handler;
65795 }
65796
65797 -static int sig_handler_ignored(void __user *handler, int sig)
65798 +static int sig_handler_ignored(__sighandler_t handler, int sig)
65799 {
65800 /* Is it explicitly or implicitly ignored? */
65801 return handler == SIG_IGN ||
65802 @@ -60,7 +60,7 @@ static int sig_handler_ignored(void __us
65803 static int sig_task_ignored(struct task_struct *t, int sig,
65804 int from_ancestor_ns)
65805 {
65806 - void __user *handler;
65807 + __sighandler_t handler;
65808
65809 handler = sig_handler(t, sig);
65810
65811 @@ -320,6 +320,9 @@ __sigqueue_alloc(int sig, struct task_st
65812 atomic_inc(&user->sigpending);
65813 rcu_read_unlock();
65814
65815 + if (!override_rlimit)
65816 + gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
65817 +
65818 if (override_rlimit ||
65819 atomic_read(&user->sigpending) <=
65820 task_rlimit(t, RLIMIT_SIGPENDING)) {
65821 @@ -444,7 +447,7 @@ flush_signal_handlers(struct task_struct
65822
65823 int unhandled_signal(struct task_struct *tsk, int sig)
65824 {
65825 - void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
65826 + __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
65827 if (is_global_init(tsk))
65828 return 1;
65829 if (handler != SIG_IGN && handler != SIG_DFL)
65830 @@ -770,6 +773,13 @@ static int check_kill_permission(int sig
65831 }
65832 }
65833
65834 + /* allow glibc communication via tgkill to other threads in our
65835 + thread group */
65836 + if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
65837 + sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
65838 + && gr_handle_signal(t, sig))
65839 + return -EPERM;
65840 +
65841 return security_task_kill(t, info, sig, 0);
65842 }
65843
65844 @@ -1092,7 +1102,7 @@ __group_send_sig_info(int sig, struct si
65845 return send_signal(sig, info, p, 1);
65846 }
65847
65848 -static int
65849 +int
65850 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
65851 {
65852 return send_signal(sig, info, t, 0);
65853 @@ -1129,6 +1139,7 @@ force_sig_info(int sig, struct siginfo *
65854 unsigned long int flags;
65855 int ret, blocked, ignored;
65856 struct k_sigaction *action;
65857 + int is_unhandled = 0;
65858
65859 spin_lock_irqsave(&t->sighand->siglock, flags);
65860 action = &t->sighand->action[sig-1];
65861 @@ -1143,9 +1154,18 @@ force_sig_info(int sig, struct siginfo *
65862 }
65863 if (action->sa.sa_handler == SIG_DFL)
65864 t->signal->flags &= ~SIGNAL_UNKILLABLE;
65865 + if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
65866 + is_unhandled = 1;
65867 ret = specific_send_sig_info(sig, info, t);
65868 spin_unlock_irqrestore(&t->sighand->siglock, flags);
65869
65870 + /* only deal with unhandled signals, java etc trigger SIGSEGV during
65871 + normal operation */
65872 + if (is_unhandled) {
65873 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
65874 + gr_handle_crash(t, sig);
65875 + }
65876 +
65877 return ret;
65878 }
65879
65880 @@ -1212,8 +1232,11 @@ int group_send_sig_info(int sig, struct
65881 ret = check_kill_permission(sig, info, p);
65882 rcu_read_unlock();
65883
65884 - if (!ret && sig)
65885 + if (!ret && sig) {
65886 ret = do_send_sig_info(sig, info, p, true);
65887 + if (!ret)
65888 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
65889 + }
65890
65891 return ret;
65892 }
65893 @@ -1839,6 +1862,8 @@ void ptrace_notify(int exit_code)
65894 {
65895 siginfo_t info;
65896
65897 + pax_track_stack();
65898 +
65899 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
65900
65901 memset(&info, 0, sizeof info);
65902 @@ -2639,7 +2664,15 @@ do_send_specific(pid_t tgid, pid_t pid,
65903 int error = -ESRCH;
65904
65905 rcu_read_lock();
65906 - p = find_task_by_vpid(pid);
65907 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
65908 + /* allow glibc communication via tgkill to other threads in our
65909 + thread group */
65910 + if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
65911 + sig == (SIGRTMIN+1) && tgid == info->si_pid)
65912 + p = find_task_by_vpid_unrestricted(pid);
65913 + else
65914 +#endif
65915 + p = find_task_by_vpid(pid);
65916 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
65917 error = check_kill_permission(sig, info, p);
65918 /*
65919 diff -urNp linux-3.0.8/kernel/smp.c linux-3.0.8/kernel/smp.c
65920 --- linux-3.0.8/kernel/smp.c 2011-07-21 22:17:23.000000000 -0400
65921 +++ linux-3.0.8/kernel/smp.c 2011-08-23 21:47:56.000000000 -0400
65922 @@ -580,22 +580,22 @@ int smp_call_function(smp_call_func_t fu
65923 }
65924 EXPORT_SYMBOL(smp_call_function);
65925
65926 -void ipi_call_lock(void)
65927 +void ipi_call_lock(void) __acquires(call_function.lock)
65928 {
65929 raw_spin_lock(&call_function.lock);
65930 }
65931
65932 -void ipi_call_unlock(void)
65933 +void ipi_call_unlock(void) __releases(call_function.lock)
65934 {
65935 raw_spin_unlock(&call_function.lock);
65936 }
65937
65938 -void ipi_call_lock_irq(void)
65939 +void ipi_call_lock_irq(void) __acquires(call_function.lock)
65940 {
65941 raw_spin_lock_irq(&call_function.lock);
65942 }
65943
65944 -void ipi_call_unlock_irq(void)
65945 +void ipi_call_unlock_irq(void) __releases(call_function.lock)
65946 {
65947 raw_spin_unlock_irq(&call_function.lock);
65948 }
65949 diff -urNp linux-3.0.8/kernel/softirq.c linux-3.0.8/kernel/softirq.c
65950 --- linux-3.0.8/kernel/softirq.c 2011-07-21 22:17:23.000000000 -0400
65951 +++ linux-3.0.8/kernel/softirq.c 2011-08-23 21:47:56.000000000 -0400
65952 @@ -56,7 +56,7 @@ static struct softirq_action softirq_vec
65953
65954 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
65955
65956 -char *softirq_to_name[NR_SOFTIRQS] = {
65957 +const char * const softirq_to_name[NR_SOFTIRQS] = {
65958 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
65959 "TASKLET", "SCHED", "HRTIMER", "RCU"
65960 };
65961 @@ -235,7 +235,7 @@ restart:
65962 kstat_incr_softirqs_this_cpu(vec_nr);
65963
65964 trace_softirq_entry(vec_nr);
65965 - h->action(h);
65966 + h->action();
65967 trace_softirq_exit(vec_nr);
65968 if (unlikely(prev_count != preempt_count())) {
65969 printk(KERN_ERR "huh, entered softirq %u %s %p"
65970 @@ -385,9 +385,11 @@ void raise_softirq(unsigned int nr)
65971 local_irq_restore(flags);
65972 }
65973
65974 -void open_softirq(int nr, void (*action)(struct softirq_action *))
65975 +void open_softirq(int nr, void (*action)(void))
65976 {
65977 - softirq_vec[nr].action = action;
65978 + pax_open_kernel();
65979 + *(void **)&softirq_vec[nr].action = action;
65980 + pax_close_kernel();
65981 }
65982
65983 /*
65984 @@ -441,7 +443,7 @@ void __tasklet_hi_schedule_first(struct
65985
65986 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
65987
65988 -static void tasklet_action(struct softirq_action *a)
65989 +static void tasklet_action(void)
65990 {
65991 struct tasklet_struct *list;
65992
65993 @@ -476,7 +478,7 @@ static void tasklet_action(struct softir
65994 }
65995 }
65996
65997 -static void tasklet_hi_action(struct softirq_action *a)
65998 +static void tasklet_hi_action(void)
65999 {
66000 struct tasklet_struct *list;
66001
66002 diff -urNp linux-3.0.8/kernel/sys.c linux-3.0.8/kernel/sys.c
66003 --- linux-3.0.8/kernel/sys.c 2011-10-25 09:10:33.000000000 -0400
66004 +++ linux-3.0.8/kernel/sys.c 2011-10-25 09:10:41.000000000 -0400
66005 @@ -158,6 +158,12 @@ static int set_one_prio(struct task_stru
66006 error = -EACCES;
66007 goto out;
66008 }
66009 +
66010 + if (gr_handle_chroot_setpriority(p, niceval)) {
66011 + error = -EACCES;
66012 + goto out;
66013 + }
66014 +
66015 no_nice = security_task_setnice(p, niceval);
66016 if (no_nice) {
66017 error = no_nice;
66018 @@ -541,6 +547,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, g
66019 goto error;
66020 }
66021
66022 + if (gr_check_group_change(new->gid, new->egid, -1))
66023 + goto error;
66024 +
66025 if (rgid != (gid_t) -1 ||
66026 (egid != (gid_t) -1 && egid != old->gid))
66027 new->sgid = new->egid;
66028 @@ -570,6 +579,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
66029 old = current_cred();
66030
66031 retval = -EPERM;
66032 +
66033 + if (gr_check_group_change(gid, gid, gid))
66034 + goto error;
66035 +
66036 if (nsown_capable(CAP_SETGID))
66037 new->gid = new->egid = new->sgid = new->fsgid = gid;
66038 else if (gid == old->gid || gid == old->sgid)
66039 @@ -595,11 +608,18 @@ static int set_user(struct cred *new)
66040 if (!new_user)
66041 return -EAGAIN;
66042
66043 + /*
66044 + * We don't fail in case of NPROC limit excess here because too many
66045 + * poorly written programs don't check set*uid() return code, assuming
66046 + * it never fails if called by root. We may still enforce NPROC limit
66047 + * for programs doing set*uid()+execve() by harmlessly deferring the
66048 + * failure to the execve() stage.
66049 + */
66050 if (atomic_read(&new_user->processes) >= rlimit(RLIMIT_NPROC) &&
66051 - new_user != INIT_USER) {
66052 - free_uid(new_user);
66053 - return -EAGAIN;
66054 - }
66055 + new_user != INIT_USER)
66056 + current->flags |= PF_NPROC_EXCEEDED;
66057 + else
66058 + current->flags &= ~PF_NPROC_EXCEEDED;
66059
66060 free_uid(new->user);
66061 new->user = new_user;
66062 @@ -650,6 +670,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, u
66063 goto error;
66064 }
66065
66066 + if (gr_check_user_change(new->uid, new->euid, -1))
66067 + goto error;
66068 +
66069 if (new->uid != old->uid) {
66070 retval = set_user(new);
66071 if (retval < 0)
66072 @@ -694,6 +717,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
66073 old = current_cred();
66074
66075 retval = -EPERM;
66076 +
66077 + if (gr_check_crash_uid(uid))
66078 + goto error;
66079 + if (gr_check_user_change(uid, uid, uid))
66080 + goto error;
66081 +
66082 if (nsown_capable(CAP_SETUID)) {
66083 new->suid = new->uid = uid;
66084 if (uid != old->uid) {
66085 @@ -748,6 +777,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid,
66086 goto error;
66087 }
66088
66089 + if (gr_check_user_change(ruid, euid, -1))
66090 + goto error;
66091 +
66092 if (ruid != (uid_t) -1) {
66093 new->uid = ruid;
66094 if (ruid != old->uid) {
66095 @@ -812,6 +844,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid,
66096 goto error;
66097 }
66098
66099 + if (gr_check_group_change(rgid, egid, -1))
66100 + goto error;
66101 +
66102 if (rgid != (gid_t) -1)
66103 new->gid = rgid;
66104 if (egid != (gid_t) -1)
66105 @@ -858,6 +893,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
66106 old = current_cred();
66107 old_fsuid = old->fsuid;
66108
66109 + if (gr_check_user_change(-1, -1, uid))
66110 + goto error;
66111 +
66112 if (uid == old->uid || uid == old->euid ||
66113 uid == old->suid || uid == old->fsuid ||
66114 nsown_capable(CAP_SETUID)) {
66115 @@ -868,6 +906,7 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
66116 }
66117 }
66118
66119 +error:
66120 abort_creds(new);
66121 return old_fsuid;
66122
66123 @@ -894,12 +933,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
66124 if (gid == old->gid || gid == old->egid ||
66125 gid == old->sgid || gid == old->fsgid ||
66126 nsown_capable(CAP_SETGID)) {
66127 + if (gr_check_group_change(-1, -1, gid))
66128 + goto error;
66129 +
66130 if (gid != old_fsgid) {
66131 new->fsgid = gid;
66132 goto change_okay;
66133 }
66134 }
66135
66136 +error:
66137 abort_creds(new);
66138 return old_fsgid;
66139
66140 @@ -1205,19 +1248,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_
66141 return -EFAULT;
66142
66143 down_read(&uts_sem);
66144 - error = __copy_to_user(&name->sysname, &utsname()->sysname,
66145 + error = __copy_to_user(name->sysname, &utsname()->sysname,
66146 __OLD_UTS_LEN);
66147 error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
66148 - error |= __copy_to_user(&name->nodename, &utsname()->nodename,
66149 + error |= __copy_to_user(name->nodename, &utsname()->nodename,
66150 __OLD_UTS_LEN);
66151 error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
66152 - error |= __copy_to_user(&name->release, &utsname()->release,
66153 + error |= __copy_to_user(name->release, &utsname()->release,
66154 __OLD_UTS_LEN);
66155 error |= __put_user(0, name->release + __OLD_UTS_LEN);
66156 - error |= __copy_to_user(&name->version, &utsname()->version,
66157 + error |= __copy_to_user(name->version, &utsname()->version,
66158 __OLD_UTS_LEN);
66159 error |= __put_user(0, name->version + __OLD_UTS_LEN);
66160 - error |= __copy_to_user(&name->machine, &utsname()->machine,
66161 + error |= __copy_to_user(name->machine, &utsname()->machine,
66162 __OLD_UTS_LEN);
66163 error |= __put_user(0, name->machine + __OLD_UTS_LEN);
66164 up_read(&uts_sem);
66165 @@ -1680,7 +1723,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsi
66166 error = get_dumpable(me->mm);
66167 break;
66168 case PR_SET_DUMPABLE:
66169 - if (arg2 < 0 || arg2 > 1) {
66170 + if (arg2 > 1) {
66171 error = -EINVAL;
66172 break;
66173 }
66174 diff -urNp linux-3.0.8/kernel/sysctl_binary.c linux-3.0.8/kernel/sysctl_binary.c
66175 --- linux-3.0.8/kernel/sysctl_binary.c 2011-07-21 22:17:23.000000000 -0400
66176 +++ linux-3.0.8/kernel/sysctl_binary.c 2011-10-06 04:17:55.000000000 -0400
66177 @@ -989,7 +989,7 @@ static ssize_t bin_intvec(struct file *f
66178 int i;
66179
66180 set_fs(KERNEL_DS);
66181 - result = vfs_read(file, buffer, BUFSZ - 1, &pos);
66182 + result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
66183 set_fs(old_fs);
66184 if (result < 0)
66185 goto out_kfree;
66186 @@ -1034,7 +1034,7 @@ static ssize_t bin_intvec(struct file *f
66187 }
66188
66189 set_fs(KERNEL_DS);
66190 - result = vfs_write(file, buffer, str - buffer, &pos);
66191 + result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
66192 set_fs(old_fs);
66193 if (result < 0)
66194 goto out_kfree;
66195 @@ -1067,7 +1067,7 @@ static ssize_t bin_ulongvec(struct file
66196 int i;
66197
66198 set_fs(KERNEL_DS);
66199 - result = vfs_read(file, buffer, BUFSZ - 1, &pos);
66200 + result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
66201 set_fs(old_fs);
66202 if (result < 0)
66203 goto out_kfree;
66204 @@ -1112,7 +1112,7 @@ static ssize_t bin_ulongvec(struct file
66205 }
66206
66207 set_fs(KERNEL_DS);
66208 - result = vfs_write(file, buffer, str - buffer, &pos);
66209 + result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
66210 set_fs(old_fs);
66211 if (result < 0)
66212 goto out_kfree;
66213 @@ -1138,7 +1138,7 @@ static ssize_t bin_uuid(struct file *fil
66214 int i;
66215
66216 set_fs(KERNEL_DS);
66217 - result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
66218 + result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
66219 set_fs(old_fs);
66220 if (result < 0)
66221 goto out;
66222 @@ -1185,7 +1185,7 @@ static ssize_t bin_dn_node_address(struc
66223 __le16 dnaddr;
66224
66225 set_fs(KERNEL_DS);
66226 - result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
66227 + result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
66228 set_fs(old_fs);
66229 if (result < 0)
66230 goto out;
66231 @@ -1233,7 +1233,7 @@ static ssize_t bin_dn_node_address(struc
66232 le16_to_cpu(dnaddr) & 0x3ff);
66233
66234 set_fs(KERNEL_DS);
66235 - result = vfs_write(file, buf, len, &pos);
66236 + result = vfs_write(file, (const char __force_user *)buf, len, &pos);
66237 set_fs(old_fs);
66238 if (result < 0)
66239 goto out;
66240 diff -urNp linux-3.0.8/kernel/sysctl.c linux-3.0.8/kernel/sysctl.c
66241 --- linux-3.0.8/kernel/sysctl.c 2011-07-21 22:17:23.000000000 -0400
66242 +++ linux-3.0.8/kernel/sysctl.c 2011-08-23 21:48:14.000000000 -0400
66243 @@ -85,6 +85,13 @@
66244
66245
66246 #if defined(CONFIG_SYSCTL)
66247 +#include <linux/grsecurity.h>
66248 +#include <linux/grinternal.h>
66249 +
66250 +extern __u32 gr_handle_sysctl(const ctl_table *table, const int op);
66251 +extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
66252 + const int op);
66253 +extern int gr_handle_chroot_sysctl(const int op);
66254
66255 /* External variables not in a header file. */
66256 extern int sysctl_overcommit_memory;
66257 @@ -197,6 +204,7 @@ static int sysrq_sysctl_handler(ctl_tabl
66258 }
66259
66260 #endif
66261 +extern struct ctl_table grsecurity_table[];
66262
66263 static struct ctl_table root_table[];
66264 static struct ctl_table_root sysctl_table_root;
66265 @@ -226,6 +234,20 @@ extern struct ctl_table epoll_table[];
66266 int sysctl_legacy_va_layout;
66267 #endif
66268
66269 +#ifdef CONFIG_PAX_SOFTMODE
66270 +static ctl_table pax_table[] = {
66271 + {
66272 + .procname = "softmode",
66273 + .data = &pax_softmode,
66274 + .maxlen = sizeof(unsigned int),
66275 + .mode = 0600,
66276 + .proc_handler = &proc_dointvec,
66277 + },
66278 +
66279 + { }
66280 +};
66281 +#endif
66282 +
66283 /* The default sysctl tables: */
66284
66285 static struct ctl_table root_table[] = {
66286 @@ -272,6 +294,22 @@ static int max_extfrag_threshold = 1000;
66287 #endif
66288
66289 static struct ctl_table kern_table[] = {
66290 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
66291 + {
66292 + .procname = "grsecurity",
66293 + .mode = 0500,
66294 + .child = grsecurity_table,
66295 + },
66296 +#endif
66297 +
66298 +#ifdef CONFIG_PAX_SOFTMODE
66299 + {
66300 + .procname = "pax",
66301 + .mode = 0500,
66302 + .child = pax_table,
66303 + },
66304 +#endif
66305 +
66306 {
66307 .procname = "sched_child_runs_first",
66308 .data = &sysctl_sched_child_runs_first,
66309 @@ -546,7 +584,7 @@ static struct ctl_table kern_table[] = {
66310 .data = &modprobe_path,
66311 .maxlen = KMOD_PATH_LEN,
66312 .mode = 0644,
66313 - .proc_handler = proc_dostring,
66314 + .proc_handler = proc_dostring_modpriv,
66315 },
66316 {
66317 .procname = "modules_disabled",
66318 @@ -713,16 +751,20 @@ static struct ctl_table kern_table[] = {
66319 .extra1 = &zero,
66320 .extra2 = &one,
66321 },
66322 +#endif
66323 {
66324 .procname = "kptr_restrict",
66325 .data = &kptr_restrict,
66326 .maxlen = sizeof(int),
66327 .mode = 0644,
66328 .proc_handler = proc_dmesg_restrict,
66329 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66330 + .extra1 = &two,
66331 +#else
66332 .extra1 = &zero,
66333 +#endif
66334 .extra2 = &two,
66335 },
66336 -#endif
66337 {
66338 .procname = "ngroups_max",
66339 .data = &ngroups_max,
66340 @@ -1205,6 +1247,13 @@ static struct ctl_table vm_table[] = {
66341 .proc_handler = proc_dointvec_minmax,
66342 .extra1 = &zero,
66343 },
66344 + {
66345 + .procname = "heap_stack_gap",
66346 + .data = &sysctl_heap_stack_gap,
66347 + .maxlen = sizeof(sysctl_heap_stack_gap),
66348 + .mode = 0644,
66349 + .proc_handler = proc_doulongvec_minmax,
66350 + },
66351 #else
66352 {
66353 .procname = "nr_trim_pages",
66354 @@ -1714,6 +1763,17 @@ static int test_perm(int mode, int op)
66355 int sysctl_perm(struct ctl_table_root *root, struct ctl_table *table, int op)
66356 {
66357 int mode;
66358 + int error;
66359 +
66360 + if (table->parent != NULL && table->parent->procname != NULL &&
66361 + table->procname != NULL &&
66362 + gr_handle_sysctl_mod(table->parent->procname, table->procname, op))
66363 + return -EACCES;
66364 + if (gr_handle_chroot_sysctl(op))
66365 + return -EACCES;
66366 + error = gr_handle_sysctl(table, op);
66367 + if (error)
66368 + return error;
66369
66370 if (root->permissions)
66371 mode = root->permissions(root, current->nsproxy, table);
66372 @@ -2118,6 +2178,16 @@ int proc_dostring(struct ctl_table *tabl
66373 buffer, lenp, ppos);
66374 }
66375
66376 +int proc_dostring_modpriv(struct ctl_table *table, int write,
66377 + void __user *buffer, size_t *lenp, loff_t *ppos)
66378 +{
66379 + if (write && !capable(CAP_SYS_MODULE))
66380 + return -EPERM;
66381 +
66382 + return _proc_do_string(table->data, table->maxlen, write,
66383 + buffer, lenp, ppos);
66384 +}
66385 +
66386 static size_t proc_skip_spaces(char **buf)
66387 {
66388 size_t ret;
66389 @@ -2223,6 +2293,8 @@ static int proc_put_long(void __user **b
66390 len = strlen(tmp);
66391 if (len > *size)
66392 len = *size;
66393 + if (len > sizeof(tmp))
66394 + len = sizeof(tmp);
66395 if (copy_to_user(*buf, tmp, len))
66396 return -EFAULT;
66397 *size -= len;
66398 @@ -2539,8 +2611,11 @@ static int __do_proc_doulongvec_minmax(v
66399 *i = val;
66400 } else {
66401 val = convdiv * (*i) / convmul;
66402 - if (!first)
66403 + if (!first) {
66404 err = proc_put_char(&buffer, &left, '\t');
66405 + if (err)
66406 + break;
66407 + }
66408 err = proc_put_long(&buffer, &left, val, false);
66409 if (err)
66410 break;
66411 @@ -2935,6 +3010,12 @@ int proc_dostring(struct ctl_table *tabl
66412 return -ENOSYS;
66413 }
66414
66415 +int proc_dostring_modpriv(struct ctl_table *table, int write,
66416 + void __user *buffer, size_t *lenp, loff_t *ppos)
66417 +{
66418 + return -ENOSYS;
66419 +}
66420 +
66421 int proc_dointvec(struct ctl_table *table, int write,
66422 void __user *buffer, size_t *lenp, loff_t *ppos)
66423 {
66424 @@ -2991,6 +3072,7 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
66425 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
66426 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
66427 EXPORT_SYMBOL(proc_dostring);
66428 +EXPORT_SYMBOL(proc_dostring_modpriv);
66429 EXPORT_SYMBOL(proc_doulongvec_minmax);
66430 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
66431 EXPORT_SYMBOL(register_sysctl_table);
66432 diff -urNp linux-3.0.8/kernel/sysctl_check.c linux-3.0.8/kernel/sysctl_check.c
66433 --- linux-3.0.8/kernel/sysctl_check.c 2011-07-21 22:17:23.000000000 -0400
66434 +++ linux-3.0.8/kernel/sysctl_check.c 2011-08-23 21:48:14.000000000 -0400
66435 @@ -129,6 +129,7 @@ int sysctl_check_table(struct nsproxy *n
66436 set_fail(&fail, table, "Directory with extra2");
66437 } else {
66438 if ((table->proc_handler == proc_dostring) ||
66439 + (table->proc_handler == proc_dostring_modpriv) ||
66440 (table->proc_handler == proc_dointvec) ||
66441 (table->proc_handler == proc_dointvec_minmax) ||
66442 (table->proc_handler == proc_dointvec_jiffies) ||
66443 diff -urNp linux-3.0.8/kernel/taskstats.c linux-3.0.8/kernel/taskstats.c
66444 --- linux-3.0.8/kernel/taskstats.c 2011-07-21 22:17:23.000000000 -0400
66445 +++ linux-3.0.8/kernel/taskstats.c 2011-08-23 21:48:14.000000000 -0400
66446 @@ -27,9 +27,12 @@
66447 #include <linux/cgroup.h>
66448 #include <linux/fs.h>
66449 #include <linux/file.h>
66450 +#include <linux/grsecurity.h>
66451 #include <net/genetlink.h>
66452 #include <asm/atomic.h>
66453
66454 +extern int gr_is_taskstats_denied(int pid);
66455 +
66456 /*
66457 * Maximum length of a cpumask that can be specified in
66458 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
66459 @@ -558,6 +561,9 @@ err:
66460
66461 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
66462 {
66463 + if (gr_is_taskstats_denied(current->pid))
66464 + return -EACCES;
66465 +
66466 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
66467 return cmd_attr_register_cpumask(info);
66468 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
66469 diff -urNp linux-3.0.8/kernel/time/alarmtimer.c linux-3.0.8/kernel/time/alarmtimer.c
66470 --- linux-3.0.8/kernel/time/alarmtimer.c 2011-10-24 08:05:30.000000000 -0400
66471 +++ linux-3.0.8/kernel/time/alarmtimer.c 2011-10-16 21:55:28.000000000 -0400
66472 @@ -693,7 +693,7 @@ static int __init alarmtimer_init(void)
66473 {
66474 int error = 0;
66475 int i;
66476 - struct k_clock alarm_clock = {
66477 + static struct k_clock alarm_clock = {
66478 .clock_getres = alarm_clock_getres,
66479 .clock_get = alarm_clock_get,
66480 .timer_create = alarm_timer_create,
66481 diff -urNp linux-3.0.8/kernel/time/tick-broadcast.c linux-3.0.8/kernel/time/tick-broadcast.c
66482 --- linux-3.0.8/kernel/time/tick-broadcast.c 2011-07-21 22:17:23.000000000 -0400
66483 +++ linux-3.0.8/kernel/time/tick-broadcast.c 2011-08-23 21:47:56.000000000 -0400
66484 @@ -115,7 +115,7 @@ int tick_device_uses_broadcast(struct cl
66485 * then clear the broadcast bit.
66486 */
66487 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
66488 - int cpu = smp_processor_id();
66489 + cpu = smp_processor_id();
66490
66491 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
66492 tick_broadcast_clear_oneshot(cpu);
66493 diff -urNp linux-3.0.8/kernel/time/timekeeping.c linux-3.0.8/kernel/time/timekeeping.c
66494 --- linux-3.0.8/kernel/time/timekeeping.c 2011-07-21 22:17:23.000000000 -0400
66495 +++ linux-3.0.8/kernel/time/timekeeping.c 2011-08-23 21:48:14.000000000 -0400
66496 @@ -14,6 +14,7 @@
66497 #include <linux/init.h>
66498 #include <linux/mm.h>
66499 #include <linux/sched.h>
66500 +#include <linux/grsecurity.h>
66501 #include <linux/syscore_ops.h>
66502 #include <linux/clocksource.h>
66503 #include <linux/jiffies.h>
66504 @@ -361,6 +362,8 @@ int do_settimeofday(const struct timespe
66505 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
66506 return -EINVAL;
66507
66508 + gr_log_timechange();
66509 +
66510 write_seqlock_irqsave(&xtime_lock, flags);
66511
66512 timekeeping_forward_now();
66513 diff -urNp linux-3.0.8/kernel/time/timer_list.c linux-3.0.8/kernel/time/timer_list.c
66514 --- linux-3.0.8/kernel/time/timer_list.c 2011-07-21 22:17:23.000000000 -0400
66515 +++ linux-3.0.8/kernel/time/timer_list.c 2011-08-23 21:48:14.000000000 -0400
66516 @@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base,
66517
66518 static void print_name_offset(struct seq_file *m, void *sym)
66519 {
66520 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66521 + SEQ_printf(m, "<%p>", NULL);
66522 +#else
66523 char symname[KSYM_NAME_LEN];
66524
66525 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
66526 SEQ_printf(m, "<%pK>", sym);
66527 else
66528 SEQ_printf(m, "%s", symname);
66529 +#endif
66530 }
66531
66532 static void
66533 @@ -112,7 +116,11 @@ next_one:
66534 static void
66535 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
66536 {
66537 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66538 + SEQ_printf(m, " .base: %p\n", NULL);
66539 +#else
66540 SEQ_printf(m, " .base: %pK\n", base);
66541 +#endif
66542 SEQ_printf(m, " .index: %d\n",
66543 base->index);
66544 SEQ_printf(m, " .resolution: %Lu nsecs\n",
66545 @@ -293,7 +301,11 @@ static int __init init_timer_list_procfs
66546 {
66547 struct proc_dir_entry *pe;
66548
66549 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
66550 + pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
66551 +#else
66552 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
66553 +#endif
66554 if (!pe)
66555 return -ENOMEM;
66556 return 0;
66557 diff -urNp linux-3.0.8/kernel/time/timer_stats.c linux-3.0.8/kernel/time/timer_stats.c
66558 --- linux-3.0.8/kernel/time/timer_stats.c 2011-07-21 22:17:23.000000000 -0400
66559 +++ linux-3.0.8/kernel/time/timer_stats.c 2011-08-23 21:48:14.000000000 -0400
66560 @@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
66561 static unsigned long nr_entries;
66562 static struct entry entries[MAX_ENTRIES];
66563
66564 -static atomic_t overflow_count;
66565 +static atomic_unchecked_t overflow_count;
66566
66567 /*
66568 * The entries are in a hash-table, for fast lookup:
66569 @@ -140,7 +140,7 @@ static void reset_entries(void)
66570 nr_entries = 0;
66571 memset(entries, 0, sizeof(entries));
66572 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
66573 - atomic_set(&overflow_count, 0);
66574 + atomic_set_unchecked(&overflow_count, 0);
66575 }
66576
66577 static struct entry *alloc_entry(void)
66578 @@ -261,7 +261,7 @@ void timer_stats_update_stats(void *time
66579 if (likely(entry))
66580 entry->count++;
66581 else
66582 - atomic_inc(&overflow_count);
66583 + atomic_inc_unchecked(&overflow_count);
66584
66585 out_unlock:
66586 raw_spin_unlock_irqrestore(lock, flags);
66587 @@ -269,12 +269,16 @@ void timer_stats_update_stats(void *time
66588
66589 static void print_name_offset(struct seq_file *m, unsigned long addr)
66590 {
66591 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66592 + seq_printf(m, "<%p>", NULL);
66593 +#else
66594 char symname[KSYM_NAME_LEN];
66595
66596 if (lookup_symbol_name(addr, symname) < 0)
66597 seq_printf(m, "<%p>", (void *)addr);
66598 else
66599 seq_printf(m, "%s", symname);
66600 +#endif
66601 }
66602
66603 static int tstats_show(struct seq_file *m, void *v)
66604 @@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *
66605
66606 seq_puts(m, "Timer Stats Version: v0.2\n");
66607 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
66608 - if (atomic_read(&overflow_count))
66609 + if (atomic_read_unchecked(&overflow_count))
66610 seq_printf(m, "Overflow: %d entries\n",
66611 - atomic_read(&overflow_count));
66612 + atomic_read_unchecked(&overflow_count));
66613
66614 for (i = 0; i < nr_entries; i++) {
66615 entry = entries + i;
66616 @@ -417,7 +421,11 @@ static int __init init_tstats_procfs(voi
66617 {
66618 struct proc_dir_entry *pe;
66619
66620 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
66621 + pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
66622 +#else
66623 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
66624 +#endif
66625 if (!pe)
66626 return -ENOMEM;
66627 return 0;
66628 diff -urNp linux-3.0.8/kernel/time.c linux-3.0.8/kernel/time.c
66629 --- linux-3.0.8/kernel/time.c 2011-07-21 22:17:23.000000000 -0400
66630 +++ linux-3.0.8/kernel/time.c 2011-08-23 21:48:14.000000000 -0400
66631 @@ -163,6 +163,11 @@ int do_sys_settimeofday(const struct tim
66632 return error;
66633
66634 if (tz) {
66635 + /* we log in do_settimeofday called below, so don't log twice
66636 + */
66637 + if (!tv)
66638 + gr_log_timechange();
66639 +
66640 /* SMP safe, global irq locking makes it work. */
66641 sys_tz = *tz;
66642 update_vsyscall_tz();
66643 diff -urNp linux-3.0.8/kernel/timer.c linux-3.0.8/kernel/timer.c
66644 --- linux-3.0.8/kernel/timer.c 2011-07-21 22:17:23.000000000 -0400
66645 +++ linux-3.0.8/kernel/timer.c 2011-08-23 21:47:56.000000000 -0400
66646 @@ -1304,7 +1304,7 @@ void update_process_times(int user_tick)
66647 /*
66648 * This function runs timers and the timer-tq in bottom half context.
66649 */
66650 -static void run_timer_softirq(struct softirq_action *h)
66651 +static void run_timer_softirq(void)
66652 {
66653 struct tvec_base *base = __this_cpu_read(tvec_bases);
66654
66655 diff -urNp linux-3.0.8/kernel/trace/blktrace.c linux-3.0.8/kernel/trace/blktrace.c
66656 --- linux-3.0.8/kernel/trace/blktrace.c 2011-07-21 22:17:23.000000000 -0400
66657 +++ linux-3.0.8/kernel/trace/blktrace.c 2011-08-23 21:47:56.000000000 -0400
66658 @@ -321,7 +321,7 @@ static ssize_t blk_dropped_read(struct f
66659 struct blk_trace *bt = filp->private_data;
66660 char buf[16];
66661
66662 - snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
66663 + snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
66664
66665 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
66666 }
66667 @@ -386,7 +386,7 @@ static int blk_subbuf_start_callback(str
66668 return 1;
66669
66670 bt = buf->chan->private_data;
66671 - atomic_inc(&bt->dropped);
66672 + atomic_inc_unchecked(&bt->dropped);
66673 return 0;
66674 }
66675
66676 @@ -487,7 +487,7 @@ int do_blk_trace_setup(struct request_qu
66677
66678 bt->dir = dir;
66679 bt->dev = dev;
66680 - atomic_set(&bt->dropped, 0);
66681 + atomic_set_unchecked(&bt->dropped, 0);
66682
66683 ret = -EIO;
66684 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
66685 diff -urNp linux-3.0.8/kernel/trace/ftrace.c linux-3.0.8/kernel/trace/ftrace.c
66686 --- linux-3.0.8/kernel/trace/ftrace.c 2011-10-24 08:05:32.000000000 -0400
66687 +++ linux-3.0.8/kernel/trace/ftrace.c 2011-10-17 23:17:19.000000000 -0400
66688 @@ -1566,12 +1566,17 @@ ftrace_code_disable(struct module *mod,
66689 if (unlikely(ftrace_disabled))
66690 return 0;
66691
66692 + ret = ftrace_arch_code_modify_prepare();
66693 + FTRACE_WARN_ON(ret);
66694 + if (ret)
66695 + return 0;
66696 +
66697 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
66698 + FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
66699 if (ret) {
66700 ftrace_bug(ret, ip);
66701 - return 0;
66702 }
66703 - return 1;
66704 + return ret ? 0 : 1;
66705 }
66706
66707 /*
66708 @@ -2570,7 +2575,7 @@ static void ftrace_free_entry_rcu(struct
66709
66710 int
66711 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
66712 - void *data)
66713 + void *data)
66714 {
66715 struct ftrace_func_probe *entry;
66716 struct ftrace_page *pg;
66717 diff -urNp linux-3.0.8/kernel/trace/trace.c linux-3.0.8/kernel/trace/trace.c
66718 --- linux-3.0.8/kernel/trace/trace.c 2011-07-21 22:17:23.000000000 -0400
66719 +++ linux-3.0.8/kernel/trace/trace.c 2011-08-23 21:48:14.000000000 -0400
66720 @@ -3339,6 +3339,8 @@ static ssize_t tracing_splice_read_pipe(
66721 size_t rem;
66722 unsigned int i;
66723
66724 + pax_track_stack();
66725 +
66726 if (splice_grow_spd(pipe, &spd))
66727 return -ENOMEM;
66728
66729 @@ -3822,6 +3824,8 @@ tracing_buffers_splice_read(struct file
66730 int entries, size, i;
66731 size_t ret;
66732
66733 + pax_track_stack();
66734 +
66735 if (splice_grow_spd(pipe, &spd))
66736 return -ENOMEM;
66737
66738 @@ -3990,10 +3994,9 @@ static const struct file_operations trac
66739 };
66740 #endif
66741
66742 -static struct dentry *d_tracer;
66743 -
66744 struct dentry *tracing_init_dentry(void)
66745 {
66746 + static struct dentry *d_tracer;
66747 static int once;
66748
66749 if (d_tracer)
66750 @@ -4013,10 +4016,9 @@ struct dentry *tracing_init_dentry(void)
66751 return d_tracer;
66752 }
66753
66754 -static struct dentry *d_percpu;
66755 -
66756 struct dentry *tracing_dentry_percpu(void)
66757 {
66758 + static struct dentry *d_percpu;
66759 static int once;
66760 struct dentry *d_tracer;
66761
66762 diff -urNp linux-3.0.8/kernel/trace/trace_events.c linux-3.0.8/kernel/trace/trace_events.c
66763 --- linux-3.0.8/kernel/trace/trace_events.c 2011-10-24 08:05:21.000000000 -0400
66764 +++ linux-3.0.8/kernel/trace/trace_events.c 2011-08-23 21:47:56.000000000 -0400
66765 @@ -1318,10 +1318,6 @@ static LIST_HEAD(ftrace_module_file_list
66766 struct ftrace_module_file_ops {
66767 struct list_head list;
66768 struct module *mod;
66769 - struct file_operations id;
66770 - struct file_operations enable;
66771 - struct file_operations format;
66772 - struct file_operations filter;
66773 };
66774
66775 static struct ftrace_module_file_ops *
66776 @@ -1342,17 +1338,12 @@ trace_create_file_ops(struct module *mod
66777
66778 file_ops->mod = mod;
66779
66780 - file_ops->id = ftrace_event_id_fops;
66781 - file_ops->id.owner = mod;
66782 -
66783 - file_ops->enable = ftrace_enable_fops;
66784 - file_ops->enable.owner = mod;
66785 -
66786 - file_ops->filter = ftrace_event_filter_fops;
66787 - file_ops->filter.owner = mod;
66788 -
66789 - file_ops->format = ftrace_event_format_fops;
66790 - file_ops->format.owner = mod;
66791 + pax_open_kernel();
66792 + *(void **)&mod->trace_id.owner = mod;
66793 + *(void **)&mod->trace_enable.owner = mod;
66794 + *(void **)&mod->trace_filter.owner = mod;
66795 + *(void **)&mod->trace_format.owner = mod;
66796 + pax_close_kernel();
66797
66798 list_add(&file_ops->list, &ftrace_module_file_list);
66799
66800 @@ -1376,8 +1367,8 @@ static void trace_module_add_events(stru
66801
66802 for_each_event(call, start, end) {
66803 __trace_add_event_call(*call, mod,
66804 - &file_ops->id, &file_ops->enable,
66805 - &file_ops->filter, &file_ops->format);
66806 + &mod->trace_id, &mod->trace_enable,
66807 + &mod->trace_filter, &mod->trace_format);
66808 }
66809 }
66810
66811 diff -urNp linux-3.0.8/kernel/trace/trace_kprobe.c linux-3.0.8/kernel/trace/trace_kprobe.c
66812 --- linux-3.0.8/kernel/trace/trace_kprobe.c 2011-07-21 22:17:23.000000000 -0400
66813 +++ linux-3.0.8/kernel/trace/trace_kprobe.c 2011-10-06 04:17:55.000000000 -0400
66814 @@ -217,7 +217,7 @@ static __kprobes void FETCH_FUNC_NAME(me
66815 long ret;
66816 int maxlen = get_rloc_len(*(u32 *)dest);
66817 u8 *dst = get_rloc_data(dest);
66818 - u8 *src = addr;
66819 + const u8 __user *src = (const u8 __force_user *)addr;
66820 mm_segment_t old_fs = get_fs();
66821 if (!maxlen)
66822 return;
66823 @@ -229,7 +229,7 @@ static __kprobes void FETCH_FUNC_NAME(me
66824 pagefault_disable();
66825 do
66826 ret = __copy_from_user_inatomic(dst++, src++, 1);
66827 - while (dst[-1] && ret == 0 && src - (u8 *)addr < maxlen);
66828 + while (dst[-1] && ret == 0 && src - (const u8 __force_user *)addr < maxlen);
66829 dst[-1] = '\0';
66830 pagefault_enable();
66831 set_fs(old_fs);
66832 @@ -238,7 +238,7 @@ static __kprobes void FETCH_FUNC_NAME(me
66833 ((u8 *)get_rloc_data(dest))[0] = '\0';
66834 *(u32 *)dest = make_data_rloc(0, get_rloc_offs(*(u32 *)dest));
66835 } else
66836 - *(u32 *)dest = make_data_rloc(src - (u8 *)addr,
66837 + *(u32 *)dest = make_data_rloc(src - (const u8 __force_user *)addr,
66838 get_rloc_offs(*(u32 *)dest));
66839 }
66840 /* Return the length of string -- including null terminal byte */
66841 @@ -252,7 +252,7 @@ static __kprobes void FETCH_FUNC_NAME(me
66842 set_fs(KERNEL_DS);
66843 pagefault_disable();
66844 do {
66845 - ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1);
66846 + ret = __copy_from_user_inatomic(&c, (const u8 __force_user *)addr + len, 1);
66847 len++;
66848 } while (c && ret == 0 && len < MAX_STRING_SIZE);
66849 pagefault_enable();
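
Aside: the fetch routines above read a NUL-terminated string from an arbitrary (possibly user-space) address while in atomic context; the hunk only adds the __user/__force_user address-space annotations so sparse can check the casts. A sketch of the underlying idiom, written with the mainline __force __user spelling and a hypothetical function name:

#include <linux/types.h>
#include <linux/uaccess.h>

/* Copy at most maxlen bytes of a NUL-terminated string from addr into dst.
 * Page faults are disabled, so a faulting access fails instead of sleeping;
 * the fs override lets kernel addresses pass the access checks too. */
static void sketch_fetch_string(void *addr, u8 *dst, int maxlen)
{
	const u8 __user *src = (const u8 __force __user *)addr;
	mm_segment_t old_fs = get_fs();
	long ret;

	if (!maxlen)
		return;

	set_fs(KERNEL_DS);
	pagefault_disable();
	do
		ret = __copy_from_user_inatomic(dst++, src++, 1);
	while (dst[-1] && ret == 0 && src - (const u8 __force __user *)addr < maxlen);
	dst[-1] = '\0';
	pagefault_enable();
	set_fs(old_fs);
}
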
66850 diff -urNp linux-3.0.8/kernel/trace/trace_mmiotrace.c linux-3.0.8/kernel/trace/trace_mmiotrace.c
66851 --- linux-3.0.8/kernel/trace/trace_mmiotrace.c 2011-07-21 22:17:23.000000000 -0400
66852 +++ linux-3.0.8/kernel/trace/trace_mmiotrace.c 2011-08-23 21:47:56.000000000 -0400
66853 @@ -24,7 +24,7 @@ struct header_iter {
66854 static struct trace_array *mmio_trace_array;
66855 static bool overrun_detected;
66856 static unsigned long prev_overruns;
66857 -static atomic_t dropped_count;
66858 +static atomic_unchecked_t dropped_count;
66859
66860 static void mmio_reset_data(struct trace_array *tr)
66861 {
66862 @@ -127,7 +127,7 @@ static void mmio_close(struct trace_iter
66863
66864 static unsigned long count_overruns(struct trace_iterator *iter)
66865 {
66866 - unsigned long cnt = atomic_xchg(&dropped_count, 0);
66867 + unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
66868 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
66869
66870 if (over > prev_overruns)
66871 @@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct
66872 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
66873 sizeof(*entry), 0, pc);
66874 if (!event) {
66875 - atomic_inc(&dropped_count);
66876 + atomic_inc_unchecked(&dropped_count);
66877 return;
66878 }
66879 entry = ring_buffer_event_data(event);
66880 @@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct
66881 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
66882 sizeof(*entry), 0, pc);
66883 if (!event) {
66884 - atomic_inc(&dropped_count);
66885 + atomic_inc_unchecked(&dropped_count);
66886 return;
66887 }
66888 entry = ring_buffer_event_data(event);
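
Aside: dropped_count only feeds a statistics line, so it is switched to the atomic_unchecked_t type this patch introduces; under PAX_REFCOUNT the plain atomic_t operations trap on overflow, and counters that are allowed to wrap must opt out explicitly. The same conversion recurs in the trace_workqueue.c and memory-failure.c hunks below. A sketch, assuming the patch's atomic_unchecked_t API (not present in vanilla 3.0.8); the function names are hypothetical:

#include <linux/atomic.h>

static atomic_unchecked_t drop_count;		/* statistics only, wrapping is harmless */

static void note_dropped_event(void)
{
	atomic_inc_unchecked(&drop_count);	/* no overflow trap under PAX_REFCOUNT */
}

static unsigned long collect_and_reset_drops(void)
{
	return atomic_xchg_unchecked(&drop_count, 0);
}
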
66889 diff -urNp linux-3.0.8/kernel/trace/trace_output.c linux-3.0.8/kernel/trace/trace_output.c
66890 --- linux-3.0.8/kernel/trace/trace_output.c 2011-07-21 22:17:23.000000000 -0400
66891 +++ linux-3.0.8/kernel/trace/trace_output.c 2011-08-23 21:47:56.000000000 -0400
66892 @@ -278,7 +278,7 @@ int trace_seq_path(struct trace_seq *s,
66893
66894 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
66895 if (!IS_ERR(p)) {
66896 - p = mangle_path(s->buffer + s->len, p, "\n");
66897 + p = mangle_path(s->buffer + s->len, p, "\n\\");
66898 if (p) {
66899 s->len = p - s->buffer;
66900 return 1;
66901 diff -urNp linux-3.0.8/kernel/trace/trace_stack.c linux-3.0.8/kernel/trace/trace_stack.c
66902 --- linux-3.0.8/kernel/trace/trace_stack.c 2011-07-21 22:17:23.000000000 -0400
66903 +++ linux-3.0.8/kernel/trace/trace_stack.c 2011-08-23 21:47:56.000000000 -0400
66904 @@ -50,7 +50,7 @@ static inline void check_stack(void)
66905 return;
66906
66907 /* we do not handle interrupt stacks yet */
66908 - if (!object_is_on_stack(&this_size))
66909 + if (!object_starts_on_stack(&this_size))
66910 return;
66911
66912 local_irq_save(flags);
66913 diff -urNp linux-3.0.8/kernel/trace/trace_workqueue.c linux-3.0.8/kernel/trace/trace_workqueue.c
66914 --- linux-3.0.8/kernel/trace/trace_workqueue.c 2011-07-21 22:17:23.000000000 -0400
66915 +++ linux-3.0.8/kernel/trace/trace_workqueue.c 2011-08-23 21:47:56.000000000 -0400
66916 @@ -22,7 +22,7 @@ struct cpu_workqueue_stats {
66917 int cpu;
66918 pid_t pid;
66919 /* Can be inserted from interrupt or user context, need to be atomic */
66920 - atomic_t inserted;
66921 + atomic_unchecked_t inserted;
66922 /*
66923 * Don't need to be atomic, works are serialized in a single workqueue thread
66924 * on a single CPU.
66925 @@ -60,7 +60,7 @@ probe_workqueue_insertion(void *ignore,
66926 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
66927 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
66928 if (node->pid == wq_thread->pid) {
66929 - atomic_inc(&node->inserted);
66930 + atomic_inc_unchecked(&node->inserted);
66931 goto found;
66932 }
66933 }
66934 @@ -210,7 +210,7 @@ static int workqueue_stat_show(struct se
66935 tsk = get_pid_task(pid, PIDTYPE_PID);
66936 if (tsk) {
66937 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
66938 - atomic_read(&cws->inserted), cws->executed,
66939 + atomic_read_unchecked(&cws->inserted), cws->executed,
66940 tsk->comm);
66941 put_task_struct(tsk);
66942 }
66943 diff -urNp linux-3.0.8/lib/bitmap.c linux-3.0.8/lib/bitmap.c
66944 --- linux-3.0.8/lib/bitmap.c 2011-07-21 22:17:23.000000000 -0400
66945 +++ linux-3.0.8/lib/bitmap.c 2011-10-06 04:17:55.000000000 -0400
66946 @@ -421,7 +421,7 @@ int __bitmap_parse(const char *buf, unsi
66947 {
66948 int c, old_c, totaldigits, ndigits, nchunks, nbits;
66949 u32 chunk;
66950 - const char __user *ubuf = buf;
66951 + const char __user *ubuf = (const char __force_user *)buf;
66952
66953 bitmap_zero(maskp, nmaskbits);
66954
66955 @@ -506,7 +506,7 @@ int bitmap_parse_user(const char __user
66956 {
66957 if (!access_ok(VERIFY_READ, ubuf, ulen))
66958 return -EFAULT;
66959 - return __bitmap_parse((const char *)ubuf, ulen, 1, maskp, nmaskbits);
66960 + return __bitmap_parse((const char __force_kernel *)ubuf, ulen, 1, maskp, nmaskbits);
66961 }
66962 EXPORT_SYMBOL(bitmap_parse_user);
66963
66964 @@ -596,7 +596,7 @@ static int __bitmap_parselist(const char
66965 {
66966 unsigned a, b;
66967 int c, old_c, totaldigits;
66968 - const char __user *ubuf = buf;
66969 + const char __user *ubuf = (const char __force_user *)buf;
66970 int exp_digit, in_range;
66971
66972 totaldigits = c = 0;
66973 @@ -696,7 +696,7 @@ int bitmap_parselist_user(const char __u
66974 {
66975 if (!access_ok(VERIFY_READ, ubuf, ulen))
66976 return -EFAULT;
66977 - return __bitmap_parselist((const char *)ubuf,
66978 + return __bitmap_parselist((const char __force_kernel *)ubuf,
66979 ulen, 1, maskp, nmaskbits);
66980 }
66981 EXPORT_SYMBOL(bitmap_parselist_user);
66982 diff -urNp linux-3.0.8/lib/bug.c linux-3.0.8/lib/bug.c
66983 --- linux-3.0.8/lib/bug.c 2011-07-21 22:17:23.000000000 -0400
66984 +++ linux-3.0.8/lib/bug.c 2011-08-23 21:47:56.000000000 -0400
66985 @@ -133,6 +133,8 @@ enum bug_trap_type report_bug(unsigned l
66986 return BUG_TRAP_TYPE_NONE;
66987
66988 bug = find_bug(bugaddr);
66989 + if (!bug)
66990 + return BUG_TRAP_TYPE_NONE;
66991
66992 file = NULL;
66993 line = 0;
66994 diff -urNp linux-3.0.8/lib/debugobjects.c linux-3.0.8/lib/debugobjects.c
66995 --- linux-3.0.8/lib/debugobjects.c 2011-07-21 22:17:23.000000000 -0400
66996 +++ linux-3.0.8/lib/debugobjects.c 2011-08-23 21:47:56.000000000 -0400
66997 @@ -284,7 +284,7 @@ static void debug_object_is_on_stack(voi
66998 if (limit > 4)
66999 return;
67000
67001 - is_on_stack = object_is_on_stack(addr);
67002 + is_on_stack = object_starts_on_stack(addr);
67003 if (is_on_stack == onstack)
67004 return;
67005
67006 diff -urNp linux-3.0.8/lib/devres.c linux-3.0.8/lib/devres.c
67007 --- linux-3.0.8/lib/devres.c 2011-07-21 22:17:23.000000000 -0400
67008 +++ linux-3.0.8/lib/devres.c 2011-10-06 04:17:55.000000000 -0400
67009 @@ -81,7 +81,7 @@ void devm_iounmap(struct device *dev, vo
67010 {
67011 iounmap(addr);
67012 WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
67013 - (void *)addr));
67014 + (void __force *)addr));
67015 }
67016 EXPORT_SYMBOL(devm_iounmap);
67017
67018 @@ -141,7 +141,7 @@ void devm_ioport_unmap(struct device *de
67019 {
67020 ioport_unmap(addr);
67021 WARN_ON(devres_destroy(dev, devm_ioport_map_release,
67022 - devm_ioport_map_match, (void *)addr));
67023 + devm_ioport_map_match, (void __force *)addr));
67024 }
67025 EXPORT_SYMBOL(devm_ioport_unmap);
67026
67027 diff -urNp linux-3.0.8/lib/dma-debug.c linux-3.0.8/lib/dma-debug.c
67028 --- linux-3.0.8/lib/dma-debug.c 2011-07-21 22:17:23.000000000 -0400
67029 +++ linux-3.0.8/lib/dma-debug.c 2011-08-23 21:47:56.000000000 -0400
67030 @@ -870,7 +870,7 @@ out:
67031
67032 static void check_for_stack(struct device *dev, void *addr)
67033 {
67034 - if (object_is_on_stack(addr))
67035 + if (object_starts_on_stack(addr))
67036 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
67037 "stack [addr=%p]\n", addr);
67038 }
67039 diff -urNp linux-3.0.8/lib/extable.c linux-3.0.8/lib/extable.c
67040 --- linux-3.0.8/lib/extable.c 2011-07-21 22:17:23.000000000 -0400
67041 +++ linux-3.0.8/lib/extable.c 2011-08-23 21:47:56.000000000 -0400
67042 @@ -13,6 +13,7 @@
67043 #include <linux/init.h>
67044 #include <linux/sort.h>
67045 #include <asm/uaccess.h>
67046 +#include <asm/pgtable.h>
67047
67048 #ifndef ARCH_HAS_SORT_EXTABLE
67049 /*
67050 @@ -36,8 +37,10 @@ static int cmp_ex(const void *a, const v
67051 void sort_extable(struct exception_table_entry *start,
67052 struct exception_table_entry *finish)
67053 {
67054 + pax_open_kernel();
67055 sort(start, finish - start, sizeof(struct exception_table_entry),
67056 cmp_ex, NULL);
67057 + pax_close_kernel();
67058 }
67059
67060 #ifdef CONFIG_MODULES
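
Aside: sort_extable() rewrites the exception table in place, and with this patch's KERNEXEC/constification that table can sit in memory that is normally read-only, hence the pax_open_kernel()/pax_close_kernel() bracket (the same bracket appears in the trace_events.c and mm/highmem.c hunks). A sketch of the idiom, assuming the pax_open_kernel()/pax_close_kernel() helpers defined elsewhere in this patch; the data object and function name are hypothetical:

static const unsigned long protected_word = 1;	/* placed in a write-protected section */

static void update_protected_word(unsigned long val)
{
	pax_open_kernel();			/* e.g. clear CR0.WP / switch to a writable alias */
	*(unsigned long *)&protected_word = val;
	pax_close_kernel();			/* restore write protection */
}
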
67061 diff -urNp linux-3.0.8/lib/inflate.c linux-3.0.8/lib/inflate.c
67062 --- linux-3.0.8/lib/inflate.c 2011-07-21 22:17:23.000000000 -0400
67063 +++ linux-3.0.8/lib/inflate.c 2011-08-23 21:47:56.000000000 -0400
67064 @@ -269,7 +269,7 @@ static void free(void *where)
67065 malloc_ptr = free_mem_ptr;
67066 }
67067 #else
67068 -#define malloc(a) kmalloc(a, GFP_KERNEL)
67069 +#define malloc(a) kmalloc((a), GFP_KERNEL)
67070 #define free(a) kfree(a)
67071 #endif
67072
67073 diff -urNp linux-3.0.8/lib/Kconfig.debug linux-3.0.8/lib/Kconfig.debug
67074 --- linux-3.0.8/lib/Kconfig.debug 2011-07-21 22:17:23.000000000 -0400
67075 +++ linux-3.0.8/lib/Kconfig.debug 2011-08-23 21:48:14.000000000 -0400
67076 @@ -1088,6 +1088,7 @@ config LATENCYTOP
67077 depends on DEBUG_KERNEL
67078 depends on STACKTRACE_SUPPORT
67079 depends on PROC_FS
67080 + depends on !GRKERNSEC_HIDESYM
67081 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE
67082 select KALLSYMS
67083 select KALLSYMS_ALL
67084 diff -urNp linux-3.0.8/lib/kref.c linux-3.0.8/lib/kref.c
67085 --- linux-3.0.8/lib/kref.c 2011-07-21 22:17:23.000000000 -0400
67086 +++ linux-3.0.8/lib/kref.c 2011-08-23 21:47:56.000000000 -0400
67087 @@ -52,7 +52,7 @@ void kref_get(struct kref *kref)
67088 */
67089 int kref_put(struct kref *kref, void (*release)(struct kref *kref))
67090 {
67091 - WARN_ON(release == NULL);
67092 + BUG_ON(release == NULL);
67093 WARN_ON(release == (void (*)(struct kref *))kfree);
67094
67095 if (atomic_dec_and_test(&kref->refcount)) {
67096 diff -urNp linux-3.0.8/lib/radix-tree.c linux-3.0.8/lib/radix-tree.c
67097 --- linux-3.0.8/lib/radix-tree.c 2011-07-21 22:17:23.000000000 -0400
67098 +++ linux-3.0.8/lib/radix-tree.c 2011-08-23 21:47:56.000000000 -0400
67099 @@ -80,7 +80,7 @@ struct radix_tree_preload {
67100 int nr;
67101 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
67102 };
67103 -static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
67104 +static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
67105
67106 static inline void *ptr_to_indirect(void *ptr)
67107 {
67108 diff -urNp linux-3.0.8/lib/vsprintf.c linux-3.0.8/lib/vsprintf.c
67109 --- linux-3.0.8/lib/vsprintf.c 2011-07-21 22:17:23.000000000 -0400
67110 +++ linux-3.0.8/lib/vsprintf.c 2011-08-23 21:48:14.000000000 -0400
67111 @@ -16,6 +16,9 @@
67112 * - scnprintf and vscnprintf
67113 */
67114
67115 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67116 +#define __INCLUDED_BY_HIDESYM 1
67117 +#endif
67118 #include <stdarg.h>
67119 #include <linux/module.h>
67120 #include <linux/types.h>
67121 @@ -435,7 +438,7 @@ char *symbol_string(char *buf, char *end
67122 char sym[KSYM_SYMBOL_LEN];
67123 if (ext == 'B')
67124 sprint_backtrace(sym, value);
67125 - else if (ext != 'f' && ext != 's')
67126 + else if (ext != 'f' && ext != 's' && ext != 'a')
67127 sprint_symbol(sym, value);
67128 else
67129 kallsyms_lookup(value, NULL, NULL, NULL, sym);
67130 @@ -799,7 +802,11 @@ char *uuid_string(char *buf, char *end,
67131 return string(buf, end, uuid, spec);
67132 }
67133
67134 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67135 +int kptr_restrict __read_mostly = 2;
67136 +#else
67137 int kptr_restrict __read_mostly;
67138 +#endif
67139
67140 /*
67141 * Show a '%p' thing. A kernel extension is that the '%p' is followed
67142 @@ -813,6 +820,8 @@ int kptr_restrict __read_mostly;
67143 * - 'S' For symbolic direct pointers with offset
67144 * - 's' For symbolic direct pointers without offset
67145 * - 'B' For backtraced symbolic direct pointers with offset
67146 + * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
67147 + * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
67148 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
67149 * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
67150 * - 'M' For a 6-byte MAC address, it prints the address in the
67151 @@ -857,12 +866,12 @@ char *pointer(const char *fmt, char *buf
67152 {
67153 if (!ptr && *fmt != 'K') {
67154 /*
67155 - * Print (null) with the same width as a pointer so it makes
67156 + * Print (nil) with the same width as a pointer so it makes
67157 * tabular output look nice.
67158 */
67159 if (spec.field_width == -1)
67160 spec.field_width = 2 * sizeof(void *);
67161 - return string(buf, end, "(null)", spec);
67162 + return string(buf, end, "(nil)", spec);
67163 }
67164
67165 switch (*fmt) {
67166 @@ -872,6 +881,13 @@ char *pointer(const char *fmt, char *buf
67167 /* Fallthrough */
67168 case 'S':
67169 case 's':
67170 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67171 + break;
67172 +#else
67173 + return symbol_string(buf, end, ptr, spec, *fmt);
67174 +#endif
67175 + case 'A':
67176 + case 'a':
67177 case 'B':
67178 return symbol_string(buf, end, ptr, spec, *fmt);
67179 case 'R':
67180 @@ -1631,11 +1647,11 @@ int bstr_printf(char *buf, size_t size,
67181 typeof(type) value; \
67182 if (sizeof(type) == 8) { \
67183 args = PTR_ALIGN(args, sizeof(u32)); \
67184 - *(u32 *)&value = *(u32 *)args; \
67185 - *((u32 *)&value + 1) = *(u32 *)(args + 4); \
67186 + *(u32 *)&value = *(const u32 *)args; \
67187 + *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
67188 } else { \
67189 args = PTR_ALIGN(args, sizeof(type)); \
67190 - value = *(typeof(type) *)args; \
67191 + value = *(const typeof(type) *)args; \
67192 } \
67193 args += sizeof(type); \
67194 value; \
67195 @@ -1698,7 +1714,7 @@ int bstr_printf(char *buf, size_t size,
67196 case FORMAT_TYPE_STR: {
67197 const char *str_arg = args;
67198 args += strlen(str_arg) + 1;
67199 - str = string(str, end, (char *)str_arg, spec);
67200 + str = string(str, end, str_arg, spec);
67201 break;
67202 }
67203
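
Aside: the vsprintf.c hunk defaults kptr_restrict to 2 under GRKERNSEC_HIDESYM, stops symbolizing plain %pS/%ps in that configuration, and adds %pA/%pa as "approved" symbolic formats (with and without offset) for output that must stay readable, such as the kmemleak.c hunk further down. A usage sketch; the function is hypothetical and %pA exists only with this patch applied:

#include <linux/kernel.h>

static void sketch_report(const void *obj)
{
	/* %pK is mainline: printed as 0 for unprivileged readers once
	 * kptr_restrict >= 1 (this patch raises the default to 2). */
	pr_info("object at %pK\n", obj);

	/* %pA is added by this patch: symbol+offset output that is still
	 * produced when HIDESYM suppresses ordinary %pS symbolization. */
	pr_info("called from %pA\n", __builtin_return_address(0));
}
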
67204 diff -urNp linux-3.0.8/localversion-grsec linux-3.0.8/localversion-grsec
67205 --- linux-3.0.8/localversion-grsec 1969-12-31 19:00:00.000000000 -0500
67206 +++ linux-3.0.8/localversion-grsec 2011-08-23 21:48:14.000000000 -0400
67207 @@ -0,0 +1 @@
67208 +-grsec
67209 diff -urNp linux-3.0.8/Makefile linux-3.0.8/Makefile
67210 --- linux-3.0.8/Makefile 2011-10-25 09:10:33.000000000 -0400
67211 +++ linux-3.0.8/Makefile 2011-11-01 05:25:30.000000000 -0400
67212 @@ -245,8 +245,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH"
67213
67214 HOSTCC = gcc
67215 HOSTCXX = g++
67216 -HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
67217 -HOSTCXXFLAGS = -O2
67218 +HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
67219 +HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
67220 +HOSTCXXFLAGS = -O2 -Wall -W -fno-delete-null-pointer-checks
67221
67222 # Decide whether to build built-in, modular, or both.
67223 # Normally, just do built-in.
67224 @@ -407,8 +408,8 @@ export RCS_TAR_IGNORE := --exclude SCCS
67225 # Rules shared between *config targets and build targets
67226
67227 # Basic helpers built in scripts/
67228 -PHONY += scripts_basic
67229 -scripts_basic:
67230 +PHONY += scripts_basic gcc-plugins
67231 +scripts_basic: gcc-plugins
67232 $(Q)$(MAKE) $(build)=scripts/basic
67233 $(Q)rm -f .tmp_quiet_recordmcount
67234
67235 @@ -564,6 +565,38 @@ else
67236 KBUILD_CFLAGS += -O2
67237 endif
67238
67239 +ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(CC)"), y)
67240 +ifdef CONFIG_PAX_CONSTIFY_PLUGIN
67241 +CONSTIFY_PLUGIN := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
67242 +endif
67243 +ifdef CONFIG_PAX_MEMORY_STACKLEAK
67244 +STACKLEAK_PLUGIN := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -fplugin-arg-stackleak_plugin-track-lowest-sp=100
67245 +endif
67246 +ifdef CONFIG_KALLOCSTAT_PLUGIN
67247 +KALLOCSTAT_PLUGIN := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
67248 +endif
67249 +ifdef CONFIG_PAX_KERNEXEC_PLUGIN
67250 +KERNEXEC_PLUGIN := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
67251 +endif
67252 +ifdef CONFIG_CHECKER_PLUGIN
67253 +ifeq ($(call cc-ifversion, -ge, 0406, y), y)
67254 +CHECKER_PLUGIN := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
67255 +endif
67256 +endif
67257 +GCC_PLUGINS := $(CONSTIFY_PLUGIN) $(STACKLEAK_PLUGIN) $(KALLOCSTAT_PLUGIN) $(KERNEXEC_PLUGIN) $(CHECKER_PLUGIN)
67258 +export CONSTIFY_PLUGIN STACKLEAK_PLUGIN KERNEXEC_PLUGIN CHECKER_PLUGIN
67259 +gcc-plugins:
67260 + $(Q)$(MAKE) $(build)=tools/gcc
67261 +else
67262 +gcc-plugins:
67263 +ifeq ($(call cc-ifversion, -ge, 0405, y), y)
67264 + $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev.))
67265 +else
67266 + $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
67267 +endif
67268 + $(Q)echo "PAX_MEMORY_STACKLEAK and constification will be less secure"
67269 +endif
67270 +
67271 include $(srctree)/arch/$(SRCARCH)/Makefile
67272
67273 ifneq ($(CONFIG_FRAME_WARN),0)
67274 @@ -708,7 +741,7 @@ export mod_strip_cmd
67275
67276
67277 ifeq ($(KBUILD_EXTMOD),)
67278 -core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
67279 +core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
67280
67281 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
67282 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
67283 @@ -932,6 +965,7 @@ vmlinux.o: $(modpost-init) $(vmlinux-mai
67284
67285 # The actual objects are generated when descending,
67286 # make sure no implicit rule kicks in
67287 +$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_CFLAGS += $(GCC_PLUGINS)
67288 $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
67289
67290 # Handle descending into subdirectories listed in $(vmlinux-dirs)
67291 @@ -941,7 +975,7 @@ $(sort $(vmlinux-init) $(vmlinux-main))
67292 # Error messages still appears in the original language
67293
67294 PHONY += $(vmlinux-dirs)
67295 -$(vmlinux-dirs): prepare scripts
67296 +$(vmlinux-dirs): gcc-plugins prepare scripts
67297 $(Q)$(MAKE) $(build)=$@
67298
67299 # Store (new) KERNELRELASE string in include/config/kernel.release
67300 @@ -986,6 +1020,7 @@ prepare0: archprepare FORCE
67301 $(Q)$(MAKE) $(build)=. missing-syscalls
67302
67303 # All the preparing..
67304 +prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS),$(KBUILD_CFLAGS))
67305 prepare: prepare0
67306
67307 # Generate some files
67308 @@ -1087,6 +1122,7 @@ all: modules
67309 # using awk while concatenating to the final file.
67310
67311 PHONY += modules
67312 +modules: KBUILD_CFLAGS += $(GCC_PLUGINS)
67313 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
67314 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
67315 @$(kecho) ' Building modules, stage 2.';
67316 @@ -1102,7 +1138,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modu
67317
67318 # Target to prepare building external modules
67319 PHONY += modules_prepare
67320 -modules_prepare: prepare scripts
67321 +modules_prepare: gcc-plugins prepare scripts
67322
67323 # Target to install modules
67324 PHONY += modules_install
67325 @@ -1198,7 +1234,7 @@ distclean: mrproper
67326 @find $(srctree) $(RCS_FIND_IGNORE) \
67327 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
67328 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
67329 - -o -name '.*.rej' -o -size 0 \
67330 + -o -name '.*.rej' -o -name '*.so' -o -size 0 \
67331 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
67332 -type f -print | xargs rm -f
67333
67334 @@ -1359,6 +1395,7 @@ PHONY += $(module-dirs) modules
67335 $(module-dirs): crmodverdir $(objtree)/Module.symvers
67336 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
67337
67338 +modules: KBUILD_CFLAGS += $(GCC_PLUGINS)
67339 modules: $(module-dirs)
67340 @$(kecho) ' Building modules, stage 2.';
67341 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
67342 @@ -1485,17 +1522,19 @@ else
67343 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
67344 endif
67345
67346 -%.s: %.c prepare scripts FORCE
67347 +%.s: KBUILD_CFLAGS += $(GCC_PLUGINS)
67348 +%.s: %.c gcc-plugins prepare scripts FORCE
67349 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
67350 %.i: %.c prepare scripts FORCE
67351 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
67352 -%.o: %.c prepare scripts FORCE
67353 +%.o: KBUILD_CFLAGS += $(GCC_PLUGINS)
67354 +%.o: %.c gcc-plugins prepare scripts FORCE
67355 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
67356 %.lst: %.c prepare scripts FORCE
67357 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
67358 -%.s: %.S prepare scripts FORCE
67359 +%.s: %.S gcc-plugins prepare scripts FORCE
67360 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
67361 -%.o: %.S prepare scripts FORCE
67362 +%.o: %.S gcc-plugins prepare scripts FORCE
67363 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
67364 %.symtypes: %.c prepare scripts FORCE
67365 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
67366 @@ -1505,11 +1544,13 @@ endif
67367 $(cmd_crmodverdir)
67368 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
67369 $(build)=$(build-dir)
67370 -%/: prepare scripts FORCE
67371 +%/: KBUILD_CFLAGS += $(GCC_PLUGINS)
67372 +%/: gcc-plugins prepare scripts FORCE
67373 $(cmd_crmodverdir)
67374 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
67375 $(build)=$(build-dir)
67376 -%.ko: prepare scripts FORCE
67377 +%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS)
67378 +%.ko: gcc-plugins prepare scripts FORCE
67379 $(cmd_crmodverdir)
67380 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
67381 $(build)=$(build-dir) $(@:.ko=.o)
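
Aside: the Makefile hunks probe the host compiler with scripts/gcc-plugin.sh and, when plugin support is available, build the constify/stackleak/kallocstat/kernexec/checker plugins under tools/gcc and feed them to every kernel compile through -fplugin=. For readers unfamiliar with GCC plugins, a minimal skeleton of such a shared object follows; it is a generic example, not one of the plugins shipped by this patch, and assumes GCC's plugin headers are installed (gcc-<ver>-plugin-dev on Debian):

/* Build with:  gcc -shared -fPIC -I`gcc -print-file-name=plugin`/include \
 *              -o example_plugin.so example_plugin.c                      */
#include "gcc-plugin.h"
#include "plugin-version.h"

int plugin_is_GPL_compatible;		/* mandatory, otherwise GCC refuses to load the plugin */

int plugin_init(struct plugin_name_args *plugin_info,
		struct plugin_gcc_version *version)
{
	if (!plugin_default_version_check(version, &gcc_version))
		return 1;		/* built against a different GCC version */

	/* a real plugin (e.g. the constify or stackleak plugin above) would
	 * register its passes and callbacks here via register_callback() */
	return 0;
}
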
67382 diff -urNp linux-3.0.8/mm/filemap.c linux-3.0.8/mm/filemap.c
67383 --- linux-3.0.8/mm/filemap.c 2011-07-21 22:17:23.000000000 -0400
67384 +++ linux-3.0.8/mm/filemap.c 2011-08-23 21:48:14.000000000 -0400
67385 @@ -1763,7 +1763,7 @@ int generic_file_mmap(struct file * file
67386 struct address_space *mapping = file->f_mapping;
67387
67388 if (!mapping->a_ops->readpage)
67389 - return -ENOEXEC;
67390 + return -ENODEV;
67391 file_accessed(file);
67392 vma->vm_ops = &generic_file_vm_ops;
67393 vma->vm_flags |= VM_CAN_NONLINEAR;
67394 @@ -2169,6 +2169,7 @@ inline int generic_write_checks(struct f
67395 *pos = i_size_read(inode);
67396
67397 if (limit != RLIM_INFINITY) {
67398 + gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
67399 if (*pos >= limit) {
67400 send_sig(SIGXFSZ, current, 0);
67401 return -EFBIG;
67402 diff -urNp linux-3.0.8/mm/fremap.c linux-3.0.8/mm/fremap.c
67403 --- linux-3.0.8/mm/fremap.c 2011-07-21 22:17:23.000000000 -0400
67404 +++ linux-3.0.8/mm/fremap.c 2011-08-23 21:47:56.000000000 -0400
67405 @@ -156,6 +156,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsign
67406 retry:
67407 vma = find_vma(mm, start);
67408
67409 +#ifdef CONFIG_PAX_SEGMEXEC
67410 + if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
67411 + goto out;
67412 +#endif
67413 +
67414 /*
67415 * Make sure the vma is shared, that it supports prefaulting,
67416 * and that the remapped range is valid and fully within
67417 diff -urNp linux-3.0.8/mm/highmem.c linux-3.0.8/mm/highmem.c
67418 --- linux-3.0.8/mm/highmem.c 2011-07-21 22:17:23.000000000 -0400
67419 +++ linux-3.0.8/mm/highmem.c 2011-08-23 21:47:56.000000000 -0400
67420 @@ -125,9 +125,10 @@ static void flush_all_zero_pkmaps(void)
67421 * So no dangers, even with speculative execution.
67422 */
67423 page = pte_page(pkmap_page_table[i]);
67424 + pax_open_kernel();
67425 pte_clear(&init_mm, (unsigned long)page_address(page),
67426 &pkmap_page_table[i]);
67427 -
67428 + pax_close_kernel();
67429 set_page_address(page, NULL);
67430 need_flush = 1;
67431 }
67432 @@ -186,9 +187,11 @@ start:
67433 }
67434 }
67435 vaddr = PKMAP_ADDR(last_pkmap_nr);
67436 +
67437 + pax_open_kernel();
67438 set_pte_at(&init_mm, vaddr,
67439 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
67440 -
67441 + pax_close_kernel();
67442 pkmap_count[last_pkmap_nr] = 1;
67443 set_page_address(page, (void *)vaddr);
67444
67445 diff -urNp linux-3.0.8/mm/huge_memory.c linux-3.0.8/mm/huge_memory.c
67446 --- linux-3.0.8/mm/huge_memory.c 2011-07-21 22:17:23.000000000 -0400
67447 +++ linux-3.0.8/mm/huge_memory.c 2011-08-23 21:47:56.000000000 -0400
67448 @@ -702,7 +702,7 @@ out:
67449 * run pte_offset_map on the pmd, if an huge pmd could
67450 * materialize from under us from a different thread.
67451 */
67452 - if (unlikely(__pte_alloc(mm, vma, pmd, address)))
67453 + if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
67454 return VM_FAULT_OOM;
67455 /* if an huge pmd materialized from under us just retry later */
67456 if (unlikely(pmd_trans_huge(*pmd)))
67457 diff -urNp linux-3.0.8/mm/hugetlb.c linux-3.0.8/mm/hugetlb.c
67458 --- linux-3.0.8/mm/hugetlb.c 2011-07-21 22:17:23.000000000 -0400
67459 +++ linux-3.0.8/mm/hugetlb.c 2011-08-23 21:47:56.000000000 -0400
67460 @@ -2339,6 +2339,27 @@ static int unmap_ref_private(struct mm_s
67461 return 1;
67462 }
67463
67464 +#ifdef CONFIG_PAX_SEGMEXEC
67465 +static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
67466 +{
67467 + struct mm_struct *mm = vma->vm_mm;
67468 + struct vm_area_struct *vma_m;
67469 + unsigned long address_m;
67470 + pte_t *ptep_m;
67471 +
67472 + vma_m = pax_find_mirror_vma(vma);
67473 + if (!vma_m)
67474 + return;
67475 +
67476 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
67477 + address_m = address + SEGMEXEC_TASK_SIZE;
67478 + ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
67479 + get_page(page_m);
67480 + hugepage_add_anon_rmap(page_m, vma_m, address_m);
67481 + set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
67482 +}
67483 +#endif
67484 +
67485 /*
67486 * Hugetlb_cow() should be called with page lock of the original hugepage held.
67487 */
67488 @@ -2440,6 +2461,11 @@ retry_avoidcopy:
67489 make_huge_pte(vma, new_page, 1));
67490 page_remove_rmap(old_page);
67491 hugepage_add_new_anon_rmap(new_page, vma, address);
67492 +
67493 +#ifdef CONFIG_PAX_SEGMEXEC
67494 + pax_mirror_huge_pte(vma, address, new_page);
67495 +#endif
67496 +
67497 /* Make the old page be freed below */
67498 new_page = old_page;
67499 mmu_notifier_invalidate_range_end(mm,
67500 @@ -2591,6 +2617,10 @@ retry:
67501 && (vma->vm_flags & VM_SHARED)));
67502 set_huge_pte_at(mm, address, ptep, new_pte);
67503
67504 +#ifdef CONFIG_PAX_SEGMEXEC
67505 + pax_mirror_huge_pte(vma, address, page);
67506 +#endif
67507 +
67508 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
67509 /* Optimization, do the COW without a second fault */
67510 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
67511 @@ -2620,6 +2650,10 @@ int hugetlb_fault(struct mm_struct *mm,
67512 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
67513 struct hstate *h = hstate_vma(vma);
67514
67515 +#ifdef CONFIG_PAX_SEGMEXEC
67516 + struct vm_area_struct *vma_m;
67517 +#endif
67518 +
67519 ptep = huge_pte_offset(mm, address);
67520 if (ptep) {
67521 entry = huge_ptep_get(ptep);
67522 @@ -2631,6 +2665,26 @@ int hugetlb_fault(struct mm_struct *mm,
67523 VM_FAULT_SET_HINDEX(h - hstates);
67524 }
67525
67526 +#ifdef CONFIG_PAX_SEGMEXEC
67527 + vma_m = pax_find_mirror_vma(vma);
67528 + if (vma_m) {
67529 + unsigned long address_m;
67530 +
67531 + if (vma->vm_start > vma_m->vm_start) {
67532 + address_m = address;
67533 + address -= SEGMEXEC_TASK_SIZE;
67534 + vma = vma_m;
67535 + h = hstate_vma(vma);
67536 + } else
67537 + address_m = address + SEGMEXEC_TASK_SIZE;
67538 +
67539 + if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
67540 + return VM_FAULT_OOM;
67541 + address_m &= HPAGE_MASK;
67542 + unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
67543 + }
67544 +#endif
67545 +
67546 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
67547 if (!ptep)
67548 return VM_FAULT_OOM;
67549 diff -urNp linux-3.0.8/mm/internal.h linux-3.0.8/mm/internal.h
67550 --- linux-3.0.8/mm/internal.h 2011-07-21 22:17:23.000000000 -0400
67551 +++ linux-3.0.8/mm/internal.h 2011-08-23 21:47:56.000000000 -0400
67552 @@ -49,6 +49,7 @@ extern void putback_lru_page(struct page
67553 * in mm/page_alloc.c
67554 */
67555 extern void __free_pages_bootmem(struct page *page, unsigned int order);
67556 +extern void free_compound_page(struct page *page);
67557 extern void prep_compound_page(struct page *page, unsigned long order);
67558 #ifdef CONFIG_MEMORY_FAILURE
67559 extern bool is_free_buddy_page(struct page *page);
67560 diff -urNp linux-3.0.8/mm/Kconfig linux-3.0.8/mm/Kconfig
67561 --- linux-3.0.8/mm/Kconfig 2011-07-21 22:17:23.000000000 -0400
67562 +++ linux-3.0.8/mm/Kconfig 2011-08-23 21:48:14.000000000 -0400
67563 @@ -240,7 +240,7 @@ config KSM
67564 config DEFAULT_MMAP_MIN_ADDR
67565 int "Low address space to protect from user allocation"
67566 depends on MMU
67567 - default 4096
67568 + default 65536
67569 help
67570 This is the portion of low virtual memory which should be protected
67571 from userspace allocation. Keeping a user from writing to low pages
67572 diff -urNp linux-3.0.8/mm/kmemleak.c linux-3.0.8/mm/kmemleak.c
67573 --- linux-3.0.8/mm/kmemleak.c 2011-07-21 22:17:23.000000000 -0400
67574 +++ linux-3.0.8/mm/kmemleak.c 2011-08-23 21:48:14.000000000 -0400
67575 @@ -357,7 +357,7 @@ static void print_unreferenced(struct se
67576
67577 for (i = 0; i < object->trace_len; i++) {
67578 void *ptr = (void *)object->trace[i];
67579 - seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
67580 + seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
67581 }
67582 }
67583
67584 diff -urNp linux-3.0.8/mm/maccess.c linux-3.0.8/mm/maccess.c
67585 --- linux-3.0.8/mm/maccess.c 2011-07-21 22:17:23.000000000 -0400
67586 +++ linux-3.0.8/mm/maccess.c 2011-10-06 04:17:55.000000000 -0400
67587 @@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, cons
67588 set_fs(KERNEL_DS);
67589 pagefault_disable();
67590 ret = __copy_from_user_inatomic(dst,
67591 - (__force const void __user *)src, size);
67592 + (const void __force_user *)src, size);
67593 pagefault_enable();
67594 set_fs(old_fs);
67595
67596 @@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, con
67597
67598 set_fs(KERNEL_DS);
67599 pagefault_disable();
67600 - ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
67601 + ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
67602 pagefault_enable();
67603 set_fs(old_fs);
67604
67605 diff -urNp linux-3.0.8/mm/madvise.c linux-3.0.8/mm/madvise.c
67606 --- linux-3.0.8/mm/madvise.c 2011-07-21 22:17:23.000000000 -0400
67607 +++ linux-3.0.8/mm/madvise.c 2011-08-23 21:47:56.000000000 -0400
67608 @@ -45,6 +45,10 @@ static long madvise_behavior(struct vm_a
67609 pgoff_t pgoff;
67610 unsigned long new_flags = vma->vm_flags;
67611
67612 +#ifdef CONFIG_PAX_SEGMEXEC
67613 + struct vm_area_struct *vma_m;
67614 +#endif
67615 +
67616 switch (behavior) {
67617 case MADV_NORMAL:
67618 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
67619 @@ -110,6 +114,13 @@ success:
67620 /*
67621 * vm_flags is protected by the mmap_sem held in write mode.
67622 */
67623 +
67624 +#ifdef CONFIG_PAX_SEGMEXEC
67625 + vma_m = pax_find_mirror_vma(vma);
67626 + if (vma_m)
67627 + vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
67628 +#endif
67629 +
67630 vma->vm_flags = new_flags;
67631
67632 out:
67633 @@ -168,6 +179,11 @@ static long madvise_dontneed(struct vm_a
67634 struct vm_area_struct ** prev,
67635 unsigned long start, unsigned long end)
67636 {
67637 +
67638 +#ifdef CONFIG_PAX_SEGMEXEC
67639 + struct vm_area_struct *vma_m;
67640 +#endif
67641 +
67642 *prev = vma;
67643 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
67644 return -EINVAL;
67645 @@ -180,6 +196,21 @@ static long madvise_dontneed(struct vm_a
67646 zap_page_range(vma, start, end - start, &details);
67647 } else
67648 zap_page_range(vma, start, end - start, NULL);
67649 +
67650 +#ifdef CONFIG_PAX_SEGMEXEC
67651 + vma_m = pax_find_mirror_vma(vma);
67652 + if (vma_m) {
67653 + if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
67654 + struct zap_details details = {
67655 + .nonlinear_vma = vma_m,
67656 + .last_index = ULONG_MAX,
67657 + };
67658 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
67659 + } else
67660 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
67661 + }
67662 +#endif
67663 +
67664 return 0;
67665 }
67666
67667 @@ -376,6 +407,16 @@ SYSCALL_DEFINE3(madvise, unsigned long,
67668 if (end < start)
67669 goto out;
67670
67671 +#ifdef CONFIG_PAX_SEGMEXEC
67672 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
67673 + if (end > SEGMEXEC_TASK_SIZE)
67674 + goto out;
67675 + } else
67676 +#endif
67677 +
67678 + if (end > TASK_SIZE)
67679 + goto out;
67680 +
67681 error = 0;
67682 if (end == start)
67683 goto out;
67684 diff -urNp linux-3.0.8/mm/memory.c linux-3.0.8/mm/memory.c
67685 --- linux-3.0.8/mm/memory.c 2011-10-24 08:05:21.000000000 -0400
67686 +++ linux-3.0.8/mm/memory.c 2011-08-23 21:47:56.000000000 -0400
67687 @@ -457,8 +457,12 @@ static inline void free_pmd_range(struct
67688 return;
67689
67690 pmd = pmd_offset(pud, start);
67691 +
67692 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
67693 pud_clear(pud);
67694 pmd_free_tlb(tlb, pmd, start);
67695 +#endif
67696 +
67697 }
67698
67699 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
67700 @@ -489,9 +493,12 @@ static inline void free_pud_range(struct
67701 if (end - 1 > ceiling - 1)
67702 return;
67703
67704 +#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
67705 pud = pud_offset(pgd, start);
67706 pgd_clear(pgd);
67707 pud_free_tlb(tlb, pud, start);
67708 +#endif
67709 +
67710 }
67711
67712 /*
67713 @@ -1577,12 +1584,6 @@ no_page_table:
67714 return page;
67715 }
67716
67717 -static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
67718 -{
67719 - return stack_guard_page_start(vma, addr) ||
67720 - stack_guard_page_end(vma, addr+PAGE_SIZE);
67721 -}
67722 -
67723 /**
67724 * __get_user_pages() - pin user pages in memory
67725 * @tsk: task_struct of target task
67726 @@ -1655,10 +1656,10 @@ int __get_user_pages(struct task_struct
67727 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
67728 i = 0;
67729
67730 - do {
67731 + while (nr_pages) {
67732 struct vm_area_struct *vma;
67733
67734 - vma = find_extend_vma(mm, start);
67735 + vma = find_vma(mm, start);
67736 if (!vma && in_gate_area(mm, start)) {
67737 unsigned long pg = start & PAGE_MASK;
67738 pgd_t *pgd;
67739 @@ -1706,7 +1707,7 @@ int __get_user_pages(struct task_struct
67740 goto next_page;
67741 }
67742
67743 - if (!vma ||
67744 + if (!vma || start < vma->vm_start ||
67745 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
67746 !(vm_flags & vma->vm_flags))
67747 return i ? : -EFAULT;
67748 @@ -1733,11 +1734,6 @@ int __get_user_pages(struct task_struct
67749 int ret;
67750 unsigned int fault_flags = 0;
67751
67752 - /* For mlock, just skip the stack guard page. */
67753 - if (foll_flags & FOLL_MLOCK) {
67754 - if (stack_guard_page(vma, start))
67755 - goto next_page;
67756 - }
67757 if (foll_flags & FOLL_WRITE)
67758 fault_flags |= FAULT_FLAG_WRITE;
67759 if (nonblocking)
67760 @@ -1811,7 +1807,7 @@ next_page:
67761 start += PAGE_SIZE;
67762 nr_pages--;
67763 } while (nr_pages && start < vma->vm_end);
67764 - } while (nr_pages);
67765 + }
67766 return i;
67767 }
67768 EXPORT_SYMBOL(__get_user_pages);
67769 @@ -2018,6 +2014,10 @@ static int insert_page(struct vm_area_st
67770 page_add_file_rmap(page);
67771 set_pte_at(mm, addr, pte, mk_pte(page, prot));
67772
67773 +#ifdef CONFIG_PAX_SEGMEXEC
67774 + pax_mirror_file_pte(vma, addr, page, ptl);
67775 +#endif
67776 +
67777 retval = 0;
67778 pte_unmap_unlock(pte, ptl);
67779 return retval;
67780 @@ -2052,10 +2052,22 @@ out:
67781 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
67782 struct page *page)
67783 {
67784 +
67785 +#ifdef CONFIG_PAX_SEGMEXEC
67786 + struct vm_area_struct *vma_m;
67787 +#endif
67788 +
67789 if (addr < vma->vm_start || addr >= vma->vm_end)
67790 return -EFAULT;
67791 if (!page_count(page))
67792 return -EINVAL;
67793 +
67794 +#ifdef CONFIG_PAX_SEGMEXEC
67795 + vma_m = pax_find_mirror_vma(vma);
67796 + if (vma_m)
67797 + vma_m->vm_flags |= VM_INSERTPAGE;
67798 +#endif
67799 +
67800 vma->vm_flags |= VM_INSERTPAGE;
67801 return insert_page(vma, addr, page, vma->vm_page_prot);
67802 }
67803 @@ -2141,6 +2153,7 @@ int vm_insert_mixed(struct vm_area_struc
67804 unsigned long pfn)
67805 {
67806 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
67807 + BUG_ON(vma->vm_mirror);
67808
67809 if (addr < vma->vm_start || addr >= vma->vm_end)
67810 return -EFAULT;
67811 @@ -2456,6 +2469,186 @@ static inline void cow_user_page(struct
67812 copy_user_highpage(dst, src, va, vma);
67813 }
67814
67815 +#ifdef CONFIG_PAX_SEGMEXEC
67816 +static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
67817 +{
67818 + struct mm_struct *mm = vma->vm_mm;
67819 + spinlock_t *ptl;
67820 + pte_t *pte, entry;
67821 +
67822 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
67823 + entry = *pte;
67824 + if (!pte_present(entry)) {
67825 + if (!pte_none(entry)) {
67826 + BUG_ON(pte_file(entry));
67827 + free_swap_and_cache(pte_to_swp_entry(entry));
67828 + pte_clear_not_present_full(mm, address, pte, 0);
67829 + }
67830 + } else {
67831 + struct page *page;
67832 +
67833 + flush_cache_page(vma, address, pte_pfn(entry));
67834 + entry = ptep_clear_flush(vma, address, pte);
67835 + BUG_ON(pte_dirty(entry));
67836 + page = vm_normal_page(vma, address, entry);
67837 + if (page) {
67838 + update_hiwater_rss(mm);
67839 + if (PageAnon(page))
67840 + dec_mm_counter_fast(mm, MM_ANONPAGES);
67841 + else
67842 + dec_mm_counter_fast(mm, MM_FILEPAGES);
67843 + page_remove_rmap(page);
67844 + page_cache_release(page);
67845 + }
67846 + }
67847 + pte_unmap_unlock(pte, ptl);
67848 +}
67849 +
67850 +/* PaX: if vma is mirrored, synchronize the mirror's PTE
67851 + *
67852 + * the ptl of the lower mapped page is held on entry and is not released on exit
67853 + * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
67854 + */
67855 +static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
67856 +{
67857 + struct mm_struct *mm = vma->vm_mm;
67858 + unsigned long address_m;
67859 + spinlock_t *ptl_m;
67860 + struct vm_area_struct *vma_m;
67861 + pmd_t *pmd_m;
67862 + pte_t *pte_m, entry_m;
67863 +
67864 + BUG_ON(!page_m || !PageAnon(page_m));
67865 +
67866 + vma_m = pax_find_mirror_vma(vma);
67867 + if (!vma_m)
67868 + return;
67869 +
67870 + BUG_ON(!PageLocked(page_m));
67871 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
67872 + address_m = address + SEGMEXEC_TASK_SIZE;
67873 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
67874 + pte_m = pte_offset_map(pmd_m, address_m);
67875 + ptl_m = pte_lockptr(mm, pmd_m);
67876 + if (ptl != ptl_m) {
67877 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
67878 + if (!pte_none(*pte_m))
67879 + goto out;
67880 + }
67881 +
67882 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
67883 + page_cache_get(page_m);
67884 + page_add_anon_rmap(page_m, vma_m, address_m);
67885 + inc_mm_counter_fast(mm, MM_ANONPAGES);
67886 + set_pte_at(mm, address_m, pte_m, entry_m);
67887 + update_mmu_cache(vma_m, address_m, entry_m);
67888 +out:
67889 + if (ptl != ptl_m)
67890 + spin_unlock(ptl_m);
67891 + pte_unmap(pte_m);
67892 + unlock_page(page_m);
67893 +}
67894 +
67895 +void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
67896 +{
67897 + struct mm_struct *mm = vma->vm_mm;
67898 + unsigned long address_m;
67899 + spinlock_t *ptl_m;
67900 + struct vm_area_struct *vma_m;
67901 + pmd_t *pmd_m;
67902 + pte_t *pte_m, entry_m;
67903 +
67904 + BUG_ON(!page_m || PageAnon(page_m));
67905 +
67906 + vma_m = pax_find_mirror_vma(vma);
67907 + if (!vma_m)
67908 + return;
67909 +
67910 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
67911 + address_m = address + SEGMEXEC_TASK_SIZE;
67912 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
67913 + pte_m = pte_offset_map(pmd_m, address_m);
67914 + ptl_m = pte_lockptr(mm, pmd_m);
67915 + if (ptl != ptl_m) {
67916 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
67917 + if (!pte_none(*pte_m))
67918 + goto out;
67919 + }
67920 +
67921 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
67922 + page_cache_get(page_m);
67923 + page_add_file_rmap(page_m);
67924 + inc_mm_counter_fast(mm, MM_FILEPAGES);
67925 + set_pte_at(mm, address_m, pte_m, entry_m);
67926 + update_mmu_cache(vma_m, address_m, entry_m);
67927 +out:
67928 + if (ptl != ptl_m)
67929 + spin_unlock(ptl_m);
67930 + pte_unmap(pte_m);
67931 +}
67932 +
67933 +static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
67934 +{
67935 + struct mm_struct *mm = vma->vm_mm;
67936 + unsigned long address_m;
67937 + spinlock_t *ptl_m;
67938 + struct vm_area_struct *vma_m;
67939 + pmd_t *pmd_m;
67940 + pte_t *pte_m, entry_m;
67941 +
67942 + vma_m = pax_find_mirror_vma(vma);
67943 + if (!vma_m)
67944 + return;
67945 +
67946 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
67947 + address_m = address + SEGMEXEC_TASK_SIZE;
67948 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
67949 + pte_m = pte_offset_map(pmd_m, address_m);
67950 + ptl_m = pte_lockptr(mm, pmd_m);
67951 + if (ptl != ptl_m) {
67952 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
67953 + if (!pte_none(*pte_m))
67954 + goto out;
67955 + }
67956 +
67957 + entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
67958 + set_pte_at(mm, address_m, pte_m, entry_m);
67959 +out:
67960 + if (ptl != ptl_m)
67961 + spin_unlock(ptl_m);
67962 + pte_unmap(pte_m);
67963 +}
67964 +
67965 +static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
67966 +{
67967 + struct page *page_m;
67968 + pte_t entry;
67969 +
67970 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
67971 + goto out;
67972 +
67973 + entry = *pte;
67974 + page_m = vm_normal_page(vma, address, entry);
67975 + if (!page_m)
67976 + pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
67977 + else if (PageAnon(page_m)) {
67978 + if (pax_find_mirror_vma(vma)) {
67979 + pte_unmap_unlock(pte, ptl);
67980 + lock_page(page_m);
67981 + pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
67982 + if (pte_same(entry, *pte))
67983 + pax_mirror_anon_pte(vma, address, page_m, ptl);
67984 + else
67985 + unlock_page(page_m);
67986 + }
67987 + } else
67988 + pax_mirror_file_pte(vma, address, page_m, ptl);
67989 +
67990 +out:
67991 + pte_unmap_unlock(pte, ptl);
67992 +}
67993 +#endif
67994 +
67995 /*
67996 * This routine handles present pages, when users try to write
67997 * to a shared page. It is done by copying the page to a new address
67998 @@ -2667,6 +2860,12 @@ gotten:
67999 */
68000 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
68001 if (likely(pte_same(*page_table, orig_pte))) {
68002 +
68003 +#ifdef CONFIG_PAX_SEGMEXEC
68004 + if (pax_find_mirror_vma(vma))
68005 + BUG_ON(!trylock_page(new_page));
68006 +#endif
68007 +
68008 if (old_page) {
68009 if (!PageAnon(old_page)) {
68010 dec_mm_counter_fast(mm, MM_FILEPAGES);
68011 @@ -2718,6 +2917,10 @@ gotten:
68012 page_remove_rmap(old_page);
68013 }
68014
68015 +#ifdef CONFIG_PAX_SEGMEXEC
68016 + pax_mirror_anon_pte(vma, address, new_page, ptl);
68017 +#endif
68018 +
68019 /* Free the old page.. */
68020 new_page = old_page;
68021 ret |= VM_FAULT_WRITE;
68022 @@ -2997,6 +3200,11 @@ static int do_swap_page(struct mm_struct
68023 swap_free(entry);
68024 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
68025 try_to_free_swap(page);
68026 +
68027 +#ifdef CONFIG_PAX_SEGMEXEC
68028 + if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
68029 +#endif
68030 +
68031 unlock_page(page);
68032 if (swapcache) {
68033 /*
68034 @@ -3020,6 +3228,11 @@ static int do_swap_page(struct mm_struct
68035
68036 /* No need to invalidate - it was non-present before */
68037 update_mmu_cache(vma, address, page_table);
68038 +
68039 +#ifdef CONFIG_PAX_SEGMEXEC
68040 + pax_mirror_anon_pte(vma, address, page, ptl);
68041 +#endif
68042 +
68043 unlock:
68044 pte_unmap_unlock(page_table, ptl);
68045 out:
68046 @@ -3039,40 +3252,6 @@ out_release:
68047 }
68048
68049 /*
68050 - * This is like a special single-page "expand_{down|up}wards()",
68051 - * except we must first make sure that 'address{-|+}PAGE_SIZE'
68052 - * doesn't hit another vma.
68053 - */
68054 -static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
68055 -{
68056 - address &= PAGE_MASK;
68057 - if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
68058 - struct vm_area_struct *prev = vma->vm_prev;
68059 -
68060 - /*
68061 - * Is there a mapping abutting this one below?
68062 - *
68063 - * That's only ok if it's the same stack mapping
68064 - * that has gotten split..
68065 - */
68066 - if (prev && prev->vm_end == address)
68067 - return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
68068 -
68069 - expand_downwards(vma, address - PAGE_SIZE);
68070 - }
68071 - if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
68072 - struct vm_area_struct *next = vma->vm_next;
68073 -
68074 - /* As VM_GROWSDOWN but s/below/above/ */
68075 - if (next && next->vm_start == address + PAGE_SIZE)
68076 - return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
68077 -
68078 - expand_upwards(vma, address + PAGE_SIZE);
68079 - }
68080 - return 0;
68081 -}
68082 -
68083 -/*
68084 * We enter with non-exclusive mmap_sem (to exclude vma changes,
68085 * but allow concurrent faults), and pte mapped but not yet locked.
68086 * We return with mmap_sem still held, but pte unmapped and unlocked.
68087 @@ -3081,27 +3260,23 @@ static int do_anonymous_page(struct mm_s
68088 unsigned long address, pte_t *page_table, pmd_t *pmd,
68089 unsigned int flags)
68090 {
68091 - struct page *page;
68092 + struct page *page = NULL;
68093 spinlock_t *ptl;
68094 pte_t entry;
68095
68096 - pte_unmap(page_table);
68097 -
68098 - /* Check if we need to add a guard page to the stack */
68099 - if (check_stack_guard_page(vma, address) < 0)
68100 - return VM_FAULT_SIGBUS;
68101 -
68102 - /* Use the zero-page for reads */
68103 if (!(flags & FAULT_FLAG_WRITE)) {
68104 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
68105 vma->vm_page_prot));
68106 - page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
68107 + ptl = pte_lockptr(mm, pmd);
68108 + spin_lock(ptl);
68109 if (!pte_none(*page_table))
68110 goto unlock;
68111 goto setpte;
68112 }
68113
68114 /* Allocate our own private page. */
68115 + pte_unmap(page_table);
68116 +
68117 if (unlikely(anon_vma_prepare(vma)))
68118 goto oom;
68119 page = alloc_zeroed_user_highpage_movable(vma, address);
68120 @@ -3120,6 +3295,11 @@ static int do_anonymous_page(struct mm_s
68121 if (!pte_none(*page_table))
68122 goto release;
68123
68124 +#ifdef CONFIG_PAX_SEGMEXEC
68125 + if (pax_find_mirror_vma(vma))
68126 + BUG_ON(!trylock_page(page));
68127 +#endif
68128 +
68129 inc_mm_counter_fast(mm, MM_ANONPAGES);
68130 page_add_new_anon_rmap(page, vma, address);
68131 setpte:
68132 @@ -3127,6 +3307,12 @@ setpte:
68133
68134 /* No need to invalidate - it was non-present before */
68135 update_mmu_cache(vma, address, page_table);
68136 +
68137 +#ifdef CONFIG_PAX_SEGMEXEC
68138 + if (page)
68139 + pax_mirror_anon_pte(vma, address, page, ptl);
68140 +#endif
68141 +
68142 unlock:
68143 pte_unmap_unlock(page_table, ptl);
68144 return 0;
68145 @@ -3264,6 +3450,12 @@ static int __do_fault(struct mm_struct *
68146 */
68147 /* Only go through if we didn't race with anybody else... */
68148 if (likely(pte_same(*page_table, orig_pte))) {
68149 +
68150 +#ifdef CONFIG_PAX_SEGMEXEC
68151 + if (anon && pax_find_mirror_vma(vma))
68152 + BUG_ON(!trylock_page(page));
68153 +#endif
68154 +
68155 flush_icache_page(vma, page);
68156 entry = mk_pte(page, vma->vm_page_prot);
68157 if (flags & FAULT_FLAG_WRITE)
68158 @@ -3283,6 +3475,14 @@ static int __do_fault(struct mm_struct *
68159
68160 /* no need to invalidate: a not-present page won't be cached */
68161 update_mmu_cache(vma, address, page_table);
68162 +
68163 +#ifdef CONFIG_PAX_SEGMEXEC
68164 + if (anon)
68165 + pax_mirror_anon_pte(vma, address, page, ptl);
68166 + else
68167 + pax_mirror_file_pte(vma, address, page, ptl);
68168 +#endif
68169 +
68170 } else {
68171 if (charged)
68172 mem_cgroup_uncharge_page(page);
68173 @@ -3430,6 +3630,12 @@ int handle_pte_fault(struct mm_struct *m
68174 if (flags & FAULT_FLAG_WRITE)
68175 flush_tlb_fix_spurious_fault(vma, address);
68176 }
68177 +
68178 +#ifdef CONFIG_PAX_SEGMEXEC
68179 + pax_mirror_pte(vma, address, pte, pmd, ptl);
68180 + return 0;
68181 +#endif
68182 +
68183 unlock:
68184 pte_unmap_unlock(pte, ptl);
68185 return 0;
68186 @@ -3446,6 +3652,10 @@ int handle_mm_fault(struct mm_struct *mm
68187 pmd_t *pmd;
68188 pte_t *pte;
68189
68190 +#ifdef CONFIG_PAX_SEGMEXEC
68191 + struct vm_area_struct *vma_m;
68192 +#endif
68193 +
68194 __set_current_state(TASK_RUNNING);
68195
68196 count_vm_event(PGFAULT);
68197 @@ -3457,6 +3667,34 @@ int handle_mm_fault(struct mm_struct *mm
68198 if (unlikely(is_vm_hugetlb_page(vma)))
68199 return hugetlb_fault(mm, vma, address, flags);
68200
68201 +#ifdef CONFIG_PAX_SEGMEXEC
68202 + vma_m = pax_find_mirror_vma(vma);
68203 + if (vma_m) {
68204 + unsigned long address_m;
68205 + pgd_t *pgd_m;
68206 + pud_t *pud_m;
68207 + pmd_t *pmd_m;
68208 +
68209 + if (vma->vm_start > vma_m->vm_start) {
68210 + address_m = address;
68211 + address -= SEGMEXEC_TASK_SIZE;
68212 + vma = vma_m;
68213 + } else
68214 + address_m = address + SEGMEXEC_TASK_SIZE;
68215 +
68216 + pgd_m = pgd_offset(mm, address_m);
68217 + pud_m = pud_alloc(mm, pgd_m, address_m);
68218 + if (!pud_m)
68219 + return VM_FAULT_OOM;
68220 + pmd_m = pmd_alloc(mm, pud_m, address_m);
68221 + if (!pmd_m)
68222 + return VM_FAULT_OOM;
68223 + if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
68224 + return VM_FAULT_OOM;
68225 + pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
68226 + }
68227 +#endif
68228 +
68229 pgd = pgd_offset(mm, address);
68230 pud = pud_alloc(mm, pgd, address);
68231 if (!pud)
68232 @@ -3486,7 +3724,7 @@ int handle_mm_fault(struct mm_struct *mm
68233 * run pte_offset_map on the pmd, if an huge pmd could
68234 * materialize from under us from a different thread.
68235 */
68236 - if (unlikely(pmd_none(*pmd)) && __pte_alloc(mm, vma, pmd, address))
68237 + if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
68238 return VM_FAULT_OOM;
68239 /* if an huge pmd materialized from under us just retry later */
68240 if (unlikely(pmd_trans_huge(*pmd)))
68241 @@ -3590,7 +3828,7 @@ static int __init gate_vma_init(void)
68242 gate_vma.vm_start = FIXADDR_USER_START;
68243 gate_vma.vm_end = FIXADDR_USER_END;
68244 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
68245 - gate_vma.vm_page_prot = __P101;
68246 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
68247 /*
68248 * Make sure the vDSO gets into every core dump.
68249 * Dumping its contents makes post-mortem fully interpretable later
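
Aside: most of the memory.c additions serve PAX_SEGMEXEC, under which an executable mapping in the lower half of the split address space has a mirror vma SEGMEXEC_TASK_SIZE bytes higher, and the pax_mirror_*_pte() helpers keep both sets of PTEs in sync. The handle_mm_fault() hunk works out which half a fault landed in; a condensed sketch of that address arithmetic, assuming the SEGMEXEC_TASK_SIZE constant and pax_find_mirror_vma() added elsewhere in this patch (the function name is hypothetical):

#include <linux/mm.h>

static unsigned long sketch_mirror_address(struct vm_area_struct *vma,
					   unsigned long address)
{
	struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);

	if (!vma_m)
		return address;				/* mapping is not mirrored */

	if (vma->vm_start > vma_m->vm_start)		/* given vma is the upper copy */
		return address - SEGMEXEC_TASK_SIZE;	/* corresponding lower address */

	return address + SEGMEXEC_TASK_SIZE;		/* corresponding upper address */
}
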
68250 diff -urNp linux-3.0.8/mm/memory-failure.c linux-3.0.8/mm/memory-failure.c
68251 --- linux-3.0.8/mm/memory-failure.c 2011-07-21 22:17:23.000000000 -0400
68252 +++ linux-3.0.8/mm/memory-failure.c 2011-10-06 04:17:55.000000000 -0400
68253 @@ -59,7 +59,7 @@ int sysctl_memory_failure_early_kill __r
68254
68255 int sysctl_memory_failure_recovery __read_mostly = 1;
68256
68257 -atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
68258 +atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
68259
68260 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
68261
68262 @@ -200,7 +200,7 @@ static int kill_proc_ao(struct task_stru
68263 si.si_signo = SIGBUS;
68264 si.si_errno = 0;
68265 si.si_code = BUS_MCEERR_AO;
68266 - si.si_addr = (void *)addr;
68267 + si.si_addr = (void __user *)addr;
68268 #ifdef __ARCH_SI_TRAPNO
68269 si.si_trapno = trapno;
68270 #endif
68271 @@ -1008,7 +1008,7 @@ int __memory_failure(unsigned long pfn,
68272 }
68273
68274 nr_pages = 1 << compound_trans_order(hpage);
68275 - atomic_long_add(nr_pages, &mce_bad_pages);
68276 + atomic_long_add_unchecked(nr_pages, &mce_bad_pages);
68277
68278 /*
68279 * We need/can do nothing about count=0 pages.
68280 @@ -1038,7 +1038,7 @@ int __memory_failure(unsigned long pfn,
68281 if (!PageHWPoison(hpage)
68282 || (hwpoison_filter(p) && TestClearPageHWPoison(p))
68283 || (p != hpage && TestSetPageHWPoison(hpage))) {
68284 - atomic_long_sub(nr_pages, &mce_bad_pages);
68285 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
68286 return 0;
68287 }
68288 set_page_hwpoison_huge_page(hpage);
68289 @@ -1096,7 +1096,7 @@ int __memory_failure(unsigned long pfn,
68290 }
68291 if (hwpoison_filter(p)) {
68292 if (TestClearPageHWPoison(p))
68293 - atomic_long_sub(nr_pages, &mce_bad_pages);
68294 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
68295 unlock_page(hpage);
68296 put_page(hpage);
68297 return 0;
68298 @@ -1222,7 +1222,7 @@ int unpoison_memory(unsigned long pfn)
68299 return 0;
68300 }
68301 if (TestClearPageHWPoison(p))
68302 - atomic_long_sub(nr_pages, &mce_bad_pages);
68303 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
68304 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
68305 return 0;
68306 }
68307 @@ -1236,7 +1236,7 @@ int unpoison_memory(unsigned long pfn)
68308 */
68309 if (TestClearPageHWPoison(page)) {
68310 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
68311 - atomic_long_sub(nr_pages, &mce_bad_pages);
68312 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
68313 freeit = 1;
68314 if (PageHuge(page))
68315 clear_page_hwpoison_huge_page(page);
68316 @@ -1349,7 +1349,7 @@ static int soft_offline_huge_page(struct
68317 }
68318 done:
68319 if (!PageHWPoison(hpage))
68320 - atomic_long_add(1 << compound_trans_order(hpage), &mce_bad_pages);
68321 + atomic_long_add_unchecked(1 << compound_trans_order(hpage), &mce_bad_pages);
68322 set_page_hwpoison_huge_page(hpage);
68323 dequeue_hwpoisoned_huge_page(hpage);
68324 /* keep elevated page count for bad page */
68325 @@ -1480,7 +1480,7 @@ int soft_offline_page(struct page *page,
68326 return ret;
68327
68328 done:
68329 - atomic_long_add(1, &mce_bad_pages);
68330 + atomic_long_add_unchecked(1, &mce_bad_pages);
68331 SetPageHWPoison(page);
68332 /* keep elevated page count for bad page */
68333 return ret;
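
The mm/memory-failure.c hunks above switch the mce_bad_pages statistic from atomic_long_t to atomic_long_unchecked_t and use the *_unchecked add/sub helpers. A minimal standalone sketch (not part of the patch, and not the real kernel API) of the distinction those names suggest, assuming that under the REFCOUNT hardening ordinary atomic adds are overflow-checked while a report-only statistic does not need to be:

/*
 * Standalone sketch (NOT kernel code): a report-only statistic such as
 * mce_bad_pages can use a plain, "unchecked" atomic add, while a reference
 * count would want the overflow-rejecting variant.  Types and helpers are
 * simplified stand-ins; the checked add below is deliberately simplified
 * and not race-free.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <limits.h>

typedef struct { _Atomic long counter; } atomic_long_unchecked_t;

/* plain wrapping add: acceptable for a statistic that is only reported */
static void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *v)
{
	atomic_fetch_add_explicit(&v->counter, i, memory_order_relaxed);
}

/* "checked" add: refuses to overflow, the behaviour a refcount would want */
static int atomic_long_add_checked(long i, _Atomic long *v)
{
	long old = atomic_load_explicit(v, memory_order_relaxed);

	if (i > 0 && old > LONG_MAX - i)
		return -1;		/* would overflow: refuse */
	atomic_fetch_add_explicit(v, i, memory_order_relaxed);
	return 0;
}

int main(void)
{
	atomic_long_unchecked_t mce_bad_pages = { 0 };
	_Atomic long refcount = 1;

	atomic_long_add_unchecked(4, &mce_bad_pages);	/* 4 poisoned pages seen */
	if (atomic_long_add_checked(1, &refcount))
		return 1;

	printf("bad pages: %ld, refcount: %ld\n",
	       atomic_load_explicit(&mce_bad_pages.counter, memory_order_relaxed),
	       atomic_load_explicit(&refcount, memory_order_relaxed));
	return 0;
}
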
68334 diff -urNp linux-3.0.8/mm/mempolicy.c linux-3.0.8/mm/mempolicy.c
68335 --- linux-3.0.8/mm/mempolicy.c 2011-07-21 22:17:23.000000000 -0400
68336 +++ linux-3.0.8/mm/mempolicy.c 2011-08-23 21:48:14.000000000 -0400
68337 @@ -639,6 +639,10 @@ static int mbind_range(struct mm_struct
68338 unsigned long vmstart;
68339 unsigned long vmend;
68340
68341 +#ifdef CONFIG_PAX_SEGMEXEC
68342 + struct vm_area_struct *vma_m;
68343 +#endif
68344 +
68345 vma = find_vma_prev(mm, start, &prev);
68346 if (!vma || vma->vm_start > start)
68347 return -EFAULT;
68348 @@ -669,6 +673,16 @@ static int mbind_range(struct mm_struct
68349 err = policy_vma(vma, new_pol);
68350 if (err)
68351 goto out;
68352 +
68353 +#ifdef CONFIG_PAX_SEGMEXEC
68354 + vma_m = pax_find_mirror_vma(vma);
68355 + if (vma_m) {
68356 + err = policy_vma(vma_m, new_pol);
68357 + if (err)
68358 + goto out;
68359 + }
68360 +#endif
68361 +
68362 }
68363
68364 out:
68365 @@ -1102,6 +1116,17 @@ static long do_mbind(unsigned long start
68366
68367 if (end < start)
68368 return -EINVAL;
68369 +
68370 +#ifdef CONFIG_PAX_SEGMEXEC
68371 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
68372 + if (end > SEGMEXEC_TASK_SIZE)
68373 + return -EINVAL;
68374 + } else
68375 +#endif
68376 +
68377 + if (end > TASK_SIZE)
68378 + return -EINVAL;
68379 +
68380 if (end == start)
68381 return 0;
68382
68383 @@ -1320,6 +1345,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
68384 if (!mm)
68385 goto out;
68386
68387 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68388 + if (mm != current->mm &&
68389 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
68390 + err = -EPERM;
68391 + goto out;
68392 + }
68393 +#endif
68394 +
68395 /*
68396 * Check if this process has the right to modify the specified
68397 * process. The right exists if the process has administrative
68398 @@ -1329,8 +1362,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
68399 rcu_read_lock();
68400 tcred = __task_cred(task);
68401 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
68402 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
68403 - !capable(CAP_SYS_NICE)) {
68404 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
68405 rcu_read_unlock();
68406 err = -EPERM;
68407 goto out;
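
The mbind_range() hunks above apply the new policy to pax_find_mirror_vma(vma) as well; the mirroring itself is set up later in the mm/mmap.c hunks. As a minimal sketch (not part of the patch) of the address relationship involved: under SEGMEXEC the user address space is split in half and every executable vma in the lower half has a twin displaced by SEGMEXEC_TASK_SIZE, so any per-vma state such as a NUMA policy has to be kept in sync on both. The constant below is assumed for illustration only.

/*
 * Sketch (not kernel code): the lower-half vma and its SEGMEXEC mirror are
 * the same range shifted by SEGMEXEC_TASK_SIZE.
 */
#define SEGMEXEC_TASK_SIZE 0x60000000UL	/* assumed split point, for illustration */

struct range { unsigned long start, end; };

struct range segmexec_mirror_of(struct range exec_vma)
{
	struct range mirror = {
		.start = exec_vma.start + SEGMEXEC_TASK_SIZE,
		.end   = exec_vma.end   + SEGMEXEC_TASK_SIZE,
	};
	return mirror;
}
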
68408 diff -urNp linux-3.0.8/mm/migrate.c linux-3.0.8/mm/migrate.c
68409 --- linux-3.0.8/mm/migrate.c 2011-10-25 09:10:33.000000000 -0400
68410 +++ linux-3.0.8/mm/migrate.c 2011-10-25 09:10:41.000000000 -0400
68411 @@ -1124,6 +1124,8 @@ static int do_pages_move(struct mm_struc
68412 unsigned long chunk_start;
68413 int err;
68414
68415 + pax_track_stack();
68416 +
68417 task_nodes = cpuset_mems_allowed(task);
68418
68419 err = -ENOMEM;
68420 @@ -1308,6 +1310,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
68421 if (!mm)
68422 return -EINVAL;
68423
68424 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68425 + if (mm != current->mm &&
68426 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
68427 + err = -EPERM;
68428 + goto out;
68429 + }
68430 +#endif
68431 +
68432 /*
68433 * Check if this process has the right to modify the specified
68434 * process. The right exists if the process has administrative
68435 @@ -1317,8 +1327,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
68436 rcu_read_lock();
68437 tcred = __task_cred(task);
68438 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
68439 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
68440 - !capable(CAP_SYS_NICE)) {
68441 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
68442 rcu_read_unlock();
68443 err = -EPERM;
68444 goto out;
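
Both sys_migrate_pages() (mempolicy.c above) and sys_move_pages() (migrate.c above) gain the same CONFIG_GRKERNSEC_PROC_MEMMAP gate: a task may not drive page migration for a foreign mm whose layout is randomized or mirrored, since the per-page feedback could otherwise be used to probe that layout. A minimal sketch of the check (not kernel code; flag names come from the patch, their values and the struct are stand-ins):

/*
 * Sketch (not kernel code) of the foreign-mm gate added above.
 */
#include <stdbool.h>

#define MF_PAX_SEGMEXEC	0x01UL	/* illustrative bit values */
#define MF_PAX_RANDMMAP	0x02UL

struct mm_stub { unsigned long pax_flags; };

bool may_migrate_foreign_mm(const struct mm_stub *target,
			    const struct mm_stub *self)
{
	if (target == self)
		return true;	/* acting on our own mm is always allowed */
	/* foreign mm with hardened layout: the syscall returns -EPERM */
	return !(target->pax_flags & (MF_PAX_RANDMMAP | MF_PAX_SEGMEXEC));
}
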
68445 diff -urNp linux-3.0.8/mm/mlock.c linux-3.0.8/mm/mlock.c
68446 --- linux-3.0.8/mm/mlock.c 2011-07-21 22:17:23.000000000 -0400
68447 +++ linux-3.0.8/mm/mlock.c 2011-08-23 21:48:14.000000000 -0400
68448 @@ -13,6 +13,7 @@
68449 #include <linux/pagemap.h>
68450 #include <linux/mempolicy.h>
68451 #include <linux/syscalls.h>
68452 +#include <linux/security.h>
68453 #include <linux/sched.h>
68454 #include <linux/module.h>
68455 #include <linux/rmap.h>
68456 @@ -377,6 +378,9 @@ static int do_mlock(unsigned long start,
68457 return -EINVAL;
68458 if (end == start)
68459 return 0;
68460 + if (end > TASK_SIZE)
68461 + return -EINVAL;
68462 +
68463 vma = find_vma_prev(current->mm, start, &prev);
68464 if (!vma || vma->vm_start > start)
68465 return -ENOMEM;
68466 @@ -387,6 +391,11 @@ static int do_mlock(unsigned long start,
68467 for (nstart = start ; ; ) {
68468 vm_flags_t newflags;
68469
68470 +#ifdef CONFIG_PAX_SEGMEXEC
68471 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
68472 + break;
68473 +#endif
68474 +
68475 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
68476
68477 newflags = vma->vm_flags | VM_LOCKED;
68478 @@ -492,6 +501,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, st
68479 lock_limit >>= PAGE_SHIFT;
68480
68481 /* check against resource limits */
68482 + gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
68483 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
68484 error = do_mlock(start, len, 1);
68485 up_write(&current->mm->mmap_sem);
68486 @@ -515,17 +525,23 @@ SYSCALL_DEFINE2(munlock, unsigned long,
68487 static int do_mlockall(int flags)
68488 {
68489 struct vm_area_struct * vma, * prev = NULL;
68490 - unsigned int def_flags = 0;
68491
68492 if (flags & MCL_FUTURE)
68493 - def_flags = VM_LOCKED;
68494 - current->mm->def_flags = def_flags;
68495 + current->mm->def_flags |= VM_LOCKED;
68496 + else
68497 + current->mm->def_flags &= ~VM_LOCKED;
68498 if (flags == MCL_FUTURE)
68499 goto out;
68500
68501 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
68502 vm_flags_t newflags;
68503
68504 +#ifdef CONFIG_PAX_SEGMEXEC
68505 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
68506 + break;
68507 +#endif
68508 +
68509 + BUG_ON(vma->vm_end > TASK_SIZE);
68510 newflags = vma->vm_flags | VM_LOCKED;
68511 if (!(flags & MCL_CURRENT))
68512 newflags &= ~VM_LOCKED;
68513 @@ -557,6 +573,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
68514 lock_limit >>= PAGE_SHIFT;
68515
68516 ret = -ENOMEM;
68517 + gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
68518 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
68519 capable(CAP_IPC_LOCK))
68520 ret = do_mlockall(flags);
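
The do_mlockall() hunk above stops overwriting mm->def_flags wholesale and instead only sets or clears the VM_LOCKED bit, so any other default flags already present survive an mlockall()/munlockall() cycle. A one-function sketch of that change (not kernel code; the bit value is illustrative):

/*
 * Sketch (not kernel code) of the def_flags update in do_mlockall() above.
 */
#define VM_LOCKED 0x00002000UL

unsigned long update_def_flags(unsigned long def_flags, int mcl_future)
{
	if (mcl_future)
		return def_flags | VM_LOCKED;	/* set the bit, keep the rest */
	return def_flags & ~VM_LOCKED;		/* clear the bit, keep the rest */
}
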
68521 diff -urNp linux-3.0.8/mm/mmap.c linux-3.0.8/mm/mmap.c
68522 --- linux-3.0.8/mm/mmap.c 2011-07-21 22:17:23.000000000 -0400
68523 +++ linux-3.0.8/mm/mmap.c 2011-08-23 21:48:14.000000000 -0400
68524 @@ -46,6 +46,16 @@
68525 #define arch_rebalance_pgtables(addr, len) (addr)
68526 #endif
68527
68528 +static inline void verify_mm_writelocked(struct mm_struct *mm)
68529 +{
68530 +#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
68531 + if (unlikely(down_read_trylock(&mm->mmap_sem))) {
68532 + up_read(&mm->mmap_sem);
68533 + BUG();
68534 + }
68535 +#endif
68536 +}
68537 +
68538 static void unmap_region(struct mm_struct *mm,
68539 struct vm_area_struct *vma, struct vm_area_struct *prev,
68540 unsigned long start, unsigned long end);
68541 @@ -71,22 +81,32 @@ static void unmap_region(struct mm_struc
68542 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
68543 *
68544 */
68545 -pgprot_t protection_map[16] = {
68546 +pgprot_t protection_map[16] __read_only = {
68547 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
68548 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
68549 };
68550
68551 -pgprot_t vm_get_page_prot(unsigned long vm_flags)
68552 +pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
68553 {
68554 - return __pgprot(pgprot_val(protection_map[vm_flags &
68555 + pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
68556 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
68557 pgprot_val(arch_vm_get_page_prot(vm_flags)));
68558 +
68559 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
68560 + if (!(__supported_pte_mask & _PAGE_NX) &&
68561 + (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
68562 + (vm_flags & (VM_READ | VM_WRITE)))
68563 + prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
68564 +#endif
68565 +
68566 + return prot;
68567 }
68568 EXPORT_SYMBOL(vm_get_page_prot);
68569
68570 int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS; /* heuristic overcommit */
68571 int sysctl_overcommit_ratio __read_mostly = 50; /* default is 50% */
68572 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
68573 +unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
68574 /*
68575 * Make sure vm_committed_as in one cacheline and not cacheline shared with
68576 * other variables. It can be updated by several CPUs frequently.
68577 @@ -236,6 +256,7 @@ static struct vm_area_struct *remove_vma
68578 struct vm_area_struct *next = vma->vm_next;
68579
68580 might_sleep();
68581 + BUG_ON(vma->vm_mirror);
68582 if (vma->vm_ops && vma->vm_ops->close)
68583 vma->vm_ops->close(vma);
68584 if (vma->vm_file) {
68585 @@ -280,6 +301,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
68586 * not page aligned -Ram Gupta
68587 */
68588 rlim = rlimit(RLIMIT_DATA);
68589 + gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
68590 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
68591 (mm->end_data - mm->start_data) > rlim)
68592 goto out;
68593 @@ -697,6 +719,12 @@ static int
68594 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
68595 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
68596 {
68597 +
68598 +#ifdef CONFIG_PAX_SEGMEXEC
68599 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
68600 + return 0;
68601 +#endif
68602 +
68603 if (is_mergeable_vma(vma, file, vm_flags) &&
68604 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
68605 if (vma->vm_pgoff == vm_pgoff)
68606 @@ -716,6 +744,12 @@ static int
68607 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
68608 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
68609 {
68610 +
68611 +#ifdef CONFIG_PAX_SEGMEXEC
68612 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
68613 + return 0;
68614 +#endif
68615 +
68616 if (is_mergeable_vma(vma, file, vm_flags) &&
68617 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
68618 pgoff_t vm_pglen;
68619 @@ -758,13 +792,20 @@ can_vma_merge_after(struct vm_area_struc
68620 struct vm_area_struct *vma_merge(struct mm_struct *mm,
68621 struct vm_area_struct *prev, unsigned long addr,
68622 unsigned long end, unsigned long vm_flags,
68623 - struct anon_vma *anon_vma, struct file *file,
68624 + struct anon_vma *anon_vma, struct file *file,
68625 pgoff_t pgoff, struct mempolicy *policy)
68626 {
68627 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
68628 struct vm_area_struct *area, *next;
68629 int err;
68630
68631 +#ifdef CONFIG_PAX_SEGMEXEC
68632 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
68633 + struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
68634 +
68635 + BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
68636 +#endif
68637 +
68638 /*
68639 * We later require that vma->vm_flags == vm_flags,
68640 * so this tests vma->vm_flags & VM_SPECIAL, too.
68641 @@ -780,6 +821,15 @@ struct vm_area_struct *vma_merge(struct
68642 if (next && next->vm_end == end) /* cases 6, 7, 8 */
68643 next = next->vm_next;
68644
68645 +#ifdef CONFIG_PAX_SEGMEXEC
68646 + if (prev)
68647 + prev_m = pax_find_mirror_vma(prev);
68648 + if (area)
68649 + area_m = pax_find_mirror_vma(area);
68650 + if (next)
68651 + next_m = pax_find_mirror_vma(next);
68652 +#endif
68653 +
68654 /*
68655 * Can it merge with the predecessor?
68656 */
68657 @@ -799,9 +849,24 @@ struct vm_area_struct *vma_merge(struct
68658 /* cases 1, 6 */
68659 err = vma_adjust(prev, prev->vm_start,
68660 next->vm_end, prev->vm_pgoff, NULL);
68661 - } else /* cases 2, 5, 7 */
68662 +
68663 +#ifdef CONFIG_PAX_SEGMEXEC
68664 + if (!err && prev_m)
68665 + err = vma_adjust(prev_m, prev_m->vm_start,
68666 + next_m->vm_end, prev_m->vm_pgoff, NULL);
68667 +#endif
68668 +
68669 + } else { /* cases 2, 5, 7 */
68670 err = vma_adjust(prev, prev->vm_start,
68671 end, prev->vm_pgoff, NULL);
68672 +
68673 +#ifdef CONFIG_PAX_SEGMEXEC
68674 + if (!err && prev_m)
68675 + err = vma_adjust(prev_m, prev_m->vm_start,
68676 + end_m, prev_m->vm_pgoff, NULL);
68677 +#endif
68678 +
68679 + }
68680 if (err)
68681 return NULL;
68682 khugepaged_enter_vma_merge(prev);
68683 @@ -815,12 +880,27 @@ struct vm_area_struct *vma_merge(struct
68684 mpol_equal(policy, vma_policy(next)) &&
68685 can_vma_merge_before(next, vm_flags,
68686 anon_vma, file, pgoff+pglen)) {
68687 - if (prev && addr < prev->vm_end) /* case 4 */
68688 + if (prev && addr < prev->vm_end) { /* case 4 */
68689 err = vma_adjust(prev, prev->vm_start,
68690 addr, prev->vm_pgoff, NULL);
68691 - else /* cases 3, 8 */
68692 +
68693 +#ifdef CONFIG_PAX_SEGMEXEC
68694 + if (!err && prev_m)
68695 + err = vma_adjust(prev_m, prev_m->vm_start,
68696 + addr_m, prev_m->vm_pgoff, NULL);
68697 +#endif
68698 +
68699 + } else { /* cases 3, 8 */
68700 err = vma_adjust(area, addr, next->vm_end,
68701 next->vm_pgoff - pglen, NULL);
68702 +
68703 +#ifdef CONFIG_PAX_SEGMEXEC
68704 + if (!err && area_m)
68705 + err = vma_adjust(area_m, addr_m, next_m->vm_end,
68706 + next_m->vm_pgoff - pglen, NULL);
68707 +#endif
68708 +
68709 + }
68710 if (err)
68711 return NULL;
68712 khugepaged_enter_vma_merge(area);
68713 @@ -929,14 +1009,11 @@ none:
68714 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
68715 struct file *file, long pages)
68716 {
68717 - const unsigned long stack_flags
68718 - = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
68719 -
68720 if (file) {
68721 mm->shared_vm += pages;
68722 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
68723 mm->exec_vm += pages;
68724 - } else if (flags & stack_flags)
68725 + } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
68726 mm->stack_vm += pages;
68727 if (flags & (VM_RESERVED|VM_IO))
68728 mm->reserved_vm += pages;
68729 @@ -963,7 +1040,7 @@ unsigned long do_mmap_pgoff(struct file
68730 * (the exception is when the underlying filesystem is noexec
68731 * mounted, in which case we dont add PROT_EXEC.)
68732 */
68733 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
68734 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
68735 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
68736 prot |= PROT_EXEC;
68737
68738 @@ -989,7 +1066,7 @@ unsigned long do_mmap_pgoff(struct file
68739 /* Obtain the address to map to. we verify (or select) it and ensure
68740 * that it represents a valid section of the address space.
68741 */
68742 - addr = get_unmapped_area(file, addr, len, pgoff, flags);
68743 + addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
68744 if (addr & ~PAGE_MASK)
68745 return addr;
68746
68747 @@ -1000,6 +1077,36 @@ unsigned long do_mmap_pgoff(struct file
68748 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
68749 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
68750
68751 +#ifdef CONFIG_PAX_MPROTECT
68752 + if (mm->pax_flags & MF_PAX_MPROTECT) {
68753 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
68754 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
68755 + gr_log_rwxmmap(file);
68756 +
68757 +#ifdef CONFIG_PAX_EMUPLT
68758 + vm_flags &= ~VM_EXEC;
68759 +#else
68760 + return -EPERM;
68761 +#endif
68762 +
68763 + }
68764 +
68765 + if (!(vm_flags & VM_EXEC))
68766 + vm_flags &= ~VM_MAYEXEC;
68767 +#else
68768 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
68769 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
68770 +#endif
68771 + else
68772 + vm_flags &= ~VM_MAYWRITE;
68773 + }
68774 +#endif
68775 +
68776 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
68777 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
68778 + vm_flags &= ~VM_PAGEEXEC;
68779 +#endif
68780 +
68781 if (flags & MAP_LOCKED)
68782 if (!can_do_mlock())
68783 return -EPERM;
68784 @@ -1011,6 +1118,7 @@ unsigned long do_mmap_pgoff(struct file
68785 locked += mm->locked_vm;
68786 lock_limit = rlimit(RLIMIT_MEMLOCK);
68787 lock_limit >>= PAGE_SHIFT;
68788 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
68789 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
68790 return -EAGAIN;
68791 }
68792 @@ -1081,6 +1189,9 @@ unsigned long do_mmap_pgoff(struct file
68793 if (error)
68794 return error;
68795
68796 + if (!gr_acl_handle_mmap(file, prot))
68797 + return -EACCES;
68798 +
68799 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
68800 }
68801 EXPORT_SYMBOL(do_mmap_pgoff);
68802 @@ -1161,7 +1272,7 @@ int vma_wants_writenotify(struct vm_area
68803 vm_flags_t vm_flags = vma->vm_flags;
68804
68805 /* If it was private or non-writable, the write bit is already clear */
68806 - if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
68807 + if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
68808 return 0;
68809
68810 /* The backer wishes to know when pages are first written to? */
68811 @@ -1210,14 +1321,24 @@ unsigned long mmap_region(struct file *f
68812 unsigned long charged = 0;
68813 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
68814
68815 +#ifdef CONFIG_PAX_SEGMEXEC
68816 + struct vm_area_struct *vma_m = NULL;
68817 +#endif
68818 +
68819 + /*
68820 + * mm->mmap_sem is required to protect against another thread
68821 + * changing the mappings in case we sleep.
68822 + */
68823 + verify_mm_writelocked(mm);
68824 +
68825 /* Clear old maps */
68826 error = -ENOMEM;
68827 -munmap_back:
68828 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
68829 if (vma && vma->vm_start < addr + len) {
68830 if (do_munmap(mm, addr, len))
68831 return -ENOMEM;
68832 - goto munmap_back;
68833 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
68834 + BUG_ON(vma && vma->vm_start < addr + len);
68835 }
68836
68837 /* Check against address space limit. */
68838 @@ -1266,6 +1387,16 @@ munmap_back:
68839 goto unacct_error;
68840 }
68841
68842 +#ifdef CONFIG_PAX_SEGMEXEC
68843 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
68844 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
68845 + if (!vma_m) {
68846 + error = -ENOMEM;
68847 + goto free_vma;
68848 + }
68849 + }
68850 +#endif
68851 +
68852 vma->vm_mm = mm;
68853 vma->vm_start = addr;
68854 vma->vm_end = addr + len;
68855 @@ -1289,6 +1420,19 @@ munmap_back:
68856 error = file->f_op->mmap(file, vma);
68857 if (error)
68858 goto unmap_and_free_vma;
68859 +
68860 +#ifdef CONFIG_PAX_SEGMEXEC
68861 + if (vma_m && (vm_flags & VM_EXECUTABLE))
68862 + added_exe_file_vma(mm);
68863 +#endif
68864 +
68865 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
68866 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
68867 + vma->vm_flags |= VM_PAGEEXEC;
68868 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
68869 + }
68870 +#endif
68871 +
68872 if (vm_flags & VM_EXECUTABLE)
68873 added_exe_file_vma(mm);
68874
68875 @@ -1324,6 +1468,11 @@ munmap_back:
68876 vma_link(mm, vma, prev, rb_link, rb_parent);
68877 file = vma->vm_file;
68878
68879 +#ifdef CONFIG_PAX_SEGMEXEC
68880 + if (vma_m)
68881 + BUG_ON(pax_mirror_vma(vma_m, vma));
68882 +#endif
68883 +
68884 /* Once vma denies write, undo our temporary denial count */
68885 if (correct_wcount)
68886 atomic_inc(&inode->i_writecount);
68887 @@ -1332,6 +1481,7 @@ out:
68888
68889 mm->total_vm += len >> PAGE_SHIFT;
68890 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
68891 + track_exec_limit(mm, addr, addr + len, vm_flags);
68892 if (vm_flags & VM_LOCKED) {
68893 if (!mlock_vma_pages_range(vma, addr, addr + len))
68894 mm->locked_vm += (len >> PAGE_SHIFT);
68895 @@ -1349,6 +1499,12 @@ unmap_and_free_vma:
68896 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
68897 charged = 0;
68898 free_vma:
68899 +
68900 +#ifdef CONFIG_PAX_SEGMEXEC
68901 + if (vma_m)
68902 + kmem_cache_free(vm_area_cachep, vma_m);
68903 +#endif
68904 +
68905 kmem_cache_free(vm_area_cachep, vma);
68906 unacct_error:
68907 if (charged)
68908 @@ -1356,6 +1512,44 @@ unacct_error:
68909 return error;
68910 }
68911
68912 +bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
68913 +{
68914 + if (!vma) {
68915 +#ifdef CONFIG_STACK_GROWSUP
68916 + if (addr > sysctl_heap_stack_gap)
68917 + vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
68918 + else
68919 + vma = find_vma(current->mm, 0);
68920 + if (vma && (vma->vm_flags & VM_GROWSUP))
68921 + return false;
68922 +#endif
68923 + return true;
68924 + }
68925 +
68926 + if (addr + len > vma->vm_start)
68927 + return false;
68928 +
68929 + if (vma->vm_flags & VM_GROWSDOWN)
68930 + return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
68931 +#ifdef CONFIG_STACK_GROWSUP
68932 + else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
68933 + return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
68934 +#endif
68935 +
68936 + return true;
68937 +}
68938 +
68939 +unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
68940 +{
68941 + if (vma->vm_start < len)
68942 + return -ENOMEM;
68943 + if (!(vma->vm_flags & VM_GROWSDOWN))
68944 + return vma->vm_start - len;
68945 + if (sysctl_heap_stack_gap <= vma->vm_start - len)
68946 + return vma->vm_start - len - sysctl_heap_stack_gap;
68947 + return -ENOMEM;
68948 +}
68949 +
68950 /* Get an address range which is currently unmapped.
68951 * For shmat() with addr=0.
68952 *
68953 @@ -1382,18 +1576,23 @@ arch_get_unmapped_area(struct file *filp
68954 if (flags & MAP_FIXED)
68955 return addr;
68956
68957 +#ifdef CONFIG_PAX_RANDMMAP
68958 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
68959 +#endif
68960 +
68961 if (addr) {
68962 addr = PAGE_ALIGN(addr);
68963 - vma = find_vma(mm, addr);
68964 - if (TASK_SIZE - len >= addr &&
68965 - (!vma || addr + len <= vma->vm_start))
68966 - return addr;
68967 + if (TASK_SIZE - len >= addr) {
68968 + vma = find_vma(mm, addr);
68969 + if (check_heap_stack_gap(vma, addr, len))
68970 + return addr;
68971 + }
68972 }
68973 if (len > mm->cached_hole_size) {
68974 - start_addr = addr = mm->free_area_cache;
68975 + start_addr = addr = mm->free_area_cache;
68976 } else {
68977 - start_addr = addr = TASK_UNMAPPED_BASE;
68978 - mm->cached_hole_size = 0;
68979 + start_addr = addr = mm->mmap_base;
68980 + mm->cached_hole_size = 0;
68981 }
68982
68983 full_search:
68984 @@ -1404,34 +1603,40 @@ full_search:
68985 * Start a new search - just in case we missed
68986 * some holes.
68987 */
68988 - if (start_addr != TASK_UNMAPPED_BASE) {
68989 - addr = TASK_UNMAPPED_BASE;
68990 - start_addr = addr;
68991 + if (start_addr != mm->mmap_base) {
68992 + start_addr = addr = mm->mmap_base;
68993 mm->cached_hole_size = 0;
68994 goto full_search;
68995 }
68996 return -ENOMEM;
68997 }
68998 - if (!vma || addr + len <= vma->vm_start) {
68999 - /*
69000 - * Remember the place where we stopped the search:
69001 - */
69002 - mm->free_area_cache = addr + len;
69003 - return addr;
69004 - }
69005 + if (check_heap_stack_gap(vma, addr, len))
69006 + break;
69007 if (addr + mm->cached_hole_size < vma->vm_start)
69008 mm->cached_hole_size = vma->vm_start - addr;
69009 addr = vma->vm_end;
69010 }
69011 +
69012 + /*
69013 + * Remember the place where we stopped the search:
69014 + */
69015 + mm->free_area_cache = addr + len;
69016 + return addr;
69017 }
69018 #endif
69019
69020 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
69021 {
69022 +
69023 +#ifdef CONFIG_PAX_SEGMEXEC
69024 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
69025 + return;
69026 +#endif
69027 +
69028 /*
69029 * Is this a new hole at the lowest possible address?
69030 */
69031 - if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
69032 + if (addr >= mm->mmap_base && addr < mm->free_area_cache) {
69033 mm->free_area_cache = addr;
69034 mm->cached_hole_size = ~0UL;
69035 }
69036 @@ -1449,7 +1654,7 @@ arch_get_unmapped_area_topdown(struct fi
69037 {
69038 struct vm_area_struct *vma;
69039 struct mm_struct *mm = current->mm;
69040 - unsigned long addr = addr0;
69041 + unsigned long base = mm->mmap_base, addr = addr0;
69042
69043 /* requested length too big for entire address space */
69044 if (len > TASK_SIZE)
69045 @@ -1458,13 +1663,18 @@ arch_get_unmapped_area_topdown(struct fi
69046 if (flags & MAP_FIXED)
69047 return addr;
69048
69049 +#ifdef CONFIG_PAX_RANDMMAP
69050 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
69051 +#endif
69052 +
69053 /* requesting a specific address */
69054 if (addr) {
69055 addr = PAGE_ALIGN(addr);
69056 - vma = find_vma(mm, addr);
69057 - if (TASK_SIZE - len >= addr &&
69058 - (!vma || addr + len <= vma->vm_start))
69059 - return addr;
69060 + if (TASK_SIZE - len >= addr) {
69061 + vma = find_vma(mm, addr);
69062 + if (check_heap_stack_gap(vma, addr, len))
69063 + return addr;
69064 + }
69065 }
69066
69067 /* check if free_area_cache is useful for us */
69068 @@ -1479,7 +1689,7 @@ arch_get_unmapped_area_topdown(struct fi
69069 /* make sure it can fit in the remaining address space */
69070 if (addr > len) {
69071 vma = find_vma(mm, addr-len);
69072 - if (!vma || addr <= vma->vm_start)
69073 + if (check_heap_stack_gap(vma, addr - len, len))
69074 /* remember the address as a hint for next time */
69075 return (mm->free_area_cache = addr-len);
69076 }
69077 @@ -1496,7 +1706,7 @@ arch_get_unmapped_area_topdown(struct fi
69078 * return with success:
69079 */
69080 vma = find_vma(mm, addr);
69081 - if (!vma || addr+len <= vma->vm_start)
69082 + if (check_heap_stack_gap(vma, addr, len))
69083 /* remember the address as a hint for next time */
69084 return (mm->free_area_cache = addr);
69085
69086 @@ -1505,8 +1715,8 @@ arch_get_unmapped_area_topdown(struct fi
69087 mm->cached_hole_size = vma->vm_start - addr;
69088
69089 /* try just below the current vma->vm_start */
69090 - addr = vma->vm_start-len;
69091 - } while (len < vma->vm_start);
69092 + addr = skip_heap_stack_gap(vma, len);
69093 + } while (!IS_ERR_VALUE(addr));
69094
69095 bottomup:
69096 /*
69097 @@ -1515,13 +1725,21 @@ bottomup:
69098 * can happen with large stack limits and large mmap()
69099 * allocations.
69100 */
69101 + mm->mmap_base = TASK_UNMAPPED_BASE;
69102 +
69103 +#ifdef CONFIG_PAX_RANDMMAP
69104 + if (mm->pax_flags & MF_PAX_RANDMMAP)
69105 + mm->mmap_base += mm->delta_mmap;
69106 +#endif
69107 +
69108 + mm->free_area_cache = mm->mmap_base;
69109 mm->cached_hole_size = ~0UL;
69110 - mm->free_area_cache = TASK_UNMAPPED_BASE;
69111 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
69112 /*
69113 * Restore the topdown base:
69114 */
69115 - mm->free_area_cache = mm->mmap_base;
69116 + mm->mmap_base = base;
69117 + mm->free_area_cache = base;
69118 mm->cached_hole_size = ~0UL;
69119
69120 return addr;
69121 @@ -1530,6 +1748,12 @@ bottomup:
69122
69123 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
69124 {
69125 +
69126 +#ifdef CONFIG_PAX_SEGMEXEC
69127 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
69128 + return;
69129 +#endif
69130 +
69131 /*
69132 * Is this a new hole at the highest possible address?
69133 */
69134 @@ -1537,8 +1761,10 @@ void arch_unmap_area_topdown(struct mm_s
69135 mm->free_area_cache = addr;
69136
69137 /* dont allow allocations above current base */
69138 - if (mm->free_area_cache > mm->mmap_base)
69139 + if (mm->free_area_cache > mm->mmap_base) {
69140 mm->free_area_cache = mm->mmap_base;
69141 + mm->cached_hole_size = ~0UL;
69142 + }
69143 }
69144
69145 unsigned long
69146 @@ -1646,6 +1872,28 @@ out:
69147 return prev ? prev->vm_next : vma;
69148 }
69149
69150 +#ifdef CONFIG_PAX_SEGMEXEC
69151 +struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
69152 +{
69153 + struct vm_area_struct *vma_m;
69154 +
69155 + BUG_ON(!vma || vma->vm_start >= vma->vm_end);
69156 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
69157 + BUG_ON(vma->vm_mirror);
69158 + return NULL;
69159 + }
69160 + BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
69161 + vma_m = vma->vm_mirror;
69162 + BUG_ON(!vma_m || vma_m->vm_mirror != vma);
69163 + BUG_ON(vma->vm_file != vma_m->vm_file);
69164 + BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
69165 + BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
69166 + BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
69167 + BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
69168 + return vma_m;
69169 +}
69170 +#endif
69171 +
69172 /*
69173 * Verify that the stack growth is acceptable and
69174 * update accounting. This is shared with both the
69175 @@ -1662,6 +1910,7 @@ static int acct_stack_growth(struct vm_a
69176 return -ENOMEM;
69177
69178 /* Stack limit test */
69179 + gr_learn_resource(current, RLIMIT_STACK, size, 1);
69180 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
69181 return -ENOMEM;
69182
69183 @@ -1672,6 +1921,7 @@ static int acct_stack_growth(struct vm_a
69184 locked = mm->locked_vm + grow;
69185 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
69186 limit >>= PAGE_SHIFT;
69187 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
69188 if (locked > limit && !capable(CAP_IPC_LOCK))
69189 return -ENOMEM;
69190 }
69191 @@ -1702,37 +1952,48 @@ static int acct_stack_growth(struct vm_a
69192 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
69193 * vma is the last one with address > vma->vm_end. Have to extend vma.
69194 */
69195 +#ifndef CONFIG_IA64
69196 +static
69197 +#endif
69198 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
69199 {
69200 int error;
69201 + bool locknext;
69202
69203 if (!(vma->vm_flags & VM_GROWSUP))
69204 return -EFAULT;
69205
69206 + /* Also guard against wrapping around to address 0. */
69207 + if (address < PAGE_ALIGN(address+1))
69208 + address = PAGE_ALIGN(address+1);
69209 + else
69210 + return -ENOMEM;
69211 +
69212 /*
69213 * We must make sure the anon_vma is allocated
69214 * so that the anon_vma locking is not a noop.
69215 */
69216 if (unlikely(anon_vma_prepare(vma)))
69217 return -ENOMEM;
69218 + locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
69219 + if (locknext && anon_vma_prepare(vma->vm_next))
69220 + return -ENOMEM;
69221 vma_lock_anon_vma(vma);
69222 + if (locknext)
69223 + vma_lock_anon_vma(vma->vm_next);
69224
69225 /*
69226 * vma->vm_start/vm_end cannot change under us because the caller
69227 * is required to hold the mmap_sem in read mode. We need the
69228 - * anon_vma lock to serialize against concurrent expand_stacks.
69229 - * Also guard against wrapping around to address 0.
69230 + * anon_vma locks to serialize against concurrent expand_stacks
69231 + * and expand_upwards.
69232 */
69233 - if (address < PAGE_ALIGN(address+4))
69234 - address = PAGE_ALIGN(address+4);
69235 - else {
69236 - vma_unlock_anon_vma(vma);
69237 - return -ENOMEM;
69238 - }
69239 error = 0;
69240
69241 /* Somebody else might have raced and expanded it already */
69242 - if (address > vma->vm_end) {
69243 + if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
69244 + error = -ENOMEM;
69245 + else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
69246 unsigned long size, grow;
69247
69248 size = address - vma->vm_start;
69249 @@ -1747,6 +2008,8 @@ int expand_upwards(struct vm_area_struct
69250 }
69251 }
69252 }
69253 + if (locknext)
69254 + vma_unlock_anon_vma(vma->vm_next);
69255 vma_unlock_anon_vma(vma);
69256 khugepaged_enter_vma_merge(vma);
69257 return error;
69258 @@ -1760,6 +2023,8 @@ int expand_downwards(struct vm_area_stru
69259 unsigned long address)
69260 {
69261 int error;
69262 + bool lockprev = false;
69263 + struct vm_area_struct *prev;
69264
69265 /*
69266 * We must make sure the anon_vma is allocated
69267 @@ -1773,6 +2038,15 @@ int expand_downwards(struct vm_area_stru
69268 if (error)
69269 return error;
69270
69271 + prev = vma->vm_prev;
69272 +#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
69273 + lockprev = prev && (prev->vm_flags & VM_GROWSUP);
69274 +#endif
69275 + if (lockprev && anon_vma_prepare(prev))
69276 + return -ENOMEM;
69277 + if (lockprev)
69278 + vma_lock_anon_vma(prev);
69279 +
69280 vma_lock_anon_vma(vma);
69281
69282 /*
69283 @@ -1782,9 +2056,17 @@ int expand_downwards(struct vm_area_stru
69284 */
69285
69286 /* Somebody else might have raced and expanded it already */
69287 - if (address < vma->vm_start) {
69288 + if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
69289 + error = -ENOMEM;
69290 + else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
69291 unsigned long size, grow;
69292
69293 +#ifdef CONFIG_PAX_SEGMEXEC
69294 + struct vm_area_struct *vma_m;
69295 +
69296 + vma_m = pax_find_mirror_vma(vma);
69297 +#endif
69298 +
69299 size = vma->vm_end - address;
69300 grow = (vma->vm_start - address) >> PAGE_SHIFT;
69301
69302 @@ -1794,11 +2076,22 @@ int expand_downwards(struct vm_area_stru
69303 if (!error) {
69304 vma->vm_start = address;
69305 vma->vm_pgoff -= grow;
69306 + track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
69307 +
69308 +#ifdef CONFIG_PAX_SEGMEXEC
69309 + if (vma_m) {
69310 + vma_m->vm_start -= grow << PAGE_SHIFT;
69311 + vma_m->vm_pgoff -= grow;
69312 + }
69313 +#endif
69314 +
69315 perf_event_mmap(vma);
69316 }
69317 }
69318 }
69319 vma_unlock_anon_vma(vma);
69320 + if (lockprev)
69321 + vma_unlock_anon_vma(prev);
69322 khugepaged_enter_vma_merge(vma);
69323 return error;
69324 }
69325 @@ -1868,6 +2161,13 @@ static void remove_vma_list(struct mm_st
69326 do {
69327 long nrpages = vma_pages(vma);
69328
69329 +#ifdef CONFIG_PAX_SEGMEXEC
69330 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
69331 + vma = remove_vma(vma);
69332 + continue;
69333 + }
69334 +#endif
69335 +
69336 mm->total_vm -= nrpages;
69337 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
69338 vma = remove_vma(vma);
69339 @@ -1913,6 +2213,16 @@ detach_vmas_to_be_unmapped(struct mm_str
69340 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
69341 vma->vm_prev = NULL;
69342 do {
69343 +
69344 +#ifdef CONFIG_PAX_SEGMEXEC
69345 + if (vma->vm_mirror) {
69346 + BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
69347 + vma->vm_mirror->vm_mirror = NULL;
69348 + vma->vm_mirror->vm_flags &= ~VM_EXEC;
69349 + vma->vm_mirror = NULL;
69350 + }
69351 +#endif
69352 +
69353 rb_erase(&vma->vm_rb, &mm->mm_rb);
69354 mm->map_count--;
69355 tail_vma = vma;
69356 @@ -1941,14 +2251,33 @@ static int __split_vma(struct mm_struct
69357 struct vm_area_struct *new;
69358 int err = -ENOMEM;
69359
69360 +#ifdef CONFIG_PAX_SEGMEXEC
69361 + struct vm_area_struct *vma_m, *new_m = NULL;
69362 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
69363 +#endif
69364 +
69365 if (is_vm_hugetlb_page(vma) && (addr &
69366 ~(huge_page_mask(hstate_vma(vma)))))
69367 return -EINVAL;
69368
69369 +#ifdef CONFIG_PAX_SEGMEXEC
69370 + vma_m = pax_find_mirror_vma(vma);
69371 +#endif
69372 +
69373 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
69374 if (!new)
69375 goto out_err;
69376
69377 +#ifdef CONFIG_PAX_SEGMEXEC
69378 + if (vma_m) {
69379 + new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
69380 + if (!new_m) {
69381 + kmem_cache_free(vm_area_cachep, new);
69382 + goto out_err;
69383 + }
69384 + }
69385 +#endif
69386 +
69387 /* most fields are the same, copy all, and then fixup */
69388 *new = *vma;
69389
69390 @@ -1961,6 +2290,22 @@ static int __split_vma(struct mm_struct
69391 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
69392 }
69393
69394 +#ifdef CONFIG_PAX_SEGMEXEC
69395 + if (vma_m) {
69396 + *new_m = *vma_m;
69397 + INIT_LIST_HEAD(&new_m->anon_vma_chain);
69398 + new_m->vm_mirror = new;
69399 + new->vm_mirror = new_m;
69400 +
69401 + if (new_below)
69402 + new_m->vm_end = addr_m;
69403 + else {
69404 + new_m->vm_start = addr_m;
69405 + new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
69406 + }
69407 + }
69408 +#endif
69409 +
69410 pol = mpol_dup(vma_policy(vma));
69411 if (IS_ERR(pol)) {
69412 err = PTR_ERR(pol);
69413 @@ -1986,6 +2331,42 @@ static int __split_vma(struct mm_struct
69414 else
69415 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
69416
69417 +#ifdef CONFIG_PAX_SEGMEXEC
69418 + if (!err && vma_m) {
69419 + if (anon_vma_clone(new_m, vma_m))
69420 + goto out_free_mpol;
69421 +
69422 + mpol_get(pol);
69423 + vma_set_policy(new_m, pol);
69424 +
69425 + if (new_m->vm_file) {
69426 + get_file(new_m->vm_file);
69427 + if (vma_m->vm_flags & VM_EXECUTABLE)
69428 + added_exe_file_vma(mm);
69429 + }
69430 +
69431 + if (new_m->vm_ops && new_m->vm_ops->open)
69432 + new_m->vm_ops->open(new_m);
69433 +
69434 + if (new_below)
69435 + err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
69436 + ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
69437 + else
69438 + err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
69439 +
69440 + if (err) {
69441 + if (new_m->vm_ops && new_m->vm_ops->close)
69442 + new_m->vm_ops->close(new_m);
69443 + if (new_m->vm_file) {
69444 + if (vma_m->vm_flags & VM_EXECUTABLE)
69445 + removed_exe_file_vma(mm);
69446 + fput(new_m->vm_file);
69447 + }
69448 + mpol_put(pol);
69449 + }
69450 + }
69451 +#endif
69452 +
69453 /* Success. */
69454 if (!err)
69455 return 0;
69456 @@ -1998,10 +2379,18 @@ static int __split_vma(struct mm_struct
69457 removed_exe_file_vma(mm);
69458 fput(new->vm_file);
69459 }
69460 - unlink_anon_vmas(new);
69461 out_free_mpol:
69462 mpol_put(pol);
69463 out_free_vma:
69464 +
69465 +#ifdef CONFIG_PAX_SEGMEXEC
69466 + if (new_m) {
69467 + unlink_anon_vmas(new_m);
69468 + kmem_cache_free(vm_area_cachep, new_m);
69469 + }
69470 +#endif
69471 +
69472 + unlink_anon_vmas(new);
69473 kmem_cache_free(vm_area_cachep, new);
69474 out_err:
69475 return err;
69476 @@ -2014,6 +2403,15 @@ static int __split_vma(struct mm_struct
69477 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
69478 unsigned long addr, int new_below)
69479 {
69480 +
69481 +#ifdef CONFIG_PAX_SEGMEXEC
69482 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
69483 + BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
69484 + if (mm->map_count >= sysctl_max_map_count-1)
69485 + return -ENOMEM;
69486 + } else
69487 +#endif
69488 +
69489 if (mm->map_count >= sysctl_max_map_count)
69490 return -ENOMEM;
69491
69492 @@ -2025,11 +2423,30 @@ int split_vma(struct mm_struct *mm, stru
69493 * work. This now handles partial unmappings.
69494 * Jeremy Fitzhardinge <jeremy@goop.org>
69495 */
69496 +#ifdef CONFIG_PAX_SEGMEXEC
69497 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
69498 {
69499 + int ret = __do_munmap(mm, start, len);
69500 + if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
69501 + return ret;
69502 +
69503 + return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
69504 +}
69505 +
69506 +int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
69507 +#else
69508 +int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
69509 +#endif
69510 +{
69511 unsigned long end;
69512 struct vm_area_struct *vma, *prev, *last;
69513
69514 + /*
69515 + * mm->mmap_sem is required to protect against another thread
69516 + * changing the mappings in case we sleep.
69517 + */
69518 + verify_mm_writelocked(mm);
69519 +
69520 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
69521 return -EINVAL;
69522
69523 @@ -2104,6 +2521,8 @@ int do_munmap(struct mm_struct *mm, unsi
69524 /* Fix up all other VM information */
69525 remove_vma_list(mm, vma);
69526
69527 + track_exec_limit(mm, start, end, 0UL);
69528 +
69529 return 0;
69530 }
69531
69532 @@ -2116,22 +2535,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, a
69533
69534 profile_munmap(addr);
69535
69536 +#ifdef CONFIG_PAX_SEGMEXEC
69537 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
69538 + (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
69539 + return -EINVAL;
69540 +#endif
69541 +
69542 down_write(&mm->mmap_sem);
69543 ret = do_munmap(mm, addr, len);
69544 up_write(&mm->mmap_sem);
69545 return ret;
69546 }
69547
69548 -static inline void verify_mm_writelocked(struct mm_struct *mm)
69549 -{
69550 -#ifdef CONFIG_DEBUG_VM
69551 - if (unlikely(down_read_trylock(&mm->mmap_sem))) {
69552 - WARN_ON(1);
69553 - up_read(&mm->mmap_sem);
69554 - }
69555 -#endif
69556 -}
69557 -
69558 /*
69559 * this is really a simplified "do_mmap". it only handles
69560 * anonymous maps. eventually we may be able to do some
69561 @@ -2145,6 +2560,7 @@ unsigned long do_brk(unsigned long addr,
69562 struct rb_node ** rb_link, * rb_parent;
69563 pgoff_t pgoff = addr >> PAGE_SHIFT;
69564 int error;
69565 + unsigned long charged;
69566
69567 len = PAGE_ALIGN(len);
69568 if (!len)
69569 @@ -2156,16 +2572,30 @@ unsigned long do_brk(unsigned long addr,
69570
69571 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
69572
69573 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
69574 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
69575 + flags &= ~VM_EXEC;
69576 +
69577 +#ifdef CONFIG_PAX_MPROTECT
69578 + if (mm->pax_flags & MF_PAX_MPROTECT)
69579 + flags &= ~VM_MAYEXEC;
69580 +#endif
69581 +
69582 + }
69583 +#endif
69584 +
69585 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
69586 if (error & ~PAGE_MASK)
69587 return error;
69588
69589 + charged = len >> PAGE_SHIFT;
69590 +
69591 /*
69592 * mlock MCL_FUTURE?
69593 */
69594 if (mm->def_flags & VM_LOCKED) {
69595 unsigned long locked, lock_limit;
69596 - locked = len >> PAGE_SHIFT;
69597 + locked = charged;
69598 locked += mm->locked_vm;
69599 lock_limit = rlimit(RLIMIT_MEMLOCK);
69600 lock_limit >>= PAGE_SHIFT;
69601 @@ -2182,22 +2612,22 @@ unsigned long do_brk(unsigned long addr,
69602 /*
69603 * Clear old maps. this also does some error checking for us
69604 */
69605 - munmap_back:
69606 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
69607 if (vma && vma->vm_start < addr + len) {
69608 if (do_munmap(mm, addr, len))
69609 return -ENOMEM;
69610 - goto munmap_back;
69611 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
69612 + BUG_ON(vma && vma->vm_start < addr + len);
69613 }
69614
69615 /* Check against address space limits *after* clearing old maps... */
69616 - if (!may_expand_vm(mm, len >> PAGE_SHIFT))
69617 + if (!may_expand_vm(mm, charged))
69618 return -ENOMEM;
69619
69620 if (mm->map_count > sysctl_max_map_count)
69621 return -ENOMEM;
69622
69623 - if (security_vm_enough_memory(len >> PAGE_SHIFT))
69624 + if (security_vm_enough_memory(charged))
69625 return -ENOMEM;
69626
69627 /* Can we just expand an old private anonymous mapping? */
69628 @@ -2211,7 +2641,7 @@ unsigned long do_brk(unsigned long addr,
69629 */
69630 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
69631 if (!vma) {
69632 - vm_unacct_memory(len >> PAGE_SHIFT);
69633 + vm_unacct_memory(charged);
69634 return -ENOMEM;
69635 }
69636
69637 @@ -2225,11 +2655,12 @@ unsigned long do_brk(unsigned long addr,
69638 vma_link(mm, vma, prev, rb_link, rb_parent);
69639 out:
69640 perf_event_mmap(vma);
69641 - mm->total_vm += len >> PAGE_SHIFT;
69642 + mm->total_vm += charged;
69643 if (flags & VM_LOCKED) {
69644 if (!mlock_vma_pages_range(vma, addr, addr + len))
69645 - mm->locked_vm += (len >> PAGE_SHIFT);
69646 + mm->locked_vm += charged;
69647 }
69648 + track_exec_limit(mm, addr, addr + len, flags);
69649 return addr;
69650 }
69651
69652 @@ -2276,8 +2707,10 @@ void exit_mmap(struct mm_struct *mm)
69653 * Walk the list again, actually closing and freeing it,
69654 * with preemption enabled, without holding any MM locks.
69655 */
69656 - while (vma)
69657 + while (vma) {
69658 + vma->vm_mirror = NULL;
69659 vma = remove_vma(vma);
69660 + }
69661
69662 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
69663 }
69664 @@ -2291,6 +2724,13 @@ int insert_vm_struct(struct mm_struct *
69665 struct vm_area_struct * __vma, * prev;
69666 struct rb_node ** rb_link, * rb_parent;
69667
69668 +#ifdef CONFIG_PAX_SEGMEXEC
69669 + struct vm_area_struct *vma_m = NULL;
69670 +#endif
69671 +
69672 + if (security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1))
69673 + return -EPERM;
69674 +
69675 /*
69676 * The vm_pgoff of a purely anonymous vma should be irrelevant
69677 * until its first write fault, when page's anon_vma and index
69678 @@ -2313,7 +2753,22 @@ int insert_vm_struct(struct mm_struct *
69679 if ((vma->vm_flags & VM_ACCOUNT) &&
69680 security_vm_enough_memory_mm(mm, vma_pages(vma)))
69681 return -ENOMEM;
69682 +
69683 +#ifdef CONFIG_PAX_SEGMEXEC
69684 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
69685 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
69686 + if (!vma_m)
69687 + return -ENOMEM;
69688 + }
69689 +#endif
69690 +
69691 vma_link(mm, vma, prev, rb_link, rb_parent);
69692 +
69693 +#ifdef CONFIG_PAX_SEGMEXEC
69694 + if (vma_m)
69695 + BUG_ON(pax_mirror_vma(vma_m, vma));
69696 +#endif
69697 +
69698 return 0;
69699 }
69700
69701 @@ -2331,6 +2786,8 @@ struct vm_area_struct *copy_vma(struct v
69702 struct rb_node **rb_link, *rb_parent;
69703 struct mempolicy *pol;
69704
69705 + BUG_ON(vma->vm_mirror);
69706 +
69707 /*
69708 * If anonymous vma has not yet been faulted, update new pgoff
69709 * to match new location, to increase its chance of merging.
69710 @@ -2381,6 +2838,39 @@ struct vm_area_struct *copy_vma(struct v
69711 return NULL;
69712 }
69713
69714 +#ifdef CONFIG_PAX_SEGMEXEC
69715 +long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
69716 +{
69717 + struct vm_area_struct *prev_m;
69718 + struct rb_node **rb_link_m, *rb_parent_m;
69719 + struct mempolicy *pol_m;
69720 +
69721 + BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
69722 + BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
69723 + BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
69724 + *vma_m = *vma;
69725 + INIT_LIST_HEAD(&vma_m->anon_vma_chain);
69726 + if (anon_vma_clone(vma_m, vma))
69727 + return -ENOMEM;
69728 + pol_m = vma_policy(vma_m);
69729 + mpol_get(pol_m);
69730 + vma_set_policy(vma_m, pol_m);
69731 + vma_m->vm_start += SEGMEXEC_TASK_SIZE;
69732 + vma_m->vm_end += SEGMEXEC_TASK_SIZE;
69733 + vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
69734 + vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
69735 + if (vma_m->vm_file)
69736 + get_file(vma_m->vm_file);
69737 + if (vma_m->vm_ops && vma_m->vm_ops->open)
69738 + vma_m->vm_ops->open(vma_m);
69739 + find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
69740 + vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
69741 + vma_m->vm_mirror = vma;
69742 + vma->vm_mirror = vma_m;
69743 + return 0;
69744 +}
69745 +#endif
69746 +
69747 /*
69748 * Return true if the calling process may expand its vm space by the passed
69749 * number of pages
69750 @@ -2391,7 +2881,7 @@ int may_expand_vm(struct mm_struct *mm,
69751 unsigned long lim;
69752
69753 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
69754 -
69755 + gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
69756 if (cur + npages > lim)
69757 return 0;
69758 return 1;
69759 @@ -2462,6 +2952,22 @@ int install_special_mapping(struct mm_st
69760 vma->vm_start = addr;
69761 vma->vm_end = addr + len;
69762
69763 +#ifdef CONFIG_PAX_MPROTECT
69764 + if (mm->pax_flags & MF_PAX_MPROTECT) {
69765 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
69766 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
69767 + return -EPERM;
69768 + if (!(vm_flags & VM_EXEC))
69769 + vm_flags &= ~VM_MAYEXEC;
69770 +#else
69771 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
69772 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
69773 +#endif
69774 + else
69775 + vm_flags &= ~VM_MAYWRITE;
69776 + }
69777 +#endif
69778 +
69779 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
69780 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
69781
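
Among the mm/mmap.c hunks above, check_heap_stack_gap() and skip_heap_stack_gap() enforce sysctl_heap_stack_gap (64 KiB by default, per the sysctl hunk) between a candidate mapping and a neighbouring stack vma, and the get_unmapped_area paths are rewritten to use them. A userspace sketch of the rule for the common downward-growing stack case (not kernel code; the vma struct is a simplified stand-in):

/*
 * Sketch (not kernel code): a candidate range [addr, addr + len) is only
 * acceptable if it ends at least `gap` bytes below the start of the next
 * VM_GROWSDOWN vma.
 */
#include <stdbool.h>

struct vma_stub {
	unsigned long vm_start;
	unsigned long vm_end;
	bool grows_down;		/* stands in for VM_GROWSDOWN */
};

bool fits_below_stack(const struct vma_stub *next,
		      unsigned long addr, unsigned long len,
		      unsigned long gap /* sysctl_heap_stack_gap */)
{
	if (!next)
		return true;			/* no vma above the candidate */
	if (addr + len > next->vm_start)
		return false;			/* plain overlap */
	if (!next->grows_down)
		return true;			/* ordinary vma: adjacency is fine */
	return next->vm_start - (addr + len) >= gap;
}
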
69782 diff -urNp linux-3.0.8/mm/mprotect.c linux-3.0.8/mm/mprotect.c
69783 --- linux-3.0.8/mm/mprotect.c 2011-07-21 22:17:23.000000000 -0400
69784 +++ linux-3.0.8/mm/mprotect.c 2011-08-23 21:48:14.000000000 -0400
69785 @@ -23,10 +23,16 @@
69786 #include <linux/mmu_notifier.h>
69787 #include <linux/migrate.h>
69788 #include <linux/perf_event.h>
69789 +
69790 +#ifdef CONFIG_PAX_MPROTECT
69791 +#include <linux/elf.h>
69792 +#endif
69793 +
69794 #include <asm/uaccess.h>
69795 #include <asm/pgtable.h>
69796 #include <asm/cacheflush.h>
69797 #include <asm/tlbflush.h>
69798 +#include <asm/mmu_context.h>
69799
69800 #ifndef pgprot_modify
69801 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
69802 @@ -141,6 +147,48 @@ static void change_protection(struct vm_
69803 flush_tlb_range(vma, start, end);
69804 }
69805
69806 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
69807 +/* called while holding the mmap semaphore for writing except stack expansion */
69808 +void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
69809 +{
69810 + unsigned long oldlimit, newlimit = 0UL;
69811 +
69812 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
69813 + return;
69814 +
69815 + spin_lock(&mm->page_table_lock);
69816 + oldlimit = mm->context.user_cs_limit;
69817 + if ((prot & VM_EXEC) && oldlimit < end)
69818 + /* USER_CS limit moved up */
69819 + newlimit = end;
69820 + else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
69821 + /* USER_CS limit moved down */
69822 + newlimit = start;
69823 +
69824 + if (newlimit) {
69825 + mm->context.user_cs_limit = newlimit;
69826 +
69827 +#ifdef CONFIG_SMP
69828 + wmb();
69829 + cpus_clear(mm->context.cpu_user_cs_mask);
69830 + cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
69831 +#endif
69832 +
69833 + set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
69834 + }
69835 + spin_unlock(&mm->page_table_lock);
69836 + if (newlimit == end) {
69837 + struct vm_area_struct *vma = find_vma(mm, oldlimit);
69838 +
69839 + for (; vma && vma->vm_start < end; vma = vma->vm_next)
69840 + if (is_vm_hugetlb_page(vma))
69841 + hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
69842 + else
69843 + change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
69844 + }
69845 +}
69846 +#endif
69847 +
69848 int
69849 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
69850 unsigned long start, unsigned long end, unsigned long newflags)
69851 @@ -153,11 +201,29 @@ mprotect_fixup(struct vm_area_struct *vm
69852 int error;
69853 int dirty_accountable = 0;
69854
69855 +#ifdef CONFIG_PAX_SEGMEXEC
69856 + struct vm_area_struct *vma_m = NULL;
69857 + unsigned long start_m, end_m;
69858 +
69859 + start_m = start + SEGMEXEC_TASK_SIZE;
69860 + end_m = end + SEGMEXEC_TASK_SIZE;
69861 +#endif
69862 +
69863 if (newflags == oldflags) {
69864 *pprev = vma;
69865 return 0;
69866 }
69867
69868 + if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
69869 + struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
69870 +
69871 + if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
69872 + return -ENOMEM;
69873 +
69874 + if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
69875 + return -ENOMEM;
69876 + }
69877 +
69878 /*
69879 * If we make a private mapping writable we increase our commit;
69880 * but (without finer accounting) cannot reduce our commit if we
69881 @@ -174,6 +240,42 @@ mprotect_fixup(struct vm_area_struct *vm
69882 }
69883 }
69884
69885 +#ifdef CONFIG_PAX_SEGMEXEC
69886 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
69887 + if (start != vma->vm_start) {
69888 + error = split_vma(mm, vma, start, 1);
69889 + if (error)
69890 + goto fail;
69891 + BUG_ON(!*pprev || (*pprev)->vm_next == vma);
69892 + *pprev = (*pprev)->vm_next;
69893 + }
69894 +
69895 + if (end != vma->vm_end) {
69896 + error = split_vma(mm, vma, end, 0);
69897 + if (error)
69898 + goto fail;
69899 + }
69900 +
69901 + if (pax_find_mirror_vma(vma)) {
69902 + error = __do_munmap(mm, start_m, end_m - start_m);
69903 + if (error)
69904 + goto fail;
69905 + } else {
69906 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
69907 + if (!vma_m) {
69908 + error = -ENOMEM;
69909 + goto fail;
69910 + }
69911 + vma->vm_flags = newflags;
69912 + error = pax_mirror_vma(vma_m, vma);
69913 + if (error) {
69914 + vma->vm_flags = oldflags;
69915 + goto fail;
69916 + }
69917 + }
69918 + }
69919 +#endif
69920 +
69921 /*
69922 * First try to merge with previous and/or next vma.
69923 */
69924 @@ -204,9 +306,21 @@ success:
69925 * vm_flags and vm_page_prot are protected by the mmap_sem
69926 * held in write mode.
69927 */
69928 +
69929 +#ifdef CONFIG_PAX_SEGMEXEC
69930 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
69931 + pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
69932 +#endif
69933 +
69934 vma->vm_flags = newflags;
69935 +
69936 +#ifdef CONFIG_PAX_MPROTECT
69937 + if (mm->binfmt && mm->binfmt->handle_mprotect)
69938 + mm->binfmt->handle_mprotect(vma, newflags);
69939 +#endif
69940 +
69941 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
69942 - vm_get_page_prot(newflags));
69943 + vm_get_page_prot(vma->vm_flags));
69944
69945 if (vma_wants_writenotify(vma)) {
69946 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
69947 @@ -248,6 +362,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
69948 end = start + len;
69949 if (end <= start)
69950 return -ENOMEM;
69951 +
69952 +#ifdef CONFIG_PAX_SEGMEXEC
69953 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
69954 + if (end > SEGMEXEC_TASK_SIZE)
69955 + return -EINVAL;
69956 + } else
69957 +#endif
69958 +
69959 + if (end > TASK_SIZE)
69960 + return -EINVAL;
69961 +
69962 if (!arch_validate_prot(prot))
69963 return -EINVAL;
69964
69965 @@ -255,7 +380,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
69966 /*
69967 * Does the application expect PROT_READ to imply PROT_EXEC:
69968 */
69969 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
69970 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
69971 prot |= PROT_EXEC;
69972
69973 vm_flags = calc_vm_prot_bits(prot);
69974 @@ -287,6 +412,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
69975 if (start > vma->vm_start)
69976 prev = vma;
69977
69978 +#ifdef CONFIG_PAX_MPROTECT
69979 + if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
69980 + current->mm->binfmt->handle_mprotect(vma, vm_flags);
69981 +#endif
69982 +
69983 for (nstart = start ; ; ) {
69984 unsigned long newflags;
69985
69986 @@ -296,6 +426,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
69987
69988 /* newflags >> 4 shift VM_MAY% in place of VM_% */
69989 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
69990 + if (prot & (PROT_WRITE | PROT_EXEC))
69991 + gr_log_rwxmprotect(vma->vm_file);
69992 +
69993 + error = -EACCES;
69994 + goto out;
69995 + }
69996 +
69997 + if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
69998 error = -EACCES;
69999 goto out;
70000 }
70001 @@ -310,6 +448,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
70002 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
70003 if (error)
70004 goto out;
70005 +
70006 + track_exec_limit(current->mm, nstart, tmp, vm_flags);
70007 +
70008 nstart = tmp;
70009
70010 if (nstart < prev->vm_end)
70011 diff -urNp linux-3.0.8/mm/mremap.c linux-3.0.8/mm/mremap.c
70012 --- linux-3.0.8/mm/mremap.c 2011-07-21 22:17:23.000000000 -0400
70013 +++ linux-3.0.8/mm/mremap.c 2011-08-23 21:47:56.000000000 -0400
70014 @@ -113,6 +113,12 @@ static void move_ptes(struct vm_area_str
70015 continue;
70016 pte = ptep_clear_flush(vma, old_addr, old_pte);
70017 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
70018 +
70019 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
70020 + if (!(__supported_pte_mask & _PAGE_NX) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
70021 + pte = pte_exprotect(pte);
70022 +#endif
70023 +
70024 set_pte_at(mm, new_addr, new_pte, pte);
70025 }
70026
70027 @@ -272,6 +278,11 @@ static struct vm_area_struct *vma_to_res
70028 if (is_vm_hugetlb_page(vma))
70029 goto Einval;
70030
70031 +#ifdef CONFIG_PAX_SEGMEXEC
70032 + if (pax_find_mirror_vma(vma))
70033 + goto Einval;
70034 +#endif
70035 +
70036 /* We can't remap across vm area boundaries */
70037 if (old_len > vma->vm_end - addr)
70038 goto Efault;
70039 @@ -328,20 +339,25 @@ static unsigned long mremap_to(unsigned
70040 unsigned long ret = -EINVAL;
70041 unsigned long charged = 0;
70042 unsigned long map_flags;
70043 + unsigned long pax_task_size = TASK_SIZE;
70044
70045 if (new_addr & ~PAGE_MASK)
70046 goto out;
70047
70048 - if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
70049 +#ifdef CONFIG_PAX_SEGMEXEC
70050 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
70051 + pax_task_size = SEGMEXEC_TASK_SIZE;
70052 +#endif
70053 +
70054 + pax_task_size -= PAGE_SIZE;
70055 +
70056 + if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
70057 goto out;
70058
70059 /* Check if the location we're moving into overlaps the
70060 * old location at all, and fail if it does.
70061 */
70062 - if ((new_addr <= addr) && (new_addr+new_len) > addr)
70063 - goto out;
70064 -
70065 - if ((addr <= new_addr) && (addr+old_len) > new_addr)
70066 + if (addr + old_len > new_addr && new_addr + new_len > addr)
70067 goto out;
70068
70069 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
70070 @@ -413,6 +429,7 @@ unsigned long do_mremap(unsigned long ad
70071 struct vm_area_struct *vma;
70072 unsigned long ret = -EINVAL;
70073 unsigned long charged = 0;
70074 + unsigned long pax_task_size = TASK_SIZE;
70075
70076 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
70077 goto out;
70078 @@ -431,6 +448,17 @@ unsigned long do_mremap(unsigned long ad
70079 if (!new_len)
70080 goto out;
70081
70082 +#ifdef CONFIG_PAX_SEGMEXEC
70083 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
70084 + pax_task_size = SEGMEXEC_TASK_SIZE;
70085 +#endif
70086 +
70087 + pax_task_size -= PAGE_SIZE;
70088 +
70089 + if (new_len > pax_task_size || addr > pax_task_size-new_len ||
70090 + old_len > pax_task_size || addr > pax_task_size-old_len)
70091 + goto out;
70092 +
70093 if (flags & MREMAP_FIXED) {
70094 if (flags & MREMAP_MAYMOVE)
70095 ret = mremap_to(addr, old_len, new_addr, new_len);
70096 @@ -480,6 +508,7 @@ unsigned long do_mremap(unsigned long ad
70097 addr + new_len);
70098 }
70099 ret = addr;
70100 + track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
70101 goto out;
70102 }
70103 }
70104 @@ -506,7 +535,13 @@ unsigned long do_mremap(unsigned long ad
70105 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
70106 if (ret)
70107 goto out;
70108 +
70109 + map_flags = vma->vm_flags;
70110 ret = move_vma(vma, addr, old_len, new_len, new_addr);
70111 + if (!(ret & ~PAGE_MASK)) {
70112 + track_exec_limit(current->mm, addr, addr + old_len, 0UL);
70113 + track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
70114 + }
70115 }
70116 out:
70117 if (ret & ~PAGE_MASK)
70118 diff -urNp linux-3.0.8/mm/nobootmem.c linux-3.0.8/mm/nobootmem.c
70119 --- linux-3.0.8/mm/nobootmem.c 2011-07-21 22:17:23.000000000 -0400
70120 +++ linux-3.0.8/mm/nobootmem.c 2011-08-23 21:47:56.000000000 -0400
70121 @@ -110,19 +110,30 @@ static void __init __free_pages_memory(u
70122 unsigned long __init free_all_memory_core_early(int nodeid)
70123 {
70124 int i;
70125 - u64 start, end;
70126 + u64 start, end, startrange, endrange;
70127 unsigned long count = 0;
70128 - struct range *range = NULL;
70129 + struct range *range = NULL, rangerange = { 0, 0 };
70130 int nr_range;
70131
70132 nr_range = get_free_all_memory_range(&range, nodeid);
70133 + startrange = __pa(range) >> PAGE_SHIFT;
70134 + endrange = (__pa(range + nr_range) - 1) >> PAGE_SHIFT;
70135
70136 for (i = 0; i < nr_range; i++) {
70137 start = range[i].start;
70138 end = range[i].end;
70139 + if (start <= endrange && startrange < end) {
70140 + BUG_ON(rangerange.start | rangerange.end);
70141 + rangerange = range[i];
70142 + continue;
70143 + }
70144 count += end - start;
70145 __free_pages_memory(start, end);
70146 }
70147 + start = rangerange.start;
70148 + end = rangerange.end;
70149 + count += end - start;
70150 + __free_pages_memory(start, end);
70151
70152 return count;
70153 }
70154 diff -urNp linux-3.0.8/mm/nommu.c linux-3.0.8/mm/nommu.c
70155 --- linux-3.0.8/mm/nommu.c 2011-07-21 22:17:23.000000000 -0400
70156 +++ linux-3.0.8/mm/nommu.c 2011-08-23 21:47:56.000000000 -0400
70157 @@ -63,7 +63,6 @@ int sysctl_overcommit_memory = OVERCOMMI
70158 int sysctl_overcommit_ratio = 50; /* default is 50% */
70159 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
70160 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
70161 -int heap_stack_gap = 0;
70162
70163 atomic_long_t mmap_pages_allocated;
70164
70165 @@ -826,15 +825,6 @@ struct vm_area_struct *find_vma(struct m
70166 EXPORT_SYMBOL(find_vma);
70167
70168 /*
70169 - * find a VMA
70170 - * - we don't extend stack VMAs under NOMMU conditions
70171 - */
70172 -struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
70173 -{
70174 - return find_vma(mm, addr);
70175 -}
70176 -
70177 -/*
70178 * expand a stack to a given address
70179 * - not supported under NOMMU conditions
70180 */
70181 @@ -1554,6 +1544,7 @@ int split_vma(struct mm_struct *mm, stru
70182
70183 /* most fields are the same, copy all, and then fixup */
70184 *new = *vma;
70185 + INIT_LIST_HEAD(&new->anon_vma_chain);
70186 *region = *vma->vm_region;
70187 new->vm_region = region;
70188
70189 diff -urNp linux-3.0.8/mm/page_alloc.c linux-3.0.8/mm/page_alloc.c
70190 --- linux-3.0.8/mm/page_alloc.c 2011-10-24 08:05:30.000000000 -0400
70191 +++ linux-3.0.8/mm/page_alloc.c 2011-10-25 09:15:11.000000000 -0400
70192 @@ -340,7 +340,7 @@ out:
70193 * This usage means that zero-order pages may not be compound.
70194 */
70195
70196 -static void free_compound_page(struct page *page)
70197 +void free_compound_page(struct page *page)
70198 {
70199 __free_pages_ok(page, compound_order(page));
70200 }
70201 @@ -653,6 +653,10 @@ static bool free_pages_prepare(struct pa
70202 int i;
70203 int bad = 0;
70204
70205 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
70206 + unsigned long index = 1UL << order;
70207 +#endif
70208 +
70209 trace_mm_page_free_direct(page, order);
70210 kmemcheck_free_shadow(page, order);
70211
70212 @@ -668,6 +672,12 @@ static bool free_pages_prepare(struct pa
70213 debug_check_no_obj_freed(page_address(page),
70214 PAGE_SIZE << order);
70215 }
70216 +
70217 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
70218 + for (; index; --index)
70219 + sanitize_highpage(page + index - 1);
70220 +#endif
70221 +
70222 arch_free_page(page, order);
70223 kernel_map_pages(page, 1 << order, 0);
70224
70225 @@ -783,8 +793,10 @@ static int prep_new_page(struct page *pa
70226 arch_alloc_page(page, order);
70227 kernel_map_pages(page, 1 << order, 1);
70228
70229 +#ifndef CONFIG_PAX_MEMORY_SANITIZE
70230 if (gfp_flags & __GFP_ZERO)
70231 prep_zero_page(page, order, gfp_flags);
70232 +#endif
70233
70234 if (order && (gfp_flags & __GFP_COMP))
70235 prep_compound_page(page, order);
70236 @@ -2557,6 +2569,8 @@ void show_free_areas(unsigned int filter
70237 int cpu;
70238 struct zone *zone;
70239
70240 + pax_track_stack();
70241 +
70242 for_each_populated_zone(zone) {
70243 if (skip_free_areas_node(filter, zone_to_nid(zone)))
70244 continue;
70245 @@ -3368,7 +3382,13 @@ static int pageblock_is_reserved(unsigne
70246 unsigned long pfn;
70247
70248 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
70249 +#ifdef CONFIG_X86_32
70250 + /* boot failures in VMware 8 on 32bit vanilla since
70251 + this change */
70252 + if (!pfn_valid(pfn) || PageReserved(pfn_to_page(pfn)))
70253 +#else
70254 if (!pfn_valid_within(pfn) || PageReserved(pfn_to_page(pfn)))
70255 +#endif
70256 return 1;
70257 }
70258 return 0;
70259 diff -urNp linux-3.0.8/mm/percpu.c linux-3.0.8/mm/percpu.c
70260 --- linux-3.0.8/mm/percpu.c 2011-07-21 22:17:23.000000000 -0400
70261 +++ linux-3.0.8/mm/percpu.c 2011-08-23 21:47:56.000000000 -0400
70262 @@ -121,7 +121,7 @@ static unsigned int pcpu_first_unit_cpu
70263 static unsigned int pcpu_last_unit_cpu __read_mostly;
70264
70265 /* the address of the first chunk which starts with the kernel static area */
70266 -void *pcpu_base_addr __read_mostly;
70267 +void *pcpu_base_addr __read_only;
70268 EXPORT_SYMBOL_GPL(pcpu_base_addr);
70269
70270 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
70271 diff -urNp linux-3.0.8/mm/rmap.c linux-3.0.8/mm/rmap.c
70272 --- linux-3.0.8/mm/rmap.c 2011-07-21 22:17:23.000000000 -0400
70273 +++ linux-3.0.8/mm/rmap.c 2011-08-23 21:47:56.000000000 -0400
70274 @@ -153,6 +153,10 @@ int anon_vma_prepare(struct vm_area_stru
70275 struct anon_vma *anon_vma = vma->anon_vma;
70276 struct anon_vma_chain *avc;
70277
70278 +#ifdef CONFIG_PAX_SEGMEXEC
70279 + struct anon_vma_chain *avc_m = NULL;
70280 +#endif
70281 +
70282 might_sleep();
70283 if (unlikely(!anon_vma)) {
70284 struct mm_struct *mm = vma->vm_mm;
70285 @@ -162,6 +166,12 @@ int anon_vma_prepare(struct vm_area_stru
70286 if (!avc)
70287 goto out_enomem;
70288
70289 +#ifdef CONFIG_PAX_SEGMEXEC
70290 + avc_m = anon_vma_chain_alloc(GFP_KERNEL);
70291 + if (!avc_m)
70292 + goto out_enomem_free_avc;
70293 +#endif
70294 +
70295 anon_vma = find_mergeable_anon_vma(vma);
70296 allocated = NULL;
70297 if (!anon_vma) {
70298 @@ -175,6 +185,21 @@ int anon_vma_prepare(struct vm_area_stru
70299 /* page_table_lock to protect against threads */
70300 spin_lock(&mm->page_table_lock);
70301 if (likely(!vma->anon_vma)) {
70302 +
70303 +#ifdef CONFIG_PAX_SEGMEXEC
70304 + struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
70305 +
70306 + if (vma_m) {
70307 + BUG_ON(vma_m->anon_vma);
70308 + vma_m->anon_vma = anon_vma;
70309 + avc_m->anon_vma = anon_vma;
70310 + avc_m->vma = vma;
70311 + list_add(&avc_m->same_vma, &vma_m->anon_vma_chain);
70312 + list_add(&avc_m->same_anon_vma, &anon_vma->head);
70313 + avc_m = NULL;
70314 + }
70315 +#endif
70316 +
70317 vma->anon_vma = anon_vma;
70318 avc->anon_vma = anon_vma;
70319 avc->vma = vma;
70320 @@ -188,12 +213,24 @@ int anon_vma_prepare(struct vm_area_stru
70321
70322 if (unlikely(allocated))
70323 put_anon_vma(allocated);
70324 +
70325 +#ifdef CONFIG_PAX_SEGMEXEC
70326 + if (unlikely(avc_m))
70327 + anon_vma_chain_free(avc_m);
70328 +#endif
70329 +
70330 if (unlikely(avc))
70331 anon_vma_chain_free(avc);
70332 }
70333 return 0;
70334
70335 out_enomem_free_avc:
70336 +
70337 +#ifdef CONFIG_PAX_SEGMEXEC
70338 + if (avc_m)
70339 + anon_vma_chain_free(avc_m);
70340 +#endif
70341 +
70342 anon_vma_chain_free(avc);
70343 out_enomem:
70344 return -ENOMEM;
70345 @@ -244,7 +281,7 @@ static void anon_vma_chain_link(struct v
70346 * Attach the anon_vmas from src to dst.
70347 * Returns 0 on success, -ENOMEM on failure.
70348 */
70349 -int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
70350 +int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
70351 {
70352 struct anon_vma_chain *avc, *pavc;
70353 struct anon_vma *root = NULL;
70354 @@ -277,7 +314,7 @@ int anon_vma_clone(struct vm_area_struct
70355 * the corresponding VMA in the parent process is attached to.
70356 * Returns 0 on success, non-zero on failure.
70357 */
70358 -int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
70359 +int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
70360 {
70361 struct anon_vma_chain *avc;
70362 struct anon_vma *anon_vma;
70363 diff -urNp linux-3.0.8/mm/shmem.c linux-3.0.8/mm/shmem.c
70364 --- linux-3.0.8/mm/shmem.c 2011-07-21 22:17:23.000000000 -0400
70365 +++ linux-3.0.8/mm/shmem.c 2011-08-23 21:48:14.000000000 -0400
70366 @@ -31,7 +31,7 @@
70367 #include <linux/percpu_counter.h>
70368 #include <linux/swap.h>
70369
70370 -static struct vfsmount *shm_mnt;
70371 +struct vfsmount *shm_mnt;
70372
70373 #ifdef CONFIG_SHMEM
70374 /*
70375 @@ -1101,6 +1101,8 @@ static int shmem_writepage(struct page *
70376 goto unlock;
70377 }
70378 entry = shmem_swp_entry(info, index, NULL);
70379 + if (!entry)
70380 + goto unlock;
70381 if (entry->val) {
70382 /*
70383 * The more uptodate page coming down from a stacked
70384 @@ -1172,6 +1174,8 @@ static struct page *shmem_swapin(swp_ent
70385 struct vm_area_struct pvma;
70386 struct page *page;
70387
70388 + pax_track_stack();
70389 +
70390 spol = mpol_cond_copy(&mpol,
70391 mpol_shared_policy_lookup(&info->policy, idx));
70392
70393 @@ -2568,8 +2572,7 @@ int shmem_fill_super(struct super_block
70394 int err = -ENOMEM;
70395
70396 /* Round up to L1_CACHE_BYTES to resist false sharing */
70397 - sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
70398 - L1_CACHE_BYTES), GFP_KERNEL);
70399 + sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
70400 if (!sbinfo)
70401 return -ENOMEM;
70402
70403 diff -urNp linux-3.0.8/mm/slab.c linux-3.0.8/mm/slab.c
70404 --- linux-3.0.8/mm/slab.c 2011-07-21 22:17:23.000000000 -0400
70405 +++ linux-3.0.8/mm/slab.c 2011-08-23 21:48:14.000000000 -0400
70406 @@ -151,7 +151,7 @@
70407
70408 /* Legal flag mask for kmem_cache_create(). */
70409 #if DEBUG
70410 -# define CREATE_MASK (SLAB_RED_ZONE | \
70411 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
70412 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
70413 SLAB_CACHE_DMA | \
70414 SLAB_STORE_USER | \
70415 @@ -159,7 +159,7 @@
70416 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
70417 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
70418 #else
70419 -# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
70420 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
70421 SLAB_CACHE_DMA | \
70422 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
70423 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
70424 @@ -288,7 +288,7 @@ struct kmem_list3 {
70425 * Need this for bootstrapping a per node allocator.
70426 */
70427 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
70428 -static struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
70429 +static struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
70430 #define CACHE_CACHE 0
70431 #define SIZE_AC MAX_NUMNODES
70432 #define SIZE_L3 (2 * MAX_NUMNODES)
70433 @@ -389,10 +389,10 @@ static void kmem_list3_init(struct kmem_
70434 if ((x)->max_freeable < i) \
70435 (x)->max_freeable = i; \
70436 } while (0)
70437 -#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
70438 -#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
70439 -#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
70440 -#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
70441 +#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
70442 +#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
70443 +#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
70444 +#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
70445 #else
70446 #define STATS_INC_ACTIVE(x) do { } while (0)
70447 #define STATS_DEC_ACTIVE(x) do { } while (0)
70448 @@ -538,7 +538,7 @@ static inline void *index_to_obj(struct
70449 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
70450 */
70451 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
70452 - const struct slab *slab, void *obj)
70453 + const struct slab *slab, const void *obj)
70454 {
70455 u32 offset = (obj - slab->s_mem);
70456 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
70457 @@ -564,7 +564,7 @@ struct cache_names {
70458 static struct cache_names __initdata cache_names[] = {
70459 #define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
70460 #include <linux/kmalloc_sizes.h>
70461 - {NULL,}
70462 + {NULL}
70463 #undef CACHE
70464 };
70465
70466 @@ -1530,7 +1530,7 @@ void __init kmem_cache_init(void)
70467 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
70468 sizes[INDEX_AC].cs_size,
70469 ARCH_KMALLOC_MINALIGN,
70470 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
70471 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
70472 NULL);
70473
70474 if (INDEX_AC != INDEX_L3) {
70475 @@ -1538,7 +1538,7 @@ void __init kmem_cache_init(void)
70476 kmem_cache_create(names[INDEX_L3].name,
70477 sizes[INDEX_L3].cs_size,
70478 ARCH_KMALLOC_MINALIGN,
70479 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
70480 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
70481 NULL);
70482 }
70483
70484 @@ -1556,7 +1556,7 @@ void __init kmem_cache_init(void)
70485 sizes->cs_cachep = kmem_cache_create(names->name,
70486 sizes->cs_size,
70487 ARCH_KMALLOC_MINALIGN,
70488 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
70489 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
70490 NULL);
70491 }
70492 #ifdef CONFIG_ZONE_DMA
70493 @@ -4272,10 +4272,10 @@ static int s_show(struct seq_file *m, vo
70494 }
70495 /* cpu stats */
70496 {
70497 - unsigned long allochit = atomic_read(&cachep->allochit);
70498 - unsigned long allocmiss = atomic_read(&cachep->allocmiss);
70499 - unsigned long freehit = atomic_read(&cachep->freehit);
70500 - unsigned long freemiss = atomic_read(&cachep->freemiss);
70501 + unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
70502 + unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
70503 + unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
70504 + unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
70505
70506 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
70507 allochit, allocmiss, freehit, freemiss);
70508 @@ -4532,15 +4532,66 @@ static const struct file_operations proc
70509
70510 static int __init slab_proc_init(void)
70511 {
70512 - proc_create("slabinfo",S_IWUSR|S_IRUGO,NULL,&proc_slabinfo_operations);
70513 + mode_t gr_mode = S_IRUGO;
70514 +
70515 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
70516 + gr_mode = S_IRUSR;
70517 +#endif
70518 +
70519 + proc_create("slabinfo",S_IWUSR|gr_mode,NULL,&proc_slabinfo_operations);
70520 #ifdef CONFIG_DEBUG_SLAB_LEAK
70521 - proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
70522 + proc_create("slab_allocators", gr_mode, NULL, &proc_slabstats_operations);
70523 #endif
70524 return 0;
70525 }
70526 module_init(slab_proc_init);
70527 #endif
70528
70529 +void check_object_size(const void *ptr, unsigned long n, bool to)
70530 +{
70531 +
70532 +#ifdef CONFIG_PAX_USERCOPY
70533 + struct page *page;
70534 + struct kmem_cache *cachep = NULL;
70535 + struct slab *slabp;
70536 + unsigned int objnr;
70537 + unsigned long offset;
70538 +
70539 + if (!n)
70540 + return;
70541 +
70542 + if (ZERO_OR_NULL_PTR(ptr))
70543 + goto report;
70544 +
70545 + if (!virt_addr_valid(ptr))
70546 + return;
70547 +
70548 + page = virt_to_head_page(ptr);
70549 +
70550 + if (!PageSlab(page)) {
70551 + if (object_is_on_stack(ptr, n) == -1)
70552 + goto report;
70553 + return;
70554 + }
70555 +
70556 + cachep = page_get_cache(page);
70557 + if (!(cachep->flags & SLAB_USERCOPY))
70558 + goto report;
70559 +
70560 + slabp = page_get_slab(page);
70561 + objnr = obj_to_index(cachep, slabp, ptr);
70562 + BUG_ON(objnr >= cachep->num);
70563 + offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
70564 + if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
70565 + return;
70566 +
70567 +report:
70568 + pax_report_usercopy(ptr, n, to, cachep ? cachep->name : NULL);
70569 +#endif
70570 +
70571 +}
70572 +EXPORT_SYMBOL(check_object_size);
70573 +
70574 /**
70575 * ksize - get the actual amount of memory allocated for a given object
70576 * @objp: Pointer to the object
70577 diff -urNp linux-3.0.8/mm/slob.c linux-3.0.8/mm/slob.c
70578 --- linux-3.0.8/mm/slob.c 2011-07-21 22:17:23.000000000 -0400
70579 +++ linux-3.0.8/mm/slob.c 2011-08-23 21:47:56.000000000 -0400
70580 @@ -29,7 +29,7 @@
70581 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
70582 * alloc_pages() directly, allocating compound pages so the page order
70583 * does not have to be separately tracked, and also stores the exact
70584 - * allocation size in page->private so that it can be used to accurately
70585 + * allocation size in slob_page->size so that it can be used to accurately
70586 * provide ksize(). These objects are detected in kfree() because slob_page()
70587 * is false for them.
70588 *
70589 @@ -58,6 +58,7 @@
70590 */
70591
70592 #include <linux/kernel.h>
70593 +#include <linux/sched.h>
70594 #include <linux/slab.h>
70595 #include <linux/mm.h>
70596 #include <linux/swap.h> /* struct reclaim_state */
70597 @@ -102,7 +103,8 @@ struct slob_page {
70598 unsigned long flags; /* mandatory */
70599 atomic_t _count; /* mandatory */
70600 slobidx_t units; /* free units left in page */
70601 - unsigned long pad[2];
70602 + unsigned long pad[1];
70603 + unsigned long size; /* size when >=PAGE_SIZE */
70604 slob_t *free; /* first free slob_t in page */
70605 struct list_head list; /* linked list of free pages */
70606 };
70607 @@ -135,7 +137,7 @@ static LIST_HEAD(free_slob_large);
70608 */
70609 static inline int is_slob_page(struct slob_page *sp)
70610 {
70611 - return PageSlab((struct page *)sp);
70612 + return PageSlab((struct page *)sp) && !sp->size;
70613 }
70614
70615 static inline void set_slob_page(struct slob_page *sp)
70616 @@ -150,7 +152,7 @@ static inline void clear_slob_page(struc
70617
70618 static inline struct slob_page *slob_page(const void *addr)
70619 {
70620 - return (struct slob_page *)virt_to_page(addr);
70621 + return (struct slob_page *)virt_to_head_page(addr);
70622 }
70623
70624 /*
70625 @@ -210,7 +212,7 @@ static void set_slob(slob_t *s, slobidx_
70626 /*
70627 * Return the size of a slob block.
70628 */
70629 -static slobidx_t slob_units(slob_t *s)
70630 +static slobidx_t slob_units(const slob_t *s)
70631 {
70632 if (s->units > 0)
70633 return s->units;
70634 @@ -220,7 +222,7 @@ static slobidx_t slob_units(slob_t *s)
70635 /*
70636 * Return the next free slob block pointer after this one.
70637 */
70638 -static slob_t *slob_next(slob_t *s)
70639 +static slob_t *slob_next(const slob_t *s)
70640 {
70641 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
70642 slobidx_t next;
70643 @@ -235,7 +237,7 @@ static slob_t *slob_next(slob_t *s)
70644 /*
70645 * Returns true if s is the last free block in its page.
70646 */
70647 -static int slob_last(slob_t *s)
70648 +static int slob_last(const slob_t *s)
70649 {
70650 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
70651 }
70652 @@ -254,6 +256,7 @@ static void *slob_new_pages(gfp_t gfp, i
70653 if (!page)
70654 return NULL;
70655
70656 + set_slob_page(page);
70657 return page_address(page);
70658 }
70659
70660 @@ -370,11 +373,11 @@ static void *slob_alloc(size_t size, gfp
70661 if (!b)
70662 return NULL;
70663 sp = slob_page(b);
70664 - set_slob_page(sp);
70665
70666 spin_lock_irqsave(&slob_lock, flags);
70667 sp->units = SLOB_UNITS(PAGE_SIZE);
70668 sp->free = b;
70669 + sp->size = 0;
70670 INIT_LIST_HEAD(&sp->list);
70671 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
70672 set_slob_page_free(sp, slob_list);
70673 @@ -476,10 +479,9 @@ out:
70674 * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
70675 */
70676
70677 -void *__kmalloc_node(size_t size, gfp_t gfp, int node)
70678 +static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
70679 {
70680 - unsigned int *m;
70681 - int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
70682 + slob_t *m;
70683 void *ret;
70684
70685 lockdep_trace_alloc(gfp);
70686 @@ -492,7 +494,10 @@ void *__kmalloc_node(size_t size, gfp_t
70687
70688 if (!m)
70689 return NULL;
70690 - *m = size;
70691 + BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
70692 + BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
70693 + m[0].units = size;
70694 + m[1].units = align;
70695 ret = (void *)m + align;
70696
70697 trace_kmalloc_node(_RET_IP_, ret,
70698 @@ -504,16 +509,25 @@ void *__kmalloc_node(size_t size, gfp_t
70699 gfp |= __GFP_COMP;
70700 ret = slob_new_pages(gfp, order, node);
70701 if (ret) {
70702 - struct page *page;
70703 - page = virt_to_page(ret);
70704 - page->private = size;
70705 + struct slob_page *sp;
70706 + sp = slob_page(ret);
70707 + sp->size = size;
70708 }
70709
70710 trace_kmalloc_node(_RET_IP_, ret,
70711 size, PAGE_SIZE << order, gfp, node);
70712 }
70713
70714 - kmemleak_alloc(ret, size, 1, gfp);
70715 + return ret;
70716 +}
70717 +
70718 +void *__kmalloc_node(size_t size, gfp_t gfp, int node)
70719 +{
70720 + int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
70721 + void *ret = __kmalloc_node_align(size, gfp, node, align);
70722 +
70723 + if (!ZERO_OR_NULL_PTR(ret))
70724 + kmemleak_alloc(ret, size, 1, gfp);
70725 return ret;
70726 }
70727 EXPORT_SYMBOL(__kmalloc_node);
70728 @@ -531,13 +545,88 @@ void kfree(const void *block)
70729 sp = slob_page(block);
70730 if (is_slob_page(sp)) {
70731 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
70732 - unsigned int *m = (unsigned int *)(block - align);
70733 - slob_free(m, *m + align);
70734 - } else
70735 + slob_t *m = (slob_t *)(block - align);
70736 + slob_free(m, m[0].units + align);
70737 + } else {
70738 + clear_slob_page(sp);
70739 + free_slob_page(sp);
70740 + sp->size = 0;
70741 put_page(&sp->page);
70742 + }
70743 }
70744 EXPORT_SYMBOL(kfree);
70745
70746 +void check_object_size(const void *ptr, unsigned long n, bool to)
70747 +{
70748 +
70749 +#ifdef CONFIG_PAX_USERCOPY
70750 + struct slob_page *sp;
70751 + const slob_t *free;
70752 + const void *base;
70753 + unsigned long flags;
70754 +
70755 + if (!n)
70756 + return;
70757 +
70758 + if (ZERO_OR_NULL_PTR(ptr))
70759 + goto report;
70760 +
70761 + if (!virt_addr_valid(ptr))
70762 + return;
70763 +
70764 + sp = slob_page(ptr);
70765 + if (!PageSlab((struct page*)sp)) {
70766 + if (object_is_on_stack(ptr, n) == -1)
70767 + goto report;
70768 + return;
70769 + }
70770 +
70771 + if (sp->size) {
70772 + base = page_address(&sp->page);
70773 + if (base <= ptr && n <= sp->size - (ptr - base))
70774 + return;
70775 + goto report;
70776 + }
70777 +
70778 + /* some tricky double walking to find the chunk */
70779 + spin_lock_irqsave(&slob_lock, flags);
70780 + base = (void *)((unsigned long)ptr & PAGE_MASK);
70781 + free = sp->free;
70782 +
70783 + while (!slob_last(free) && (void *)free <= ptr) {
70784 + base = free + slob_units(free);
70785 + free = slob_next(free);
70786 + }
70787 +
70788 + while (base < (void *)free) {
70789 + slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
70790 + int size = SLOB_UNIT * SLOB_UNITS(m + align);
70791 + int offset;
70792 +
70793 + if (ptr < base + align)
70794 + break;
70795 +
70796 + offset = ptr - base - align;
70797 + if (offset >= m) {
70798 + base += size;
70799 + continue;
70800 + }
70801 +
70802 + if (n > m - offset)
70803 + break;
70804 +
70805 + spin_unlock_irqrestore(&slob_lock, flags);
70806 + return;
70807 + }
70808 +
70809 + spin_unlock_irqrestore(&slob_lock, flags);
70810 +report:
70811 + pax_report_usercopy(ptr, n, to, NULL);
70812 +#endif
70813 +
70814 +}
70815 +EXPORT_SYMBOL(check_object_size);
70816 +
70817 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
70818 size_t ksize(const void *block)
70819 {
70820 @@ -550,10 +639,10 @@ size_t ksize(const void *block)
70821 sp = slob_page(block);
70822 if (is_slob_page(sp)) {
70823 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
70824 - unsigned int *m = (unsigned int *)(block - align);
70825 - return SLOB_UNITS(*m) * SLOB_UNIT;
70826 + slob_t *m = (slob_t *)(block - align);
70827 + return SLOB_UNITS(m[0].units) * SLOB_UNIT;
70828 } else
70829 - return sp->page.private;
70830 + return sp->size;
70831 }
70832 EXPORT_SYMBOL(ksize);
70833
70834 @@ -569,8 +658,13 @@ struct kmem_cache *kmem_cache_create(con
70835 {
70836 struct kmem_cache *c;
70837
70838 +#ifdef CONFIG_PAX_USERCOPY
70839 + c = __kmalloc_node_align(sizeof(struct kmem_cache),
70840 + GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
70841 +#else
70842 c = slob_alloc(sizeof(struct kmem_cache),
70843 GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
70844 +#endif
70845
70846 if (c) {
70847 c->name = name;
70848 @@ -608,17 +702,25 @@ void *kmem_cache_alloc_node(struct kmem_
70849 {
70850 void *b;
70851
70852 +#ifdef CONFIG_PAX_USERCOPY
70853 + b = __kmalloc_node_align(c->size, flags, node, c->align);
70854 +#else
70855 if (c->size < PAGE_SIZE) {
70856 b = slob_alloc(c->size, flags, c->align, node);
70857 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
70858 SLOB_UNITS(c->size) * SLOB_UNIT,
70859 flags, node);
70860 } else {
70861 + struct slob_page *sp;
70862 +
70863 b = slob_new_pages(flags, get_order(c->size), node);
70864 + sp = slob_page(b);
70865 + sp->size = c->size;
70866 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
70867 PAGE_SIZE << get_order(c->size),
70868 flags, node);
70869 }
70870 +#endif
70871
70872 if (c->ctor)
70873 c->ctor(b);
70874 @@ -630,10 +732,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
70875
70876 static void __kmem_cache_free(void *b, int size)
70877 {
70878 - if (size < PAGE_SIZE)
70879 + struct slob_page *sp = slob_page(b);
70880 +
70881 + if (is_slob_page(sp))
70882 slob_free(b, size);
70883 - else
70884 + else {
70885 + clear_slob_page(sp);
70886 + free_slob_page(sp);
70887 + sp->size = 0;
70888 slob_free_pages(b, get_order(size));
70889 + }
70890 }
70891
70892 static void kmem_rcu_free(struct rcu_head *head)
70893 @@ -646,17 +754,31 @@ static void kmem_rcu_free(struct rcu_hea
70894
70895 void kmem_cache_free(struct kmem_cache *c, void *b)
70896 {
70897 + int size = c->size;
70898 +
70899 +#ifdef CONFIG_PAX_USERCOPY
70900 + if (size + c->align < PAGE_SIZE) {
70901 + size += c->align;
70902 + b -= c->align;
70903 + }
70904 +#endif
70905 +
70906 kmemleak_free_recursive(b, c->flags);
70907 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
70908 struct slob_rcu *slob_rcu;
70909 - slob_rcu = b + (c->size - sizeof(struct slob_rcu));
70910 - slob_rcu->size = c->size;
70911 + slob_rcu = b + (size - sizeof(struct slob_rcu));
70912 + slob_rcu->size = size;
70913 call_rcu(&slob_rcu->head, kmem_rcu_free);
70914 } else {
70915 - __kmem_cache_free(b, c->size);
70916 + __kmem_cache_free(b, size);
70917 }
70918
70919 +#ifdef CONFIG_PAX_USERCOPY
70920 + trace_kfree(_RET_IP_, b);
70921 +#else
70922 trace_kmem_cache_free(_RET_IP_, b);
70923 +#endif
70924 +
70925 }
70926 EXPORT_SYMBOL(kmem_cache_free);
70927
70928 diff -urNp linux-3.0.8/mm/slub.c linux-3.0.8/mm/slub.c
70929 --- linux-3.0.8/mm/slub.c 2011-07-21 22:17:23.000000000 -0400
70930 +++ linux-3.0.8/mm/slub.c 2011-09-25 22:15:40.000000000 -0400
70931 @@ -200,7 +200,7 @@ struct track {
70932
70933 enum track_item { TRACK_ALLOC, TRACK_FREE };
70934
70935 -#ifdef CONFIG_SYSFS
70936 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
70937 static int sysfs_slab_add(struct kmem_cache *);
70938 static int sysfs_slab_alias(struct kmem_cache *, const char *);
70939 static void sysfs_slab_remove(struct kmem_cache *);
70940 @@ -442,7 +442,7 @@ static void print_track(const char *s, s
70941 if (!t->addr)
70942 return;
70943
70944 - printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
70945 + printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
70946 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
70947 }
70948
70949 @@ -2137,6 +2137,8 @@ void kmem_cache_free(struct kmem_cache *
70950
70951 page = virt_to_head_page(x);
70952
70953 + BUG_ON(!PageSlab(page));
70954 +
70955 slab_free(s, page, x, _RET_IP_);
70956
70957 trace_kmem_cache_free(_RET_IP_, x);
70958 @@ -2170,7 +2172,7 @@ static int slub_min_objects;
70959 * Merge control. If this is set then no merging of slab caches will occur.
70960 * (Could be removed. This was introduced to pacify the merge skeptics.)
70961 */
70962 -static int slub_nomerge;
70963 +static int slub_nomerge = 1;
70964
70965 /*
70966 * Calculate the order of allocation given an slab object size.
70967 @@ -2594,7 +2596,7 @@ static int kmem_cache_open(struct kmem_c
70968 * list to avoid pounding the page allocator excessively.
70969 */
70970 set_min_partial(s, ilog2(s->size));
70971 - s->refcount = 1;
70972 + atomic_set(&s->refcount, 1);
70973 #ifdef CONFIG_NUMA
70974 s->remote_node_defrag_ratio = 1000;
70975 #endif
70976 @@ -2699,8 +2701,7 @@ static inline int kmem_cache_close(struc
70977 void kmem_cache_destroy(struct kmem_cache *s)
70978 {
70979 down_write(&slub_lock);
70980 - s->refcount--;
70981 - if (!s->refcount) {
70982 + if (atomic_dec_and_test(&s->refcount)) {
70983 list_del(&s->list);
70984 if (kmem_cache_close(s)) {
70985 printk(KERN_ERR "SLUB %s: %s called for cache that "
70986 @@ -2910,6 +2911,46 @@ void *__kmalloc_node(size_t size, gfp_t
70987 EXPORT_SYMBOL(__kmalloc_node);
70988 #endif
70989
70990 +void check_object_size(const void *ptr, unsigned long n, bool to)
70991 +{
70992 +
70993 +#ifdef CONFIG_PAX_USERCOPY
70994 + struct page *page;
70995 + struct kmem_cache *s = NULL;
70996 + unsigned long offset;
70997 +
70998 + if (!n)
70999 + return;
71000 +
71001 + if (ZERO_OR_NULL_PTR(ptr))
71002 + goto report;
71003 +
71004 + if (!virt_addr_valid(ptr))
71005 + return;
71006 +
71007 + page = virt_to_head_page(ptr);
71008 +
71009 + if (!PageSlab(page)) {
71010 + if (object_is_on_stack(ptr, n) == -1)
71011 + goto report;
71012 + return;
71013 + }
71014 +
71015 + s = page->slab;
71016 + if (!(s->flags & SLAB_USERCOPY))
71017 + goto report;
71018 +
71019 + offset = (ptr - page_address(page)) % s->size;
71020 + if (offset <= s->objsize && n <= s->objsize - offset)
71021 + return;
71022 +
71023 +report:
71024 + pax_report_usercopy(ptr, n, to, s ? s->name : NULL);
71025 +#endif
71026 +
71027 +}
71028 +EXPORT_SYMBOL(check_object_size);
71029 +
71030 size_t ksize(const void *object)
71031 {
71032 struct page *page;
71033 @@ -3154,7 +3195,7 @@ static void __init kmem_cache_bootstrap_
71034 int node;
71035
71036 list_add(&s->list, &slab_caches);
71037 - s->refcount = -1;
71038 + atomic_set(&s->refcount, -1);
71039
71040 for_each_node_state(node, N_NORMAL_MEMORY) {
71041 struct kmem_cache_node *n = get_node(s, node);
71042 @@ -3271,17 +3312,17 @@ void __init kmem_cache_init(void)
71043
71044 /* Caches that are not of the two-to-the-power-of size */
71045 if (KMALLOC_MIN_SIZE <= 32) {
71046 - kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0);
71047 + kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, SLAB_USERCOPY);
71048 caches++;
71049 }
71050
71051 if (KMALLOC_MIN_SIZE <= 64) {
71052 - kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0);
71053 + kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, SLAB_USERCOPY);
71054 caches++;
71055 }
71056
71057 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
71058 - kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
71059 + kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, SLAB_USERCOPY);
71060 caches++;
71061 }
71062
71063 @@ -3349,7 +3390,7 @@ static int slab_unmergeable(struct kmem_
71064 /*
71065 * We may have set a slab to be unmergeable during bootstrap.
71066 */
71067 - if (s->refcount < 0)
71068 + if (atomic_read(&s->refcount) < 0)
71069 return 1;
71070
71071 return 0;
71072 @@ -3408,7 +3449,7 @@ struct kmem_cache *kmem_cache_create(con
71073 down_write(&slub_lock);
71074 s = find_mergeable(size, align, flags, name, ctor);
71075 if (s) {
71076 - s->refcount++;
71077 + atomic_inc(&s->refcount);
71078 /*
71079 * Adjust the object sizes so that we clear
71080 * the complete object on kzalloc.
71081 @@ -3417,7 +3458,7 @@ struct kmem_cache *kmem_cache_create(con
71082 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
71083
71084 if (sysfs_slab_alias(s, name)) {
71085 - s->refcount--;
71086 + atomic_dec(&s->refcount);
71087 goto err;
71088 }
71089 up_write(&slub_lock);
71090 @@ -3545,7 +3586,7 @@ void *__kmalloc_node_track_caller(size_t
71091 }
71092 #endif
71093
71094 -#ifdef CONFIG_SYSFS
71095 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
71096 static int count_inuse(struct page *page)
71097 {
71098 return page->inuse;
71099 @@ -3935,12 +3976,12 @@ static void resiliency_test(void)
71100 validate_slab_cache(kmalloc_caches[9]);
71101 }
71102 #else
71103 -#ifdef CONFIG_SYSFS
71104 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
71105 static void resiliency_test(void) {};
71106 #endif
71107 #endif
71108
71109 -#ifdef CONFIG_SYSFS
71110 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
71111 enum slab_stat_type {
71112 SL_ALL, /* All slabs */
71113 SL_PARTIAL, /* Only partially allocated slabs */
71114 @@ -4150,7 +4191,7 @@ SLAB_ATTR_RO(ctor);
71115
71116 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
71117 {
71118 - return sprintf(buf, "%d\n", s->refcount - 1);
71119 + return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
71120 }
71121 SLAB_ATTR_RO(aliases);
71122
71123 @@ -4662,6 +4703,7 @@ static char *create_unique_id(struct kme
71124 return name;
71125 }
71126
71127 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
71128 static int sysfs_slab_add(struct kmem_cache *s)
71129 {
71130 int err;
71131 @@ -4724,6 +4766,7 @@ static void sysfs_slab_remove(struct kme
71132 kobject_del(&s->kobj);
71133 kobject_put(&s->kobj);
71134 }
71135 +#endif
71136
71137 /*
71138 * Need to buffer aliases during bootup until sysfs becomes
71139 @@ -4737,6 +4780,7 @@ struct saved_alias {
71140
71141 static struct saved_alias *alias_list;
71142
71143 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
71144 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
71145 {
71146 struct saved_alias *al;
71147 @@ -4759,6 +4803,7 @@ static int sysfs_slab_alias(struct kmem_
71148 alias_list = al;
71149 return 0;
71150 }
71151 +#endif
71152
71153 static int __init slab_sysfs_init(void)
71154 {
71155 @@ -4894,7 +4939,13 @@ static const struct file_operations proc
71156
71157 static int __init slab_proc_init(void)
71158 {
71159 - proc_create("slabinfo", S_IRUGO, NULL, &proc_slabinfo_operations);
71160 + mode_t gr_mode = S_IRUGO;
71161 +
71162 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
71163 + gr_mode = S_IRUSR;
71164 +#endif
71165 +
71166 + proc_create("slabinfo", gr_mode, NULL, &proc_slabinfo_operations);
71167 return 0;
71168 }
71169 module_init(slab_proc_init);
71170 diff -urNp linux-3.0.8/mm/swap.c linux-3.0.8/mm/swap.c
71171 --- linux-3.0.8/mm/swap.c 2011-07-21 22:17:23.000000000 -0400
71172 +++ linux-3.0.8/mm/swap.c 2011-08-23 21:47:56.000000000 -0400
71173 @@ -31,6 +31,7 @@
71174 #include <linux/backing-dev.h>
71175 #include <linux/memcontrol.h>
71176 #include <linux/gfp.h>
71177 +#include <linux/hugetlb.h>
71178
71179 #include "internal.h"
71180
71181 @@ -71,6 +72,8 @@ static void __put_compound_page(struct p
71182
71183 __page_cache_release(page);
71184 dtor = get_compound_page_dtor(page);
71185 + if (!PageHuge(page))
71186 + BUG_ON(dtor != free_compound_page);
71187 (*dtor)(page);
71188 }
71189
71190 diff -urNp linux-3.0.8/mm/swapfile.c linux-3.0.8/mm/swapfile.c
71191 --- linux-3.0.8/mm/swapfile.c 2011-07-21 22:17:23.000000000 -0400
71192 +++ linux-3.0.8/mm/swapfile.c 2011-08-23 21:47:56.000000000 -0400
71193 @@ -62,7 +62,7 @@ static DEFINE_MUTEX(swapon_mutex);
71194
71195 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
71196 /* Activity counter to indicate that a swapon or swapoff has occurred */
71197 -static atomic_t proc_poll_event = ATOMIC_INIT(0);
71198 +static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
71199
71200 static inline unsigned char swap_count(unsigned char ent)
71201 {
71202 @@ -1671,7 +1671,7 @@ SYSCALL_DEFINE1(swapoff, const char __us
71203 }
71204 filp_close(swap_file, NULL);
71205 err = 0;
71206 - atomic_inc(&proc_poll_event);
71207 + atomic_inc_unchecked(&proc_poll_event);
71208 wake_up_interruptible(&proc_poll_wait);
71209
71210 out_dput:
71211 @@ -1692,8 +1692,8 @@ static unsigned swaps_poll(struct file *
71212
71213 poll_wait(file, &proc_poll_wait, wait);
71214
71215 - if (s->event != atomic_read(&proc_poll_event)) {
71216 - s->event = atomic_read(&proc_poll_event);
71217 + if (s->event != atomic_read_unchecked(&proc_poll_event)) {
71218 + s->event = atomic_read_unchecked(&proc_poll_event);
71219 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
71220 }
71221
71222 @@ -1799,7 +1799,7 @@ static int swaps_open(struct inode *inod
71223 }
71224
71225 s->seq.private = s;
71226 - s->event = atomic_read(&proc_poll_event);
71227 + s->event = atomic_read_unchecked(&proc_poll_event);
71228 return ret;
71229 }
71230
71231 @@ -2133,7 +2133,7 @@ SYSCALL_DEFINE2(swapon, const char __use
71232 (p->flags & SWP_DISCARDABLE) ? "D" : "");
71233
71234 mutex_unlock(&swapon_mutex);
71235 - atomic_inc(&proc_poll_event);
71236 + atomic_inc_unchecked(&proc_poll_event);
71237 wake_up_interruptible(&proc_poll_wait);
71238
71239 if (S_ISREG(inode->i_mode))
71240 diff -urNp linux-3.0.8/mm/util.c linux-3.0.8/mm/util.c
71241 --- linux-3.0.8/mm/util.c 2011-07-21 22:17:23.000000000 -0400
71242 +++ linux-3.0.8/mm/util.c 2011-08-23 21:47:56.000000000 -0400
71243 @@ -114,6 +114,7 @@ EXPORT_SYMBOL(memdup_user);
71244 * allocated buffer. Use this if you don't want to free the buffer immediately
71245 * like, for example, with RCU.
71246 */
71247 +#undef __krealloc
71248 void *__krealloc(const void *p, size_t new_size, gfp_t flags)
71249 {
71250 void *ret;
71251 @@ -147,6 +148,7 @@ EXPORT_SYMBOL(__krealloc);
71252 * behaves exactly like kmalloc(). If @size is 0 and @p is not a
71253 * %NULL pointer, the object pointed to is freed.
71254 */
71255 +#undef krealloc
71256 void *krealloc(const void *p, size_t new_size, gfp_t flags)
71257 {
71258 void *ret;
71259 @@ -243,6 +245,12 @@ void __vma_link_list(struct mm_struct *m
71260 void arch_pick_mmap_layout(struct mm_struct *mm)
71261 {
71262 mm->mmap_base = TASK_UNMAPPED_BASE;
71263 +
71264 +#ifdef CONFIG_PAX_RANDMMAP
71265 + if (mm->pax_flags & MF_PAX_RANDMMAP)
71266 + mm->mmap_base += mm->delta_mmap;
71267 +#endif
71268 +
71269 mm->get_unmapped_area = arch_get_unmapped_area;
71270 mm->unmap_area = arch_unmap_area;
71271 }
71272 diff -urNp linux-3.0.8/mm/vmalloc.c linux-3.0.8/mm/vmalloc.c
71273 --- linux-3.0.8/mm/vmalloc.c 2011-10-24 08:05:30.000000000 -0400
71274 +++ linux-3.0.8/mm/vmalloc.c 2011-10-16 21:55:28.000000000 -0400
71275 @@ -39,8 +39,19 @@ static void vunmap_pte_range(pmd_t *pmd,
71276
71277 pte = pte_offset_kernel(pmd, addr);
71278 do {
71279 - pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
71280 - WARN_ON(!pte_none(ptent) && !pte_present(ptent));
71281 +
71282 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
71283 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
71284 + BUG_ON(!pte_exec(*pte));
71285 + set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
71286 + continue;
71287 + }
71288 +#endif
71289 +
71290 + {
71291 + pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
71292 + WARN_ON(!pte_none(ptent) && !pte_present(ptent));
71293 + }
71294 } while (pte++, addr += PAGE_SIZE, addr != end);
71295 }
71296
71297 @@ -91,6 +102,7 @@ static int vmap_pte_range(pmd_t *pmd, un
71298 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
71299 {
71300 pte_t *pte;
71301 + int ret = -ENOMEM;
71302
71303 /*
71304 * nr is a running index into the array which helps higher level
71305 @@ -100,17 +112,30 @@ static int vmap_pte_range(pmd_t *pmd, un
71306 pte = pte_alloc_kernel(pmd, addr);
71307 if (!pte)
71308 return -ENOMEM;
71309 +
71310 + pax_open_kernel();
71311 do {
71312 struct page *page = pages[*nr];
71313
71314 - if (WARN_ON(!pte_none(*pte)))
71315 - return -EBUSY;
71316 - if (WARN_ON(!page))
71317 - return -ENOMEM;
71318 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
71319 + if (pgprot_val(prot) & _PAGE_NX)
71320 +#endif
71321 +
71322 + if (WARN_ON(!pte_none(*pte))) {
71323 + ret = -EBUSY;
71324 + goto out;
71325 + }
71326 + if (WARN_ON(!page)) {
71327 + ret = -ENOMEM;
71328 + goto out;
71329 + }
71330 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
71331 (*nr)++;
71332 } while (pte++, addr += PAGE_SIZE, addr != end);
71333 - return 0;
71334 + ret = 0;
71335 +out:
71336 + pax_close_kernel();
71337 + return ret;
71338 }
71339
71340 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
71341 @@ -191,11 +216,20 @@ int is_vmalloc_or_module_addr(const void
71342 * and fall back on vmalloc() if that fails. Others
71343 * just put it in the vmalloc space.
71344 */
71345 -#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
71346 +#ifdef CONFIG_MODULES
71347 +#ifdef MODULES_VADDR
71348 unsigned long addr = (unsigned long)x;
71349 if (addr >= MODULES_VADDR && addr < MODULES_END)
71350 return 1;
71351 #endif
71352 +
71353 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
71354 + if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
71355 + return 1;
71356 +#endif
71357 +
71358 +#endif
71359 +
71360 return is_vmalloc_addr(x);
71361 }
71362
71363 @@ -216,8 +250,14 @@ struct page *vmalloc_to_page(const void
71364
71365 if (!pgd_none(*pgd)) {
71366 pud_t *pud = pud_offset(pgd, addr);
71367 +#ifdef CONFIG_X86
71368 + if (!pud_large(*pud))
71369 +#endif
71370 if (!pud_none(*pud)) {
71371 pmd_t *pmd = pmd_offset(pud, addr);
71372 +#ifdef CONFIG_X86
71373 + if (!pmd_large(*pmd))
71374 +#endif
71375 if (!pmd_none(*pmd)) {
71376 pte_t *ptep, pte;
71377
71378 @@ -1297,6 +1337,16 @@ static struct vm_struct *__get_vm_area_n
71379 struct vm_struct *area;
71380
71381 BUG_ON(in_interrupt());
71382 +
71383 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
71384 + if (flags & VM_KERNEXEC) {
71385 + if (start != VMALLOC_START || end != VMALLOC_END)
71386 + return NULL;
71387 + start = (unsigned long)MODULES_EXEC_VADDR;
71388 + end = (unsigned long)MODULES_EXEC_END;
71389 + }
71390 +#endif
71391 +
71392 if (flags & VM_IOREMAP) {
71393 int bit = fls(size);
71394
71395 @@ -1515,6 +1565,11 @@ void *vmap(struct page **pages, unsigned
71396 if (count > totalram_pages)
71397 return NULL;
71398
71399 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
71400 + if (!(pgprot_val(prot) & _PAGE_NX))
71401 + flags |= VM_KERNEXEC;
71402 +#endif
71403 +
71404 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
71405 __builtin_return_address(0));
71406 if (!area)
71407 @@ -1616,6 +1671,13 @@ void *__vmalloc_node_range(unsigned long
71408 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
71409 return NULL;
71410
71411 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
71412 + if (!(pgprot_val(prot) & _PAGE_NX))
71413 + area = __get_vm_area_node(size, align, VM_ALLOC | VM_KERNEXEC, VMALLOC_START, VMALLOC_END,
71414 + node, gfp_mask, caller);
71415 + else
71416 +#endif
71417 +
71418 area = __get_vm_area_node(size, align, VM_ALLOC, start, end, node,
71419 gfp_mask, caller);
71420
71421 @@ -1655,6 +1717,7 @@ static void *__vmalloc_node(unsigned lon
71422 gfp_mask, prot, node, caller);
71423 }
71424
71425 +#undef __vmalloc
71426 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
71427 {
71428 return __vmalloc_node(size, 1, gfp_mask, prot, -1,
71429 @@ -1678,6 +1741,7 @@ static inline void *__vmalloc_node_flags
71430 * For tight control over page level allocator and protection flags
71431 * use __vmalloc() instead.
71432 */
71433 +#undef vmalloc
71434 void *vmalloc(unsigned long size)
71435 {
71436 return __vmalloc_node_flags(size, -1, GFP_KERNEL | __GFP_HIGHMEM);
71437 @@ -1694,6 +1758,7 @@ EXPORT_SYMBOL(vmalloc);
71438 * For tight control over page level allocator and protection flags
71439 * use __vmalloc() instead.
71440 */
71441 +#undef vzalloc
71442 void *vzalloc(unsigned long size)
71443 {
71444 return __vmalloc_node_flags(size, -1,
71445 @@ -1708,6 +1773,7 @@ EXPORT_SYMBOL(vzalloc);
71446 * The resulting memory area is zeroed so it can be mapped to userspace
71447 * without leaking data.
71448 */
71449 +#undef vmalloc_user
71450 void *vmalloc_user(unsigned long size)
71451 {
71452 struct vm_struct *area;
71453 @@ -1735,6 +1801,7 @@ EXPORT_SYMBOL(vmalloc_user);
71454 * For tight control over page level allocator and protection flags
71455 * use __vmalloc() instead.
71456 */
71457 +#undef vmalloc_node
71458 void *vmalloc_node(unsigned long size, int node)
71459 {
71460 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
71461 @@ -1754,6 +1821,7 @@ EXPORT_SYMBOL(vmalloc_node);
71462 * For tight control over page level allocator and protection flags
71463 * use __vmalloc_node() instead.
71464 */
71465 +#undef vzalloc_node
71466 void *vzalloc_node(unsigned long size, int node)
71467 {
71468 return __vmalloc_node_flags(size, node,
71469 @@ -1776,10 +1844,10 @@ EXPORT_SYMBOL(vzalloc_node);
71470 * For tight control over page level allocator and protection flags
71471 * use __vmalloc() instead.
71472 */
71473 -
71474 +#undef vmalloc_exec
71475 void *vmalloc_exec(unsigned long size)
71476 {
71477 - return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
71478 + return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
71479 -1, __builtin_return_address(0));
71480 }
71481
71482 @@ -1798,6 +1866,7 @@ void *vmalloc_exec(unsigned long size)
71483 * Allocate enough 32bit PA addressable pages to cover @size from the
71484 * page level allocator and map them into contiguous kernel virtual space.
71485 */
71486 +#undef vmalloc_32
71487 void *vmalloc_32(unsigned long size)
71488 {
71489 return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
71490 @@ -1812,6 +1881,7 @@ EXPORT_SYMBOL(vmalloc_32);
71491 * The resulting memory area is 32bit addressable and zeroed so it can be
71492 * mapped to userspace without leaking data.
71493 */
71494 +#undef vmalloc_32_user
71495 void *vmalloc_32_user(unsigned long size)
71496 {
71497 struct vm_struct *area;
71498 @@ -2074,6 +2144,8 @@ int remap_vmalloc_range(struct vm_area_s
71499 unsigned long uaddr = vma->vm_start;
71500 unsigned long usize = vma->vm_end - vma->vm_start;
71501
71502 + BUG_ON(vma->vm_mirror);
71503 +
71504 if ((PAGE_SIZE-1) & (unsigned long)addr)
71505 return -EINVAL;
71506
71507 diff -urNp linux-3.0.8/mm/vmstat.c linux-3.0.8/mm/vmstat.c
71508 --- linux-3.0.8/mm/vmstat.c 2011-07-21 22:17:23.000000000 -0400
71509 +++ linux-3.0.8/mm/vmstat.c 2011-08-23 21:48:14.000000000 -0400
71510 @@ -78,7 +78,7 @@ void vm_events_fold_cpu(int cpu)
71511 *
71512 * vm_stat contains the global counters
71513 */
71514 -atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
71515 +atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
71516 EXPORT_SYMBOL(vm_stat);
71517
71518 #ifdef CONFIG_SMP
71519 @@ -454,7 +454,7 @@ void refresh_cpu_vm_stats(int cpu)
71520 v = p->vm_stat_diff[i];
71521 p->vm_stat_diff[i] = 0;
71522 local_irq_restore(flags);
71523 - atomic_long_add(v, &zone->vm_stat[i]);
71524 + atomic_long_add_unchecked(v, &zone->vm_stat[i]);
71525 global_diff[i] += v;
71526 #ifdef CONFIG_NUMA
71527 /* 3 seconds idle till flush */
71528 @@ -492,7 +492,7 @@ void refresh_cpu_vm_stats(int cpu)
71529
71530 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
71531 if (global_diff[i])
71532 - atomic_long_add(global_diff[i], &vm_stat[i]);
71533 + atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
71534 }
71535
71536 #endif
71537 @@ -1207,10 +1207,20 @@ static int __init setup_vmstat(void)
71538 start_cpu_timer(cpu);
71539 #endif
71540 #ifdef CONFIG_PROC_FS
71541 - proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
71542 - proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
71543 - proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
71544 - proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
71545 + {
71546 + mode_t gr_mode = S_IRUGO;
71547 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
71548 + gr_mode = S_IRUSR;
71549 +#endif
71550 + proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
71551 + proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
71552 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
71553 + proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
71554 +#else
71555 + proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
71556 +#endif
71557 + proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
71558 + }
71559 #endif
71560 return 0;
71561 }
71562 diff -urNp linux-3.0.8/net/8021q/vlan.c linux-3.0.8/net/8021q/vlan.c
71563 --- linux-3.0.8/net/8021q/vlan.c 2011-07-21 22:17:23.000000000 -0400
71564 +++ linux-3.0.8/net/8021q/vlan.c 2011-08-23 21:47:56.000000000 -0400
71565 @@ -591,8 +591,7 @@ static int vlan_ioctl_handler(struct net
71566 err = -EPERM;
71567 if (!capable(CAP_NET_ADMIN))
71568 break;
71569 - if ((args.u.name_type >= 0) &&
71570 - (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
71571 + if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
71572 struct vlan_net *vn;
71573
71574 vn = net_generic(net, vlan_net_id);
71575 diff -urNp linux-3.0.8/net/9p/trans_fd.c linux-3.0.8/net/9p/trans_fd.c
71576 --- linux-3.0.8/net/9p/trans_fd.c 2011-07-21 22:17:23.000000000 -0400
71577 +++ linux-3.0.8/net/9p/trans_fd.c 2011-10-06 04:17:55.000000000 -0400
71578 @@ -423,7 +423,7 @@ static int p9_fd_write(struct p9_client
71579 oldfs = get_fs();
71580 set_fs(get_ds());
71581 /* The cast to a user pointer is valid due to the set_fs() */
71582 - ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
71583 + ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
71584 set_fs(oldfs);
71585
71586 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
71587 diff -urNp linux-3.0.8/net/9p/trans_virtio.c linux-3.0.8/net/9p/trans_virtio.c
71588 --- linux-3.0.8/net/9p/trans_virtio.c 2011-10-24 08:05:30.000000000 -0400
71589 +++ linux-3.0.8/net/9p/trans_virtio.c 2011-10-16 21:55:28.000000000 -0400
71590 @@ -327,7 +327,7 @@ req_retry_pinned:
71591 } else {
71592 char *pbuf;
71593 if (req->tc->pubuf)
71594 - pbuf = (__force char *) req->tc->pubuf;
71595 + pbuf = (char __force_kernel *) req->tc->pubuf;
71596 else
71597 pbuf = req->tc->pkbuf;
71598 outp = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM, pbuf,
71599 @@ -357,7 +357,7 @@ req_retry_pinned:
71600 } else {
71601 char *pbuf;
71602 if (req->tc->pubuf)
71603 - pbuf = (__force char *) req->tc->pubuf;
71604 + pbuf = (char __force_kernel *) req->tc->pubuf;
71605 else
71606 pbuf = req->tc->pkbuf;
71607
71608 diff -urNp linux-3.0.8/net/atm/atm_misc.c linux-3.0.8/net/atm/atm_misc.c
71609 --- linux-3.0.8/net/atm/atm_misc.c 2011-07-21 22:17:23.000000000 -0400
71610 +++ linux-3.0.8/net/atm/atm_misc.c 2011-08-23 21:47:56.000000000 -0400
71611 @@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int
71612 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
71613 return 1;
71614 atm_return(vcc, truesize);
71615 - atomic_inc(&vcc->stats->rx_drop);
71616 + atomic_inc_unchecked(&vcc->stats->rx_drop);
71617 return 0;
71618 }
71619 EXPORT_SYMBOL(atm_charge);
71620 @@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct
71621 }
71622 }
71623 atm_return(vcc, guess);
71624 - atomic_inc(&vcc->stats->rx_drop);
71625 + atomic_inc_unchecked(&vcc->stats->rx_drop);
71626 return NULL;
71627 }
71628 EXPORT_SYMBOL(atm_alloc_charge);
71629 @@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
71630
71631 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
71632 {
71633 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
71634 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
71635 __SONET_ITEMS
71636 #undef __HANDLE_ITEM
71637 }
71638 @@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
71639
71640 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
71641 {
71642 -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
71643 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
71644 __SONET_ITEMS
71645 #undef __HANDLE_ITEM
71646 }
71647 diff -urNp linux-3.0.8/net/atm/lec.h linux-3.0.8/net/atm/lec.h
71648 --- linux-3.0.8/net/atm/lec.h 2011-07-21 22:17:23.000000000 -0400
71649 +++ linux-3.0.8/net/atm/lec.h 2011-08-23 21:47:56.000000000 -0400
71650 @@ -48,7 +48,7 @@ struct lane2_ops {
71651 const u8 *tlvs, u32 sizeoftlvs);
71652 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
71653 const u8 *tlvs, u32 sizeoftlvs);
71654 -};
71655 +} __no_const;
71656
71657 /*
71658 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
71659 diff -urNp linux-3.0.8/net/atm/mpc.h linux-3.0.8/net/atm/mpc.h
71660 --- linux-3.0.8/net/atm/mpc.h 2011-07-21 22:17:23.000000000 -0400
71661 +++ linux-3.0.8/net/atm/mpc.h 2011-08-23 21:47:56.000000000 -0400
71662 @@ -33,7 +33,7 @@ struct mpoa_client {
71663 struct mpc_parameters parameters; /* parameters for this client */
71664
71665 const struct net_device_ops *old_ops;
71666 - struct net_device_ops new_ops;
71667 + net_device_ops_no_const new_ops;
71668 };
71669
71670
71671 diff -urNp linux-3.0.8/net/atm/mpoa_caches.c linux-3.0.8/net/atm/mpoa_caches.c
71672 --- linux-3.0.8/net/atm/mpoa_caches.c 2011-07-21 22:17:23.000000000 -0400
71673 +++ linux-3.0.8/net/atm/mpoa_caches.c 2011-08-23 21:48:14.000000000 -0400
71674 @@ -255,6 +255,8 @@ static void check_resolving_entries(stru
71675 struct timeval now;
71676 struct k_message msg;
71677
71678 + pax_track_stack();
71679 +
71680 do_gettimeofday(&now);
71681
71682 read_lock_bh(&client->ingress_lock);
71683 diff -urNp linux-3.0.8/net/atm/proc.c linux-3.0.8/net/atm/proc.c
71684 --- linux-3.0.8/net/atm/proc.c 2011-07-21 22:17:23.000000000 -0400
71685 +++ linux-3.0.8/net/atm/proc.c 2011-08-23 21:47:56.000000000 -0400
71686 @@ -45,9 +45,9 @@ static void add_stats(struct seq_file *s
71687 const struct k_atm_aal_stats *stats)
71688 {
71689 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
71690 - atomic_read(&stats->tx), atomic_read(&stats->tx_err),
71691 - atomic_read(&stats->rx), atomic_read(&stats->rx_err),
71692 - atomic_read(&stats->rx_drop));
71693 + atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
71694 + atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
71695 + atomic_read_unchecked(&stats->rx_drop));
71696 }
71697
71698 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
71699 diff -urNp linux-3.0.8/net/atm/resources.c linux-3.0.8/net/atm/resources.c
71700 --- linux-3.0.8/net/atm/resources.c 2011-07-21 22:17:23.000000000 -0400
71701 +++ linux-3.0.8/net/atm/resources.c 2011-08-23 21:47:56.000000000 -0400
71702 @@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
71703 static void copy_aal_stats(struct k_atm_aal_stats *from,
71704 struct atm_aal_stats *to)
71705 {
71706 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
71707 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
71708 __AAL_STAT_ITEMS
71709 #undef __HANDLE_ITEM
71710 }
71711 @@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_
71712 static void subtract_aal_stats(struct k_atm_aal_stats *from,
71713 struct atm_aal_stats *to)
71714 {
71715 -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
71716 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
71717 __AAL_STAT_ITEMS
71718 #undef __HANDLE_ITEM
71719 }
71720 diff -urNp linux-3.0.8/net/batman-adv/hard-interface.c linux-3.0.8/net/batman-adv/hard-interface.c
71721 --- linux-3.0.8/net/batman-adv/hard-interface.c 2011-07-21 22:17:23.000000000 -0400
71722 +++ linux-3.0.8/net/batman-adv/hard-interface.c 2011-08-23 21:47:56.000000000 -0400
71723 @@ -351,8 +351,8 @@ int hardif_enable_interface(struct hard_
71724 hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
71725 dev_add_pack(&hard_iface->batman_adv_ptype);
71726
71727 - atomic_set(&hard_iface->seqno, 1);
71728 - atomic_set(&hard_iface->frag_seqno, 1);
71729 + atomic_set_unchecked(&hard_iface->seqno, 1);
71730 + atomic_set_unchecked(&hard_iface->frag_seqno, 1);
71731 bat_info(hard_iface->soft_iface, "Adding interface: %s\n",
71732 hard_iface->net_dev->name);
71733
71734 diff -urNp linux-3.0.8/net/batman-adv/routing.c linux-3.0.8/net/batman-adv/routing.c
71735 --- linux-3.0.8/net/batman-adv/routing.c 2011-07-21 22:17:23.000000000 -0400
71736 +++ linux-3.0.8/net/batman-adv/routing.c 2011-08-23 21:47:56.000000000 -0400
71737 @@ -627,7 +627,7 @@ void receive_bat_packet(struct ethhdr *e
71738 return;
71739
71740 /* could be changed by schedule_own_packet() */
71741 - if_incoming_seqno = atomic_read(&if_incoming->seqno);
71742 + if_incoming_seqno = atomic_read_unchecked(&if_incoming->seqno);
71743
71744 has_directlink_flag = (batman_packet->flags & DIRECTLINK ? 1 : 0);
71745
71746 diff -urNp linux-3.0.8/net/batman-adv/send.c linux-3.0.8/net/batman-adv/send.c
71747 --- linux-3.0.8/net/batman-adv/send.c 2011-07-21 22:17:23.000000000 -0400
71748 +++ linux-3.0.8/net/batman-adv/send.c 2011-08-23 21:47:56.000000000 -0400
71749 @@ -279,7 +279,7 @@ void schedule_own_packet(struct hard_ifa
71750
71751 /* change sequence number to network order */
71752 batman_packet->seqno =
71753 - htonl((uint32_t)atomic_read(&hard_iface->seqno));
71754 + htonl((uint32_t)atomic_read_unchecked(&hard_iface->seqno));
71755
71756 if (vis_server == VIS_TYPE_SERVER_SYNC)
71757 batman_packet->flags |= VIS_SERVER;
71758 @@ -293,7 +293,7 @@ void schedule_own_packet(struct hard_ifa
71759 else
71760 batman_packet->gw_flags = 0;
71761
71762 - atomic_inc(&hard_iface->seqno);
71763 + atomic_inc_unchecked(&hard_iface->seqno);
71764
71765 slide_own_bcast_window(hard_iface);
71766 send_time = own_send_time(bat_priv);
71767 diff -urNp linux-3.0.8/net/batman-adv/soft-interface.c linux-3.0.8/net/batman-adv/soft-interface.c
71768 --- linux-3.0.8/net/batman-adv/soft-interface.c 2011-07-21 22:17:23.000000000 -0400
71769 +++ linux-3.0.8/net/batman-adv/soft-interface.c 2011-08-23 21:47:56.000000000 -0400
71770 @@ -628,7 +628,7 @@ int interface_tx(struct sk_buff *skb, st
71771
71772 /* set broadcast sequence number */
71773 bcast_packet->seqno =
71774 - htonl(atomic_inc_return(&bat_priv->bcast_seqno));
71775 + htonl(atomic_inc_return_unchecked(&bat_priv->bcast_seqno));
71776
71777 add_bcast_packet_to_list(bat_priv, skb);
71778
71779 @@ -830,7 +830,7 @@ struct net_device *softif_create(char *n
71780 atomic_set(&bat_priv->batman_queue_left, BATMAN_QUEUE_LEN);
71781
71782 atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
71783 - atomic_set(&bat_priv->bcast_seqno, 1);
71784 + atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
71785 atomic_set(&bat_priv->tt_local_changed, 0);
71786
71787 bat_priv->primary_if = NULL;
71788 diff -urNp linux-3.0.8/net/batman-adv/types.h linux-3.0.8/net/batman-adv/types.h
71789 --- linux-3.0.8/net/batman-adv/types.h 2011-07-21 22:17:23.000000000 -0400
71790 +++ linux-3.0.8/net/batman-adv/types.h 2011-08-23 21:47:56.000000000 -0400
71791 @@ -38,8 +38,8 @@ struct hard_iface {
71792 int16_t if_num;
71793 char if_status;
71794 struct net_device *net_dev;
71795 - atomic_t seqno;
71796 - atomic_t frag_seqno;
71797 + atomic_unchecked_t seqno;
71798 + atomic_unchecked_t frag_seqno;
71799 unsigned char *packet_buff;
71800 int packet_len;
71801 struct kobject *hardif_obj;
71802 @@ -142,7 +142,7 @@ struct bat_priv {
71803 atomic_t orig_interval; /* uint */
71804 atomic_t hop_penalty; /* uint */
71805 atomic_t log_level; /* uint */
71806 - atomic_t bcast_seqno;
71807 + atomic_unchecked_t bcast_seqno;
71808 atomic_t bcast_queue_left;
71809 atomic_t batman_queue_left;
71810 char num_ifaces;
71811 diff -urNp linux-3.0.8/net/batman-adv/unicast.c linux-3.0.8/net/batman-adv/unicast.c
71812 --- linux-3.0.8/net/batman-adv/unicast.c 2011-07-21 22:17:23.000000000 -0400
71813 +++ linux-3.0.8/net/batman-adv/unicast.c 2011-08-23 21:47:56.000000000 -0400
71814 @@ -265,7 +265,7 @@ int frag_send_skb(struct sk_buff *skb, s
71815 frag1->flags = UNI_FRAG_HEAD | large_tail;
71816 frag2->flags = large_tail;
71817
71818 - seqno = atomic_add_return(2, &hard_iface->frag_seqno);
71819 + seqno = atomic_add_return_unchecked(2, &hard_iface->frag_seqno);
71820 frag1->seqno = htons(seqno - 1);
71821 frag2->seqno = htons(seqno);
71822
71823 diff -urNp linux-3.0.8/net/bridge/br_multicast.c linux-3.0.8/net/bridge/br_multicast.c
71824 --- linux-3.0.8/net/bridge/br_multicast.c 2011-10-24 08:05:30.000000000 -0400
71825 +++ linux-3.0.8/net/bridge/br_multicast.c 2011-10-16 21:55:28.000000000 -0400
71826 @@ -1485,7 +1485,7 @@ static int br_multicast_ipv6_rcv(struct
71827 nexthdr = ip6h->nexthdr;
71828 offset = ipv6_skip_exthdr(skb, sizeof(*ip6h), &nexthdr);
71829
71830 - if (offset < 0 || nexthdr != IPPROTO_ICMPV6)
71831 + if (nexthdr != IPPROTO_ICMPV6)
71832 return 0;
71833
71834 /* Okay, we found ICMPv6 header */
71835 diff -urNp linux-3.0.8/net/bridge/netfilter/ebtables.c linux-3.0.8/net/bridge/netfilter/ebtables.c
71836 --- linux-3.0.8/net/bridge/netfilter/ebtables.c 2011-07-21 22:17:23.000000000 -0400
71837 +++ linux-3.0.8/net/bridge/netfilter/ebtables.c 2011-08-23 21:48:14.000000000 -0400
71838 @@ -1512,7 +1512,7 @@ static int do_ebt_get_ctl(struct sock *s
71839 tmp.valid_hooks = t->table->valid_hooks;
71840 }
71841 mutex_unlock(&ebt_mutex);
71842 - if (copy_to_user(user, &tmp, *len) != 0){
71843 + if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0){
71844 BUGPRINT("c2u Didn't work\n");
71845 ret = -EFAULT;
71846 break;
71847 @@ -1780,6 +1780,8 @@ static int compat_copy_everything_to_use
71848 int ret;
71849 void __user *pos;
71850
71851 + pax_track_stack();
71852 +
71853 memset(&tinfo, 0, sizeof(tinfo));
71854
71855 if (cmd == EBT_SO_GET_ENTRIES) {
71856 diff -urNp linux-3.0.8/net/caif/caif_socket.c linux-3.0.8/net/caif/caif_socket.c
71857 --- linux-3.0.8/net/caif/caif_socket.c 2011-07-21 22:17:23.000000000 -0400
71858 +++ linux-3.0.8/net/caif/caif_socket.c 2011-08-23 21:47:56.000000000 -0400
71859 @@ -48,19 +48,20 @@ static struct dentry *debugfsdir;
71860 #ifdef CONFIG_DEBUG_FS
71861 struct debug_fs_counter {
71862 atomic_t caif_nr_socks;
71863 - atomic_t caif_sock_create;
71864 - atomic_t num_connect_req;
71865 - atomic_t num_connect_resp;
71866 - atomic_t num_connect_fail_resp;
71867 - atomic_t num_disconnect;
71868 - atomic_t num_remote_shutdown_ind;
71869 - atomic_t num_tx_flow_off_ind;
71870 - atomic_t num_tx_flow_on_ind;
71871 - atomic_t num_rx_flow_off;
71872 - atomic_t num_rx_flow_on;
71873 + atomic_unchecked_t caif_sock_create;
71874 + atomic_unchecked_t num_connect_req;
71875 + atomic_unchecked_t num_connect_resp;
71876 + atomic_unchecked_t num_connect_fail_resp;
71877 + atomic_unchecked_t num_disconnect;
71878 + atomic_unchecked_t num_remote_shutdown_ind;
71879 + atomic_unchecked_t num_tx_flow_off_ind;
71880 + atomic_unchecked_t num_tx_flow_on_ind;
71881 + atomic_unchecked_t num_rx_flow_off;
71882 + atomic_unchecked_t num_rx_flow_on;
71883 };
71884 static struct debug_fs_counter cnt;
71885 #define dbfs_atomic_inc(v) atomic_inc_return(v)
71886 +#define dbfs_atomic_inc_unchecked(v) atomic_inc_return_unchecked(v)
71887 #define dbfs_atomic_dec(v) atomic_dec_return(v)
71888 #else
71889 #define dbfs_atomic_inc(v) 0
71890 @@ -161,7 +162,7 @@ static int caif_queue_rcv_skb(struct soc
71891 atomic_read(&cf_sk->sk.sk_rmem_alloc),
71892 sk_rcvbuf_lowwater(cf_sk));
71893 set_rx_flow_off(cf_sk);
71894 - dbfs_atomic_inc(&cnt.num_rx_flow_off);
71895 + dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
71896 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
71897 }
71898
71899 @@ -172,7 +173,7 @@ static int caif_queue_rcv_skb(struct soc
71900 set_rx_flow_off(cf_sk);
71901 if (net_ratelimit())
71902 pr_debug("sending flow OFF due to rmem_schedule\n");
71903 - dbfs_atomic_inc(&cnt.num_rx_flow_off);
71904 + dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
71905 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
71906 }
71907 skb->dev = NULL;
71908 @@ -233,14 +234,14 @@ static void caif_ctrl_cb(struct cflayer
71909 switch (flow) {
71910 case CAIF_CTRLCMD_FLOW_ON_IND:
71911 /* OK from modem to start sending again */
71912 - dbfs_atomic_inc(&cnt.num_tx_flow_on_ind);
71913 + dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_on_ind);
71914 set_tx_flow_on(cf_sk);
71915 cf_sk->sk.sk_state_change(&cf_sk->sk);
71916 break;
71917
71918 case CAIF_CTRLCMD_FLOW_OFF_IND:
71919 /* Modem asks us to shut up */
71920 - dbfs_atomic_inc(&cnt.num_tx_flow_off_ind);
71921 + dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_off_ind);
71922 set_tx_flow_off(cf_sk);
71923 cf_sk->sk.sk_state_change(&cf_sk->sk);
71924 break;
71925 @@ -249,7 +250,7 @@ static void caif_ctrl_cb(struct cflayer
71926 /* We're now connected */
71927 caif_client_register_refcnt(&cf_sk->layer,
71928 cfsk_hold, cfsk_put);
71929 - dbfs_atomic_inc(&cnt.num_connect_resp);
71930 + dbfs_atomic_inc_unchecked(&cnt.num_connect_resp);
71931 cf_sk->sk.sk_state = CAIF_CONNECTED;
71932 set_tx_flow_on(cf_sk);
71933 cf_sk->sk.sk_state_change(&cf_sk->sk);
71934 @@ -263,7 +264,7 @@ static void caif_ctrl_cb(struct cflayer
71935
71936 case CAIF_CTRLCMD_INIT_FAIL_RSP:
71937 /* Connect request failed */
71938 - dbfs_atomic_inc(&cnt.num_connect_fail_resp);
71939 + dbfs_atomic_inc_unchecked(&cnt.num_connect_fail_resp);
71940 cf_sk->sk.sk_err = ECONNREFUSED;
71941 cf_sk->sk.sk_state = CAIF_DISCONNECTED;
71942 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
71943 @@ -277,7 +278,7 @@ static void caif_ctrl_cb(struct cflayer
71944
71945 case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND:
71946 /* Modem has closed this connection, or device is down. */
71947 - dbfs_atomic_inc(&cnt.num_remote_shutdown_ind);
71948 + dbfs_atomic_inc_unchecked(&cnt.num_remote_shutdown_ind);
71949 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
71950 cf_sk->sk.sk_err = ECONNRESET;
71951 set_rx_flow_on(cf_sk);
71952 @@ -297,7 +298,7 @@ static void caif_check_flow_release(stru
71953 return;
71954
71955 if (atomic_read(&sk->sk_rmem_alloc) <= sk_rcvbuf_lowwater(cf_sk)) {
71956 - dbfs_atomic_inc(&cnt.num_rx_flow_on);
71957 + dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_on);
71958 set_rx_flow_on(cf_sk);
71959 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_ON_REQ);
71960 }
71961 @@ -854,7 +855,7 @@ static int caif_connect(struct socket *s
71962 /*ifindex = id of the interface.*/
71963 cf_sk->conn_req.ifindex = cf_sk->sk.sk_bound_dev_if;
71964
71965 - dbfs_atomic_inc(&cnt.num_connect_req);
71966 + dbfs_atomic_inc_unchecked(&cnt.num_connect_req);
71967 cf_sk->layer.receive = caif_sktrecv_cb;
71968
71969 err = caif_connect_client(sock_net(sk), &cf_sk->conn_req,
71970 @@ -943,7 +944,7 @@ static int caif_release(struct socket *s
71971 spin_unlock_bh(&sk->sk_receive_queue.lock);
71972 sock->sk = NULL;
71973
71974 - dbfs_atomic_inc(&cnt.num_disconnect);
71975 + dbfs_atomic_inc_unchecked(&cnt.num_disconnect);
71976
71977 WARN_ON(IS_ERR(cf_sk->debugfs_socket_dir));
71978 if (cf_sk->debugfs_socket_dir != NULL)
71979 @@ -1122,7 +1123,7 @@ static int caif_create(struct net *net,
71980 cf_sk->conn_req.protocol = protocol;
71981 /* Increase the number of sockets created. */
71982 dbfs_atomic_inc(&cnt.caif_nr_socks);
71983 - num = dbfs_atomic_inc(&cnt.caif_sock_create);
71984 + num = dbfs_atomic_inc_unchecked(&cnt.caif_sock_create);
71985 #ifdef CONFIG_DEBUG_FS
71986 if (!IS_ERR(debugfsdir)) {
71987
71988 diff -urNp linux-3.0.8/net/caif/cfctrl.c linux-3.0.8/net/caif/cfctrl.c
71989 --- linux-3.0.8/net/caif/cfctrl.c 2011-07-21 22:17:23.000000000 -0400
71990 +++ linux-3.0.8/net/caif/cfctrl.c 2011-08-23 21:48:14.000000000 -0400
71991 @@ -9,6 +9,7 @@
71992 #include <linux/stddef.h>
71993 #include <linux/spinlock.h>
71994 #include <linux/slab.h>
71995 +#include <linux/sched.h>
71996 #include <net/caif/caif_layer.h>
71997 #include <net/caif/cfpkt.h>
71998 #include <net/caif/cfctrl.h>
71999 @@ -45,8 +46,8 @@ struct cflayer *cfctrl_create(void)
72000 dev_info.id = 0xff;
72001 memset(this, 0, sizeof(*this));
72002 cfsrvl_init(&this->serv, 0, &dev_info, false);
72003 - atomic_set(&this->req_seq_no, 1);
72004 - atomic_set(&this->rsp_seq_no, 1);
72005 + atomic_set_unchecked(&this->req_seq_no, 1);
72006 + atomic_set_unchecked(&this->rsp_seq_no, 1);
72007 this->serv.layer.receive = cfctrl_recv;
72008 sprintf(this->serv.layer.name, "ctrl");
72009 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
72010 @@ -132,8 +133,8 @@ static void cfctrl_insert_req(struct cfc
72011 struct cfctrl_request_info *req)
72012 {
72013 spin_lock_bh(&ctrl->info_list_lock);
72014 - atomic_inc(&ctrl->req_seq_no);
72015 - req->sequence_no = atomic_read(&ctrl->req_seq_no);
72016 + atomic_inc_unchecked(&ctrl->req_seq_no);
72017 + req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
72018 list_add_tail(&req->list, &ctrl->list);
72019 spin_unlock_bh(&ctrl->info_list_lock);
72020 }
72021 @@ -151,7 +152,7 @@ static struct cfctrl_request_info *cfctr
72022 if (p != first)
72023 pr_warn("Requests are not received in order\n");
72024
72025 - atomic_set(&ctrl->rsp_seq_no,
72026 + atomic_set_unchecked(&ctrl->rsp_seq_no,
72027 p->sequence_no);
72028 list_del(&p->list);
72029 goto out;
72030 @@ -364,6 +365,7 @@ static int cfctrl_recv(struct cflayer *l
72031 struct cfctrl *cfctrl = container_obj(layer);
72032 struct cfctrl_request_info rsp, *req;
72033
72034 + pax_track_stack();
72035
72036 cfpkt_extr_head(pkt, &cmdrsp, 1);
72037 cmd = cmdrsp & CFCTRL_CMD_MASK;
72038 diff -urNp linux-3.0.8/net/compat.c linux-3.0.8/net/compat.c
72039 --- linux-3.0.8/net/compat.c 2011-07-21 22:17:23.000000000 -0400
72040 +++ linux-3.0.8/net/compat.c 2011-10-06 04:17:55.000000000 -0400
72041 @@ -70,9 +70,9 @@ int get_compat_msghdr(struct msghdr *kms
72042 __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
72043 __get_user(kmsg->msg_flags, &umsg->msg_flags))
72044 return -EFAULT;
72045 - kmsg->msg_name = compat_ptr(tmp1);
72046 - kmsg->msg_iov = compat_ptr(tmp2);
72047 - kmsg->msg_control = compat_ptr(tmp3);
72048 + kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
72049 + kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
72050 + kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
72051 return 0;
72052 }
72053
72054 @@ -84,7 +84,7 @@ int verify_compat_iovec(struct msghdr *k
72055
72056 if (kern_msg->msg_namelen) {
72057 if (mode == VERIFY_READ) {
72058 - int err = move_addr_to_kernel(kern_msg->msg_name,
72059 + int err = move_addr_to_kernel((void __force_user *)kern_msg->msg_name,
72060 kern_msg->msg_namelen,
72061 kern_address);
72062 if (err < 0)
72063 @@ -95,7 +95,7 @@ int verify_compat_iovec(struct msghdr *k
72064 kern_msg->msg_name = NULL;
72065
72066 tot_len = iov_from_user_compat_to_kern(kern_iov,
72067 - (struct compat_iovec __user *)kern_msg->msg_iov,
72068 + (struct compat_iovec __force_user *)kern_msg->msg_iov,
72069 kern_msg->msg_iovlen);
72070 if (tot_len >= 0)
72071 kern_msg->msg_iov = kern_iov;
72072 @@ -115,20 +115,20 @@ int verify_compat_iovec(struct msghdr *k
72073
72074 #define CMSG_COMPAT_FIRSTHDR(msg) \
72075 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
72076 - (struct compat_cmsghdr __user *)((msg)->msg_control) : \
72077 + (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
72078 (struct compat_cmsghdr __user *)NULL)
72079
72080 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
72081 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
72082 (ucmlen) <= (unsigned long) \
72083 ((mhdr)->msg_controllen - \
72084 - ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
72085 + ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
72086
72087 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
72088 struct compat_cmsghdr __user *cmsg, int cmsg_len)
72089 {
72090 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
72091 - if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
72092 + if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
72093 msg->msg_controllen)
72094 return NULL;
72095 return (struct compat_cmsghdr __user *)ptr;
72096 @@ -220,7 +220,7 @@ int put_cmsg_compat(struct msghdr *kmsg,
72097 {
72098 struct compat_timeval ctv;
72099 struct compat_timespec cts[3];
72100 - struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
72101 + struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
72102 struct compat_cmsghdr cmhdr;
72103 int cmlen;
72104
72105 @@ -272,7 +272,7 @@ int put_cmsg_compat(struct msghdr *kmsg,
72106
72107 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
72108 {
72109 - struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
72110 + struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
72111 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
72112 int fdnum = scm->fp->count;
72113 struct file **fp = scm->fp->fp;
72114 @@ -369,7 +369,7 @@ static int do_set_sock_timeout(struct so
72115 return -EFAULT;
72116 old_fs = get_fs();
72117 set_fs(KERNEL_DS);
72118 - err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
72119 + err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime));
72120 set_fs(old_fs);
72121
72122 return err;
72123 @@ -430,7 +430,7 @@ static int do_get_sock_timeout(struct so
72124 len = sizeof(ktime);
72125 old_fs = get_fs();
72126 set_fs(KERNEL_DS);
72127 - err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
72128 + err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
72129 set_fs(old_fs);
72130
72131 if (!err) {
72132 @@ -565,7 +565,7 @@ int compat_mc_setsockopt(struct sock *so
72133 case MCAST_JOIN_GROUP:
72134 case MCAST_LEAVE_GROUP:
72135 {
72136 - struct compat_group_req __user *gr32 = (void *)optval;
72137 + struct compat_group_req __user *gr32 = (void __user *)optval;
72138 struct group_req __user *kgr =
72139 compat_alloc_user_space(sizeof(struct group_req));
72140 u32 interface;
72141 @@ -586,7 +586,7 @@ int compat_mc_setsockopt(struct sock *so
72142 case MCAST_BLOCK_SOURCE:
72143 case MCAST_UNBLOCK_SOURCE:
72144 {
72145 - struct compat_group_source_req __user *gsr32 = (void *)optval;
72146 + struct compat_group_source_req __user *gsr32 = (void __user *)optval;
72147 struct group_source_req __user *kgsr = compat_alloc_user_space(
72148 sizeof(struct group_source_req));
72149 u32 interface;
72150 @@ -607,7 +607,7 @@ int compat_mc_setsockopt(struct sock *so
72151 }
72152 case MCAST_MSFILTER:
72153 {
72154 - struct compat_group_filter __user *gf32 = (void *)optval;
72155 + struct compat_group_filter __user *gf32 = (void __user *)optval;
72156 struct group_filter __user *kgf;
72157 u32 interface, fmode, numsrc;
72158
72159 @@ -645,7 +645,7 @@ int compat_mc_getsockopt(struct sock *so
72160 char __user *optval, int __user *optlen,
72161 int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
72162 {
72163 - struct compat_group_filter __user *gf32 = (void *)optval;
72164 + struct compat_group_filter __user *gf32 = (void __user *)optval;
72165 struct group_filter __user *kgf;
72166 int __user *koptlen;
72167 u32 interface, fmode, numsrc;
72168 diff -urNp linux-3.0.8/net/core/datagram.c linux-3.0.8/net/core/datagram.c
72169 --- linux-3.0.8/net/core/datagram.c 2011-07-21 22:17:23.000000000 -0400
72170 +++ linux-3.0.8/net/core/datagram.c 2011-08-23 21:47:56.000000000 -0400
72171 @@ -285,7 +285,7 @@ int skb_kill_datagram(struct sock *sk, s
72172 }
72173
72174 kfree_skb(skb);
72175 - atomic_inc(&sk->sk_drops);
72176 + atomic_inc_unchecked(&sk->sk_drops);
72177 sk_mem_reclaim_partial(sk);
72178
72179 return err;
72180 diff -urNp linux-3.0.8/net/core/dev.c linux-3.0.8/net/core/dev.c
72181 --- linux-3.0.8/net/core/dev.c 2011-07-21 22:17:23.000000000 -0400
72182 +++ linux-3.0.8/net/core/dev.c 2011-08-23 21:48:14.000000000 -0400
72183 @@ -1125,10 +1125,14 @@ void dev_load(struct net *net, const cha
72184 if (no_module && capable(CAP_NET_ADMIN))
72185 no_module = request_module("netdev-%s", name);
72186 if (no_module && capable(CAP_SYS_MODULE)) {
72187 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
72188 + ___request_module(true, "grsec_modharden_netdev", "%s", name);
72189 +#else
72190 if (!request_module("%s", name))
72191 pr_err("Loading kernel module for a network device "
72192 "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
72193 "instead\n", name);
72194 +#endif
72195 }
72196 }
72197 EXPORT_SYMBOL(dev_load);
72198 @@ -1959,7 +1963,7 @@ static int illegal_highdma(struct net_de
72199
72200 struct dev_gso_cb {
72201 void (*destructor)(struct sk_buff *skb);
72202 -};
72203 +} __no_const;
72204
72205 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
72206
72207 @@ -2912,7 +2916,7 @@ int netif_rx_ni(struct sk_buff *skb)
72208 }
72209 EXPORT_SYMBOL(netif_rx_ni);
72210
72211 -static void net_tx_action(struct softirq_action *h)
72212 +static void net_tx_action(void)
72213 {
72214 struct softnet_data *sd = &__get_cpu_var(softnet_data);
72215
72216 @@ -3761,7 +3765,7 @@ void netif_napi_del(struct napi_struct *
72217 }
72218 EXPORT_SYMBOL(netif_napi_del);
72219
72220 -static void net_rx_action(struct softirq_action *h)
72221 +static void net_rx_action(void)
72222 {
72223 struct softnet_data *sd = &__get_cpu_var(softnet_data);
72224 unsigned long time_limit = jiffies + 2;
72225 diff -urNp linux-3.0.8/net/core/flow.c linux-3.0.8/net/core/flow.c
72226 --- linux-3.0.8/net/core/flow.c 2011-07-21 22:17:23.000000000 -0400
72227 +++ linux-3.0.8/net/core/flow.c 2011-08-23 21:47:56.000000000 -0400
72228 @@ -60,7 +60,7 @@ struct flow_cache {
72229 struct timer_list rnd_timer;
72230 };
72231
72232 -atomic_t flow_cache_genid = ATOMIC_INIT(0);
72233 +atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
72234 EXPORT_SYMBOL(flow_cache_genid);
72235 static struct flow_cache flow_cache_global;
72236 static struct kmem_cache *flow_cachep __read_mostly;
72237 @@ -85,7 +85,7 @@ static void flow_cache_new_hashrnd(unsig
72238
72239 static int flow_entry_valid(struct flow_cache_entry *fle)
72240 {
72241 - if (atomic_read(&flow_cache_genid) != fle->genid)
72242 + if (atomic_read_unchecked(&flow_cache_genid) != fle->genid)
72243 return 0;
72244 if (fle->object && !fle->object->ops->check(fle->object))
72245 return 0;
72246 @@ -253,7 +253,7 @@ flow_cache_lookup(struct net *net, const
72247 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
72248 fcp->hash_count++;
72249 }
72250 - } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
72251 + } else if (likely(fle->genid == atomic_read_unchecked(&flow_cache_genid))) {
72252 flo = fle->object;
72253 if (!flo)
72254 goto ret_object;
72255 @@ -274,7 +274,7 @@ nocache:
72256 }
72257 flo = resolver(net, key, family, dir, flo, ctx);
72258 if (fle) {
72259 - fle->genid = atomic_read(&flow_cache_genid);
72260 + fle->genid = atomic_read_unchecked(&flow_cache_genid);
72261 if (!IS_ERR(flo))
72262 fle->object = flo;
72263 else
72264 diff -urNp linux-3.0.8/net/core/iovec.c linux-3.0.8/net/core/iovec.c
72265 --- linux-3.0.8/net/core/iovec.c 2011-07-21 22:17:23.000000000 -0400
72266 +++ linux-3.0.8/net/core/iovec.c 2011-10-06 04:17:55.000000000 -0400
72267 @@ -42,7 +42,7 @@ int verify_iovec(struct msghdr *m, struc
72268 if (m->msg_namelen) {
72269 if (mode == VERIFY_READ) {
72270 void __user *namep;
72271 - namep = (void __user __force *) m->msg_name;
72272 + namep = (void __force_user *) m->msg_name;
72273 err = move_addr_to_kernel(namep, m->msg_namelen,
72274 address);
72275 if (err < 0)
72276 @@ -54,7 +54,7 @@ int verify_iovec(struct msghdr *m, struc
72277 }
72278
72279 size = m->msg_iovlen * sizeof(struct iovec);
72280 - if (copy_from_user(iov, (void __user __force *) m->msg_iov, size))
72281 + if (copy_from_user(iov, (void __force_user *) m->msg_iov, size))
72282 return -EFAULT;
72283
72284 m->msg_iov = iov;
72285 diff -urNp linux-3.0.8/net/core/rtnetlink.c linux-3.0.8/net/core/rtnetlink.c
72286 --- linux-3.0.8/net/core/rtnetlink.c 2011-07-21 22:17:23.000000000 -0400
72287 +++ linux-3.0.8/net/core/rtnetlink.c 2011-08-23 21:47:56.000000000 -0400
72288 @@ -56,7 +56,7 @@
72289 struct rtnl_link {
72290 rtnl_doit_func doit;
72291 rtnl_dumpit_func dumpit;
72292 -};
72293 +} __no_const;
72294
72295 static DEFINE_MUTEX(rtnl_mutex);
72296
72297 diff -urNp linux-3.0.8/net/core/scm.c linux-3.0.8/net/core/scm.c
72298 --- linux-3.0.8/net/core/scm.c 2011-10-24 08:05:30.000000000 -0400
72299 +++ linux-3.0.8/net/core/scm.c 2011-10-16 21:55:28.000000000 -0400
72300 @@ -218,7 +218,7 @@ EXPORT_SYMBOL(__scm_send);
72301 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
72302 {
72303 struct cmsghdr __user *cm
72304 - = (__force struct cmsghdr __user *)msg->msg_control;
72305 + = (struct cmsghdr __force_user *)msg->msg_control;
72306 struct cmsghdr cmhdr;
72307 int cmlen = CMSG_LEN(len);
72308 int err;
72309 @@ -241,7 +241,7 @@ int put_cmsg(struct msghdr * msg, int le
72310 err = -EFAULT;
72311 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
72312 goto out;
72313 - if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
72314 + if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
72315 goto out;
72316 cmlen = CMSG_SPACE(len);
72317 if (msg->msg_controllen < cmlen)
72318 @@ -257,7 +257,7 @@ EXPORT_SYMBOL(put_cmsg);
72319 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
72320 {
72321 struct cmsghdr __user *cm
72322 - = (__force struct cmsghdr __user*)msg->msg_control;
72323 + = (struct cmsghdr __force_user *)msg->msg_control;
72324
72325 int fdmax = 0;
72326 int fdnum = scm->fp->count;
72327 @@ -277,7 +277,7 @@ void scm_detach_fds(struct msghdr *msg,
72328 if (fdnum < fdmax)
72329 fdmax = fdnum;
72330
72331 - for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
72332 + for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
72333 i++, cmfptr++)
72334 {
72335 int new_fd;
72336 diff -urNp linux-3.0.8/net/core/skbuff.c linux-3.0.8/net/core/skbuff.c
72337 --- linux-3.0.8/net/core/skbuff.c 2011-07-21 22:17:23.000000000 -0400
72338 +++ linux-3.0.8/net/core/skbuff.c 2011-08-23 21:48:14.000000000 -0400
72339 @@ -1543,6 +1543,8 @@ int skb_splice_bits(struct sk_buff *skb,
72340 struct sock *sk = skb->sk;
72341 int ret = 0;
72342
72343 + pax_track_stack();
72344 +
72345 if (splice_grow_spd(pipe, &spd))
72346 return -ENOMEM;
72347
72348 diff -urNp linux-3.0.8/net/core/sock.c linux-3.0.8/net/core/sock.c
72349 --- linux-3.0.8/net/core/sock.c 2011-07-21 22:17:23.000000000 -0400
72350 +++ linux-3.0.8/net/core/sock.c 2011-08-23 21:48:14.000000000 -0400
72351 @@ -291,7 +291,7 @@ int sock_queue_rcv_skb(struct sock *sk,
72352 */
72353 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
72354 (unsigned)sk->sk_rcvbuf) {
72355 - atomic_inc(&sk->sk_drops);
72356 + atomic_inc_unchecked(&sk->sk_drops);
72357 return -ENOMEM;
72358 }
72359
72360 @@ -300,7 +300,7 @@ int sock_queue_rcv_skb(struct sock *sk,
72361 return err;
72362
72363 if (!sk_rmem_schedule(sk, skb->truesize)) {
72364 - atomic_inc(&sk->sk_drops);
72365 + atomic_inc_unchecked(&sk->sk_drops);
72366 return -ENOBUFS;
72367 }
72368
72369 @@ -320,7 +320,7 @@ int sock_queue_rcv_skb(struct sock *sk,
72370 skb_dst_force(skb);
72371
72372 spin_lock_irqsave(&list->lock, flags);
72373 - skb->dropcount = atomic_read(&sk->sk_drops);
72374 + skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
72375 __skb_queue_tail(list, skb);
72376 spin_unlock_irqrestore(&list->lock, flags);
72377
72378 @@ -340,7 +340,7 @@ int sk_receive_skb(struct sock *sk, stru
72379 skb->dev = NULL;
72380
72381 if (sk_rcvqueues_full(sk, skb)) {
72382 - atomic_inc(&sk->sk_drops);
72383 + atomic_inc_unchecked(&sk->sk_drops);
72384 goto discard_and_relse;
72385 }
72386 if (nested)
72387 @@ -358,7 +358,7 @@ int sk_receive_skb(struct sock *sk, stru
72388 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
72389 } else if (sk_add_backlog(sk, skb)) {
72390 bh_unlock_sock(sk);
72391 - atomic_inc(&sk->sk_drops);
72392 + atomic_inc_unchecked(&sk->sk_drops);
72393 goto discard_and_relse;
72394 }
72395
72396 @@ -921,7 +921,7 @@ int sock_getsockopt(struct socket *sock,
72397 if (len > sizeof(peercred))
72398 len = sizeof(peercred);
72399 cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
72400 - if (copy_to_user(optval, &peercred, len))
72401 + if (len > sizeof(peercred) || copy_to_user(optval, &peercred, len))
72402 return -EFAULT;
72403 goto lenout;
72404 }
72405 @@ -934,7 +934,7 @@ int sock_getsockopt(struct socket *sock,
72406 return -ENOTCONN;
72407 if (lv < len)
72408 return -EINVAL;
72409 - if (copy_to_user(optval, address, len))
72410 + if (len > sizeof(address) || copy_to_user(optval, address, len))
72411 return -EFAULT;
72412 goto lenout;
72413 }
72414 @@ -967,7 +967,7 @@ int sock_getsockopt(struct socket *sock,
72415
72416 if (len > lv)
72417 len = lv;
72418 - if (copy_to_user(optval, &v, len))
72419 + if (len > sizeof(v) || copy_to_user(optval, &v, len))
72420 return -EFAULT;
72421 lenout:
72422 if (put_user(len, optlen))
72423 @@ -2023,7 +2023,7 @@ void sock_init_data(struct socket *sock,
72424 */
72425 smp_wmb();
72426 atomic_set(&sk->sk_refcnt, 1);
72427 - atomic_set(&sk->sk_drops, 0);
72428 + atomic_set_unchecked(&sk->sk_drops, 0);
72429 }
72430 EXPORT_SYMBOL(sock_init_data);
72431
72432 diff -urNp linux-3.0.8/net/decnet/sysctl_net_decnet.c linux-3.0.8/net/decnet/sysctl_net_decnet.c
72433 --- linux-3.0.8/net/decnet/sysctl_net_decnet.c 2011-07-21 22:17:23.000000000 -0400
72434 +++ linux-3.0.8/net/decnet/sysctl_net_decnet.c 2011-08-23 21:47:56.000000000 -0400
72435 @@ -173,7 +173,7 @@ static int dn_node_address_handler(ctl_t
72436
72437 if (len > *lenp) len = *lenp;
72438
72439 - if (copy_to_user(buffer, addr, len))
72440 + if (len > sizeof addr || copy_to_user(buffer, addr, len))
72441 return -EFAULT;
72442
72443 *lenp = len;
72444 @@ -236,7 +236,7 @@ static int dn_def_dev_handler(ctl_table
72445
72446 if (len > *lenp) len = *lenp;
72447
72448 - if (copy_to_user(buffer, devname, len))
72449 + if (len > sizeof devname || copy_to_user(buffer, devname, len))
72450 return -EFAULT;
72451
72452 *lenp = len;
72453 diff -urNp linux-3.0.8/net/econet/Kconfig linux-3.0.8/net/econet/Kconfig
72454 --- linux-3.0.8/net/econet/Kconfig 2011-07-21 22:17:23.000000000 -0400
72455 +++ linux-3.0.8/net/econet/Kconfig 2011-08-23 21:48:14.000000000 -0400
72456 @@ -4,7 +4,7 @@
72457
72458 config ECONET
72459 tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
72460 - depends on EXPERIMENTAL && INET
72461 + depends on EXPERIMENTAL && INET && BROKEN
72462 ---help---
72463 Econet is a fairly old and slow networking protocol mainly used by
72464 Acorn computers to access file and print servers. It uses native
72465 diff -urNp linux-3.0.8/net/ipv4/fib_frontend.c linux-3.0.8/net/ipv4/fib_frontend.c
72466 --- linux-3.0.8/net/ipv4/fib_frontend.c 2011-07-21 22:17:23.000000000 -0400
72467 +++ linux-3.0.8/net/ipv4/fib_frontend.c 2011-08-23 21:47:56.000000000 -0400
72468 @@ -970,12 +970,12 @@ static int fib_inetaddr_event(struct not
72469 #ifdef CONFIG_IP_ROUTE_MULTIPATH
72470 fib_sync_up(dev);
72471 #endif
72472 - atomic_inc(&net->ipv4.dev_addr_genid);
72473 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
72474 rt_cache_flush(dev_net(dev), -1);
72475 break;
72476 case NETDEV_DOWN:
72477 fib_del_ifaddr(ifa, NULL);
72478 - atomic_inc(&net->ipv4.dev_addr_genid);
72479 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
72480 if (ifa->ifa_dev->ifa_list == NULL) {
72481 /* Last address was deleted from this interface.
72482 * Disable IP.
72483 @@ -1011,7 +1011,7 @@ static int fib_netdev_event(struct notif
72484 #ifdef CONFIG_IP_ROUTE_MULTIPATH
72485 fib_sync_up(dev);
72486 #endif
72487 - atomic_inc(&net->ipv4.dev_addr_genid);
72488 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
72489 rt_cache_flush(dev_net(dev), -1);
72490 break;
72491 case NETDEV_DOWN:
72492 diff -urNp linux-3.0.8/net/ipv4/fib_semantics.c linux-3.0.8/net/ipv4/fib_semantics.c
72493 --- linux-3.0.8/net/ipv4/fib_semantics.c 2011-07-21 22:17:23.000000000 -0400
72494 +++ linux-3.0.8/net/ipv4/fib_semantics.c 2011-08-23 21:47:56.000000000 -0400
72495 @@ -691,7 +691,7 @@ __be32 fib_info_update_nh_saddr(struct n
72496 nh->nh_saddr = inet_select_addr(nh->nh_dev,
72497 nh->nh_gw,
72498 nh->nh_parent->fib_scope);
72499 - nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
72500 + nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
72501
72502 return nh->nh_saddr;
72503 }
72504 diff -urNp linux-3.0.8/net/ipv4/inet_diag.c linux-3.0.8/net/ipv4/inet_diag.c
72505 --- linux-3.0.8/net/ipv4/inet_diag.c 2011-07-21 22:17:23.000000000 -0400
72506 +++ linux-3.0.8/net/ipv4/inet_diag.c 2011-08-23 21:48:14.000000000 -0400
72507 @@ -114,8 +114,14 @@ static int inet_csk_diag_fill(struct soc
72508 r->idiag_retrans = 0;
72509
72510 r->id.idiag_if = sk->sk_bound_dev_if;
72511 +
72512 +#ifdef CONFIG_GRKERNSEC_HIDESYM
72513 + r->id.idiag_cookie[0] = 0;
72514 + r->id.idiag_cookie[1] = 0;
72515 +#else
72516 r->id.idiag_cookie[0] = (u32)(unsigned long)sk;
72517 r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
72518 +#endif
72519
72520 r->id.idiag_sport = inet->inet_sport;
72521 r->id.idiag_dport = inet->inet_dport;
72522 @@ -201,8 +207,15 @@ static int inet_twsk_diag_fill(struct in
72523 r->idiag_family = tw->tw_family;
72524 r->idiag_retrans = 0;
72525 r->id.idiag_if = tw->tw_bound_dev_if;
72526 +
72527 +#ifdef CONFIG_GRKERNSEC_HIDESYM
72528 + r->id.idiag_cookie[0] = 0;
72529 + r->id.idiag_cookie[1] = 0;
72530 +#else
72531 r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
72532 r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
72533 +#endif
72534 +
72535 r->id.idiag_sport = tw->tw_sport;
72536 r->id.idiag_dport = tw->tw_dport;
72537 r->id.idiag_src[0] = tw->tw_rcv_saddr;
72538 @@ -285,12 +298,14 @@ static int inet_diag_get_exact(struct sk
72539 if (sk == NULL)
72540 goto unlock;
72541
72542 +#ifndef CONFIG_GRKERNSEC_HIDESYM
72543 err = -ESTALE;
72544 if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE ||
72545 req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) &&
72546 ((u32)(unsigned long)sk != req->id.idiag_cookie[0] ||
72547 (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1]))
72548 goto out;
72549 +#endif
72550
72551 err = -ENOMEM;
72552 rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
72553 @@ -580,8 +595,14 @@ static int inet_diag_fill_req(struct sk_
72554 r->idiag_retrans = req->retrans;
72555
72556 r->id.idiag_if = sk->sk_bound_dev_if;
72557 +
72558 +#ifdef CONFIG_GRKERNSEC_HIDESYM
72559 + r->id.idiag_cookie[0] = 0;
72560 + r->id.idiag_cookie[1] = 0;
72561 +#else
72562 r->id.idiag_cookie[0] = (u32)(unsigned long)req;
72563 r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1);
72564 +#endif
72565
72566 tmo = req->expires - jiffies;
72567 if (tmo < 0)
72568 diff -urNp linux-3.0.8/net/ipv4/inet_hashtables.c linux-3.0.8/net/ipv4/inet_hashtables.c
72569 --- linux-3.0.8/net/ipv4/inet_hashtables.c 2011-10-24 08:05:21.000000000 -0400
72570 +++ linux-3.0.8/net/ipv4/inet_hashtables.c 2011-08-23 21:55:24.000000000 -0400
72571 @@ -18,12 +18,15 @@
72572 #include <linux/sched.h>
72573 #include <linux/slab.h>
72574 #include <linux/wait.h>
72575 +#include <linux/security.h>
72576
72577 #include <net/inet_connection_sock.h>
72578 #include <net/inet_hashtables.h>
72579 #include <net/secure_seq.h>
72580 #include <net/ip.h>
72581
72582 +extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
72583 +
72584 /*
72585 * Allocate and initialize a new local port bind bucket.
72586 * The bindhash mutex for snum's hash chain must be held here.
72587 @@ -530,6 +533,8 @@ ok:
72588 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
72589 spin_unlock(&head->lock);
72590
72591 + gr_update_task_in_ip_table(current, inet_sk(sk));
72592 +
72593 if (tw) {
72594 inet_twsk_deschedule(tw, death_row);
72595 while (twrefcnt) {
72596 diff -urNp linux-3.0.8/net/ipv4/inetpeer.c linux-3.0.8/net/ipv4/inetpeer.c
72597 --- linux-3.0.8/net/ipv4/inetpeer.c 2011-10-24 08:05:21.000000000 -0400
72598 +++ linux-3.0.8/net/ipv4/inetpeer.c 2011-08-23 21:48:14.000000000 -0400
72599 @@ -481,6 +481,8 @@ struct inet_peer *inet_getpeer(struct in
72600 unsigned int sequence;
72601 int invalidated, newrefcnt = 0;
72602
72603 + pax_track_stack();
72604 +
72605 /* Look up for the address quickly, lockless.
72606 * Because of a concurrent writer, we might not find an existing entry.
72607 */
72608 @@ -517,8 +519,8 @@ found: /* The existing node has been fo
72609 if (p) {
72610 p->daddr = *daddr;
72611 atomic_set(&p->refcnt, 1);
72612 - atomic_set(&p->rid, 0);
72613 - atomic_set(&p->ip_id_count, secure_ip_id(daddr->addr.a4));
72614 + atomic_set_unchecked(&p->rid, 0);
72615 + atomic_set_unchecked(&p->ip_id_count, secure_ip_id(daddr->addr.a4));
72616 p->tcp_ts_stamp = 0;
72617 p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
72618 p->rate_tokens = 0;
72619 diff -urNp linux-3.0.8/net/ipv4/ipconfig.c linux-3.0.8/net/ipv4/ipconfig.c
72620 --- linux-3.0.8/net/ipv4/ipconfig.c 2011-07-21 22:17:23.000000000 -0400
72621 +++ linux-3.0.8/net/ipv4/ipconfig.c 2011-10-06 04:17:55.000000000 -0400
72622 @@ -313,7 +313,7 @@ static int __init ic_devinet_ioctl(unsig
72623
72624 mm_segment_t oldfs = get_fs();
72625 set_fs(get_ds());
72626 - res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
72627 + res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
72628 set_fs(oldfs);
72629 return res;
72630 }
72631 @@ -324,7 +324,7 @@ static int __init ic_dev_ioctl(unsigned
72632
72633 mm_segment_t oldfs = get_fs();
72634 set_fs(get_ds());
72635 - res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
72636 + res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
72637 set_fs(oldfs);
72638 return res;
72639 }
72640 @@ -335,7 +335,7 @@ static int __init ic_route_ioctl(unsigne
72641
72642 mm_segment_t oldfs = get_fs();
72643 set_fs(get_ds());
72644 - res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
72645 + res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
72646 set_fs(oldfs);
72647 return res;
72648 }
72649 diff -urNp linux-3.0.8/net/ipv4/ip_fragment.c linux-3.0.8/net/ipv4/ip_fragment.c
72650 --- linux-3.0.8/net/ipv4/ip_fragment.c 2011-07-21 22:17:23.000000000 -0400
72651 +++ linux-3.0.8/net/ipv4/ip_fragment.c 2011-08-23 21:47:56.000000000 -0400
72652 @@ -315,7 +315,7 @@ static inline int ip_frag_too_far(struct
72653 return 0;
72654
72655 start = qp->rid;
72656 - end = atomic_inc_return(&peer->rid);
72657 + end = atomic_inc_return_unchecked(&peer->rid);
72658 qp->rid = end;
72659
72660 rc = qp->q.fragments && (end - start) > max;
72661 diff -urNp linux-3.0.8/net/ipv4/ip_sockglue.c linux-3.0.8/net/ipv4/ip_sockglue.c
72662 --- linux-3.0.8/net/ipv4/ip_sockglue.c 2011-07-21 22:17:23.000000000 -0400
72663 +++ linux-3.0.8/net/ipv4/ip_sockglue.c 2011-10-06 04:17:55.000000000 -0400
72664 @@ -1073,6 +1073,8 @@ static int do_ip_getsockopt(struct sock
72665 int val;
72666 int len;
72667
72668 + pax_track_stack();
72669 +
72670 if (level != SOL_IP)
72671 return -EOPNOTSUPP;
72672
72673 @@ -1110,7 +1112,8 @@ static int do_ip_getsockopt(struct sock
72674 len = min_t(unsigned int, len, opt->optlen);
72675 if (put_user(len, optlen))
72676 return -EFAULT;
72677 - if (copy_to_user(optval, opt->__data, len))
72678 + if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
72679 + copy_to_user(optval, opt->__data, len))
72680 return -EFAULT;
72681 return 0;
72682 }
72683 @@ -1238,7 +1241,7 @@ static int do_ip_getsockopt(struct sock
72684 if (sk->sk_type != SOCK_STREAM)
72685 return -ENOPROTOOPT;
72686
72687 - msg.msg_control = optval;
72688 + msg.msg_control = (void __force_kernel *)optval;
72689 msg.msg_controllen = len;
72690 msg.msg_flags = 0;
72691
72692 diff -urNp linux-3.0.8/net/ipv4/netfilter/nf_nat_snmp_basic.c linux-3.0.8/net/ipv4/netfilter/nf_nat_snmp_basic.c
72693 --- linux-3.0.8/net/ipv4/netfilter/nf_nat_snmp_basic.c 2011-07-21 22:17:23.000000000 -0400
72694 +++ linux-3.0.8/net/ipv4/netfilter/nf_nat_snmp_basic.c 2011-08-23 21:47:56.000000000 -0400
72695 @@ -399,7 +399,7 @@ static unsigned char asn1_octets_decode(
72696
72697 *len = 0;
72698
72699 - *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC);
72700 + *octets = kmalloc((eoc - ctx->pointer), GFP_ATOMIC);
72701 if (*octets == NULL) {
72702 if (net_ratelimit())
72703 pr_notice("OOM in bsalg (%d)\n", __LINE__);
72704 diff -urNp linux-3.0.8/net/ipv4/ping.c linux-3.0.8/net/ipv4/ping.c
72705 --- linux-3.0.8/net/ipv4/ping.c 2011-07-21 22:17:23.000000000 -0400
72706 +++ linux-3.0.8/net/ipv4/ping.c 2011-08-23 21:47:56.000000000 -0400
72707 @@ -837,7 +837,7 @@ static void ping_format_sock(struct sock
72708 sk_rmem_alloc_get(sp),
72709 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
72710 atomic_read(&sp->sk_refcnt), sp,
72711 - atomic_read(&sp->sk_drops), len);
72712 + atomic_read_unchecked(&sp->sk_drops), len);
72713 }
72714
72715 static int ping_seq_show(struct seq_file *seq, void *v)
72716 diff -urNp linux-3.0.8/net/ipv4/raw.c linux-3.0.8/net/ipv4/raw.c
72717 --- linux-3.0.8/net/ipv4/raw.c 2011-07-21 22:17:23.000000000 -0400
72718 +++ linux-3.0.8/net/ipv4/raw.c 2011-08-23 21:48:14.000000000 -0400
72719 @@ -302,7 +302,7 @@ static int raw_rcv_skb(struct sock * sk,
72720 int raw_rcv(struct sock *sk, struct sk_buff *skb)
72721 {
72722 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
72723 - atomic_inc(&sk->sk_drops);
72724 + atomic_inc_unchecked(&sk->sk_drops);
72725 kfree_skb(skb);
72726 return NET_RX_DROP;
72727 }
72728 @@ -736,16 +736,20 @@ static int raw_init(struct sock *sk)
72729
72730 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
72731 {
72732 + struct icmp_filter filter;
72733 +
72734 if (optlen > sizeof(struct icmp_filter))
72735 optlen = sizeof(struct icmp_filter);
72736 - if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
72737 + if (copy_from_user(&filter, optval, optlen))
72738 return -EFAULT;
72739 + raw_sk(sk)->filter = filter;
72740 return 0;
72741 }
72742
72743 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
72744 {
72745 int len, ret = -EFAULT;
72746 + struct icmp_filter filter;
72747
72748 if (get_user(len, optlen))
72749 goto out;
72750 @@ -755,8 +759,9 @@ static int raw_geticmpfilter(struct sock
72751 if (len > sizeof(struct icmp_filter))
72752 len = sizeof(struct icmp_filter);
72753 ret = -EFAULT;
72754 - if (put_user(len, optlen) ||
72755 - copy_to_user(optval, &raw_sk(sk)->filter, len))
72756 + filter = raw_sk(sk)->filter;
72757 + if (put_user(len, optlen) || len > sizeof filter ||
72758 + copy_to_user(optval, &filter, len))
72759 goto out;
72760 ret = 0;
72761 out: return ret;
72762 @@ -984,7 +989,13 @@ static void raw_sock_seq_show(struct seq
72763 sk_wmem_alloc_get(sp),
72764 sk_rmem_alloc_get(sp),
72765 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
72766 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
72767 + atomic_read(&sp->sk_refcnt),
72768 +#ifdef CONFIG_GRKERNSEC_HIDESYM
72769 + NULL,
72770 +#else
72771 + sp,
72772 +#endif
72773 + atomic_read_unchecked(&sp->sk_drops));
72774 }
72775
72776 static int raw_seq_show(struct seq_file *seq, void *v)
72777 diff -urNp linux-3.0.8/net/ipv4/route.c linux-3.0.8/net/ipv4/route.c
72778 --- linux-3.0.8/net/ipv4/route.c 2011-10-24 08:05:30.000000000 -0400
72779 +++ linux-3.0.8/net/ipv4/route.c 2011-10-16 21:55:28.000000000 -0400
72780 @@ -304,7 +304,7 @@ static inline unsigned int rt_hash(__be3
72781
72782 static inline int rt_genid(struct net *net)
72783 {
72784 - return atomic_read(&net->ipv4.rt_genid);
72785 + return atomic_read_unchecked(&net->ipv4.rt_genid);
72786 }
72787
72788 #ifdef CONFIG_PROC_FS
72789 @@ -832,7 +832,7 @@ static void rt_cache_invalidate(struct n
72790 unsigned char shuffle;
72791
72792 get_random_bytes(&shuffle, sizeof(shuffle));
72793 - atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
72794 + atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
72795 }
72796
72797 /*
72798 @@ -2832,7 +2832,7 @@ static int rt_fill_info(struct net *net,
72799 error = rt->dst.error;
72800 if (peer) {
72801 inet_peer_refcheck(rt->peer);
72802 - id = atomic_read(&peer->ip_id_count) & 0xffff;
72803 + id = atomic_read_unchecked(&peer->ip_id_count) & 0xffff;
72804 if (peer->tcp_ts_stamp) {
72805 ts = peer->tcp_ts;
72806 tsage = get_seconds() - peer->tcp_ts_stamp;
72807 diff -urNp linux-3.0.8/net/ipv4/tcp.c linux-3.0.8/net/ipv4/tcp.c
72808 --- linux-3.0.8/net/ipv4/tcp.c 2011-07-21 22:17:23.000000000 -0400
72809 +++ linux-3.0.8/net/ipv4/tcp.c 2011-08-23 21:48:14.000000000 -0400
72810 @@ -2122,6 +2122,8 @@ static int do_tcp_setsockopt(struct sock
72811 int val;
72812 int err = 0;
72813
72814 + pax_track_stack();
72815 +
72816 /* These are data/string values, all the others are ints */
72817 switch (optname) {
72818 case TCP_CONGESTION: {
72819 @@ -2501,6 +2503,8 @@ static int do_tcp_getsockopt(struct sock
72820 struct tcp_sock *tp = tcp_sk(sk);
72821 int val, len;
72822
72823 + pax_track_stack();
72824 +
72825 if (get_user(len, optlen))
72826 return -EFAULT;
72827
72828 diff -urNp linux-3.0.8/net/ipv4/tcp_ipv4.c linux-3.0.8/net/ipv4/tcp_ipv4.c
72829 --- linux-3.0.8/net/ipv4/tcp_ipv4.c 2011-10-24 08:05:21.000000000 -0400
72830 +++ linux-3.0.8/net/ipv4/tcp_ipv4.c 2011-08-23 21:48:14.000000000 -0400
72831 @@ -87,6 +87,9 @@ int sysctl_tcp_tw_reuse __read_mostly;
72832 int sysctl_tcp_low_latency __read_mostly;
72833 EXPORT_SYMBOL(sysctl_tcp_low_latency);
72834
72835 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72836 +extern int grsec_enable_blackhole;
72837 +#endif
72838
72839 #ifdef CONFIG_TCP_MD5SIG
72840 static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
72841 @@ -1607,6 +1610,9 @@ int tcp_v4_do_rcv(struct sock *sk, struc
72842 return 0;
72843
72844 reset:
72845 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72846 + if (!grsec_enable_blackhole)
72847 +#endif
72848 tcp_v4_send_reset(rsk, skb);
72849 discard:
72850 kfree_skb(skb);
72851 @@ -1669,12 +1675,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
72852 TCP_SKB_CB(skb)->sacked = 0;
72853
72854 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
72855 - if (!sk)
72856 + if (!sk) {
72857 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72858 + ret = 1;
72859 +#endif
72860 goto no_tcp_socket;
72861 -
72862 + }
72863 process:
72864 - if (sk->sk_state == TCP_TIME_WAIT)
72865 + if (sk->sk_state == TCP_TIME_WAIT) {
72866 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72867 + ret = 2;
72868 +#endif
72869 goto do_time_wait;
72870 + }
72871
72872 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
72873 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
72874 @@ -1724,6 +1737,10 @@ no_tcp_socket:
72875 bad_packet:
72876 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
72877 } else {
72878 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72879 + if (!grsec_enable_blackhole || (ret == 1 &&
72880 + (skb->dev->flags & IFF_LOOPBACK)))
72881 +#endif
72882 tcp_v4_send_reset(NULL, skb);
72883 }
72884
72885 @@ -2388,7 +2405,11 @@ static void get_openreq4(struct sock *sk
72886 0, /* non standard timer */
72887 0, /* open_requests have no inode */
72888 atomic_read(&sk->sk_refcnt),
72889 +#ifdef CONFIG_GRKERNSEC_HIDESYM
72890 + NULL,
72891 +#else
72892 req,
72893 +#endif
72894 len);
72895 }
72896
72897 @@ -2438,7 +2459,12 @@ static void get_tcp4_sock(struct sock *s
72898 sock_i_uid(sk),
72899 icsk->icsk_probes_out,
72900 sock_i_ino(sk),
72901 - atomic_read(&sk->sk_refcnt), sk,
72902 + atomic_read(&sk->sk_refcnt),
72903 +#ifdef CONFIG_GRKERNSEC_HIDESYM
72904 + NULL,
72905 +#else
72906 + sk,
72907 +#endif
72908 jiffies_to_clock_t(icsk->icsk_rto),
72909 jiffies_to_clock_t(icsk->icsk_ack.ato),
72910 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
72911 @@ -2466,7 +2492,13 @@ static void get_timewait4_sock(struct in
72912 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
72913 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
72914 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
72915 - atomic_read(&tw->tw_refcnt), tw, len);
72916 + atomic_read(&tw->tw_refcnt),
72917 +#ifdef CONFIG_GRKERNSEC_HIDESYM
72918 + NULL,
72919 +#else
72920 + tw,
72921 +#endif
72922 + len);
72923 }
72924
72925 #define TMPSZ 150
72926 diff -urNp linux-3.0.8/net/ipv4/tcp_minisocks.c linux-3.0.8/net/ipv4/tcp_minisocks.c
72927 --- linux-3.0.8/net/ipv4/tcp_minisocks.c 2011-07-21 22:17:23.000000000 -0400
72928 +++ linux-3.0.8/net/ipv4/tcp_minisocks.c 2011-08-23 21:48:14.000000000 -0400
72929 @@ -27,6 +27,10 @@
72930 #include <net/inet_common.h>
72931 #include <net/xfrm.h>
72932
72933 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72934 +extern int grsec_enable_blackhole;
72935 +#endif
72936 +
72937 int sysctl_tcp_syncookies __read_mostly = 1;
72938 EXPORT_SYMBOL(sysctl_tcp_syncookies);
72939
72940 @@ -745,6 +749,10 @@ listen_overflow:
72941
72942 embryonic_reset:
72943 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
72944 +
72945 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72946 + if (!grsec_enable_blackhole)
72947 +#endif
72948 if (!(flg & TCP_FLAG_RST))
72949 req->rsk_ops->send_reset(sk, skb);
72950
72951 diff -urNp linux-3.0.8/net/ipv4/tcp_output.c linux-3.0.8/net/ipv4/tcp_output.c
72952 --- linux-3.0.8/net/ipv4/tcp_output.c 2011-07-21 22:17:23.000000000 -0400
72953 +++ linux-3.0.8/net/ipv4/tcp_output.c 2011-08-23 21:48:14.000000000 -0400
72954 @@ -2421,6 +2421,8 @@ struct sk_buff *tcp_make_synack(struct s
72955 int mss;
72956 int s_data_desired = 0;
72957
72958 + pax_track_stack();
72959 +
72960 if (cvp != NULL && cvp->s_data_constant && cvp->s_data_desired)
72961 s_data_desired = cvp->s_data_desired;
72962 skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15 + s_data_desired, 1, GFP_ATOMIC);
72963 diff -urNp linux-3.0.8/net/ipv4/tcp_probe.c linux-3.0.8/net/ipv4/tcp_probe.c
72964 --- linux-3.0.8/net/ipv4/tcp_probe.c 2011-07-21 22:17:23.000000000 -0400
72965 +++ linux-3.0.8/net/ipv4/tcp_probe.c 2011-08-23 21:47:56.000000000 -0400
72966 @@ -202,7 +202,7 @@ static ssize_t tcpprobe_read(struct file
72967 if (cnt + width >= len)
72968 break;
72969
72970 - if (copy_to_user(buf + cnt, tbuf, width))
72971 + if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
72972 return -EFAULT;
72973 cnt += width;
72974 }
72975 diff -urNp linux-3.0.8/net/ipv4/tcp_timer.c linux-3.0.8/net/ipv4/tcp_timer.c
72976 --- linux-3.0.8/net/ipv4/tcp_timer.c 2011-07-21 22:17:23.000000000 -0400
72977 +++ linux-3.0.8/net/ipv4/tcp_timer.c 2011-08-23 21:48:14.000000000 -0400
72978 @@ -22,6 +22,10 @@
72979 #include <linux/gfp.h>
72980 #include <net/tcp.h>
72981
72982 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72983 +extern int grsec_lastack_retries;
72984 +#endif
72985 +
72986 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
72987 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
72988 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
72989 @@ -199,6 +203,13 @@ static int tcp_write_timeout(struct sock
72990 }
72991 }
72992
72993 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72994 + if ((sk->sk_state == TCP_LAST_ACK) &&
72995 + (grsec_lastack_retries > 0) &&
72996 + (grsec_lastack_retries < retry_until))
72997 + retry_until = grsec_lastack_retries;
72998 +#endif
72999 +
73000 if (retransmits_timed_out(sk, retry_until,
73001 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
73002 /* Has it gone just too far? */
73003 diff -urNp linux-3.0.8/net/ipv4/udp.c linux-3.0.8/net/ipv4/udp.c
73004 --- linux-3.0.8/net/ipv4/udp.c 2011-07-21 22:17:23.000000000 -0400
73005 +++ linux-3.0.8/net/ipv4/udp.c 2011-08-23 21:48:14.000000000 -0400
73006 @@ -86,6 +86,7 @@
73007 #include <linux/types.h>
73008 #include <linux/fcntl.h>
73009 #include <linux/module.h>
73010 +#include <linux/security.h>
73011 #include <linux/socket.h>
73012 #include <linux/sockios.h>
73013 #include <linux/igmp.h>
73014 @@ -107,6 +108,10 @@
73015 #include <net/xfrm.h>
73016 #include "udp_impl.h"
73017
73018 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73019 +extern int grsec_enable_blackhole;
73020 +#endif
73021 +
73022 struct udp_table udp_table __read_mostly;
73023 EXPORT_SYMBOL(udp_table);
73024
73025 @@ -564,6 +569,9 @@ found:
73026 return s;
73027 }
73028
73029 +extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
73030 +extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
73031 +
73032 /*
73033 * This routine is called by the ICMP module when it gets some
73034 * sort of error condition. If err < 0 then the socket should
73035 @@ -855,9 +863,18 @@ int udp_sendmsg(struct kiocb *iocb, stru
73036 dport = usin->sin_port;
73037 if (dport == 0)
73038 return -EINVAL;
73039 +
73040 + err = gr_search_udp_sendmsg(sk, usin);
73041 + if (err)
73042 + return err;
73043 } else {
73044 if (sk->sk_state != TCP_ESTABLISHED)
73045 return -EDESTADDRREQ;
73046 +
73047 + err = gr_search_udp_sendmsg(sk, NULL);
73048 + if (err)
73049 + return err;
73050 +
73051 daddr = inet->inet_daddr;
73052 dport = inet->inet_dport;
73053 /* Open fast path for connected socket.
73054 @@ -1098,7 +1115,7 @@ static unsigned int first_packet_length(
73055 udp_lib_checksum_complete(skb)) {
73056 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
73057 IS_UDPLITE(sk));
73058 - atomic_inc(&sk->sk_drops);
73059 + atomic_inc_unchecked(&sk->sk_drops);
73060 __skb_unlink(skb, rcvq);
73061 __skb_queue_tail(&list_kill, skb);
73062 }
73063 @@ -1184,6 +1201,10 @@ try_again:
73064 if (!skb)
73065 goto out;
73066
73067 + err = gr_search_udp_recvmsg(sk, skb);
73068 + if (err)
73069 + goto out_free;
73070 +
73071 ulen = skb->len - sizeof(struct udphdr);
73072 if (len > ulen)
73073 len = ulen;
73074 @@ -1483,7 +1504,7 @@ int udp_queue_rcv_skb(struct sock *sk, s
73075
73076 drop:
73077 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
73078 - atomic_inc(&sk->sk_drops);
73079 + atomic_inc_unchecked(&sk->sk_drops);
73080 kfree_skb(skb);
73081 return -1;
73082 }
73083 @@ -1502,7 +1523,7 @@ static void flush_stack(struct sock **st
73084 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
73085
73086 if (!skb1) {
73087 - atomic_inc(&sk->sk_drops);
73088 + atomic_inc_unchecked(&sk->sk_drops);
73089 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
73090 IS_UDPLITE(sk));
73091 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
73092 @@ -1671,6 +1692,9 @@ int __udp4_lib_rcv(struct sk_buff *skb,
73093 goto csum_error;
73094
73095 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
73096 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73097 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
73098 +#endif
73099 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
73100
73101 /*
73102 @@ -2098,8 +2122,13 @@ static void udp4_format_sock(struct sock
73103 sk_wmem_alloc_get(sp),
73104 sk_rmem_alloc_get(sp),
73105 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
73106 - atomic_read(&sp->sk_refcnt), sp,
73107 - atomic_read(&sp->sk_drops), len);
73108 + atomic_read(&sp->sk_refcnt),
73109 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73110 + NULL,
73111 +#else
73112 + sp,
73113 +#endif
73114 + atomic_read_unchecked(&sp->sk_drops), len);
73115 }
73116
73117 int udp4_seq_show(struct seq_file *seq, void *v)
73118 diff -urNp linux-3.0.8/net/ipv6/addrconf.c linux-3.0.8/net/ipv6/addrconf.c
73119 --- linux-3.0.8/net/ipv6/addrconf.c 2011-07-21 22:17:23.000000000 -0400
73120 +++ linux-3.0.8/net/ipv6/addrconf.c 2011-10-06 04:17:55.000000000 -0400
73121 @@ -2072,7 +2072,7 @@ int addrconf_set_dstaddr(struct net *net
73122 p.iph.ihl = 5;
73123 p.iph.protocol = IPPROTO_IPV6;
73124 p.iph.ttl = 64;
73125 - ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
73126 + ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
73127
73128 if (ops->ndo_do_ioctl) {
73129 mm_segment_t oldfs = get_fs();
73130 diff -urNp linux-3.0.8/net/ipv6/inet6_connection_sock.c linux-3.0.8/net/ipv6/inet6_connection_sock.c
73131 --- linux-3.0.8/net/ipv6/inet6_connection_sock.c 2011-07-21 22:17:23.000000000 -0400
73132 +++ linux-3.0.8/net/ipv6/inet6_connection_sock.c 2011-08-23 21:47:56.000000000 -0400
73133 @@ -178,7 +178,7 @@ void __inet6_csk_dst_store(struct sock *
73134 #ifdef CONFIG_XFRM
73135 {
73136 struct rt6_info *rt = (struct rt6_info *)dst;
73137 - rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
73138 + rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
73139 }
73140 #endif
73141 }
73142 @@ -193,7 +193,7 @@ struct dst_entry *__inet6_csk_dst_check(
73143 #ifdef CONFIG_XFRM
73144 if (dst) {
73145 struct rt6_info *rt = (struct rt6_info *)dst;
73146 - if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
73147 + if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
73148 __sk_dst_reset(sk);
73149 dst = NULL;
73150 }
73151 diff -urNp linux-3.0.8/net/ipv6/ipv6_sockglue.c linux-3.0.8/net/ipv6/ipv6_sockglue.c
73152 --- linux-3.0.8/net/ipv6/ipv6_sockglue.c 2011-10-24 08:05:30.000000000 -0400
73153 +++ linux-3.0.8/net/ipv6/ipv6_sockglue.c 2011-10-16 21:55:28.000000000 -0400
73154 @@ -129,6 +129,8 @@ static int do_ipv6_setsockopt(struct soc
73155 int val, valbool;
73156 int retv = -ENOPROTOOPT;
73157
73158 + pax_track_stack();
73159 +
73160 if (optval == NULL)
73161 val=0;
73162 else {
73163 @@ -919,6 +921,8 @@ static int do_ipv6_getsockopt(struct soc
73164 int len;
73165 int val;
73166
73167 + pax_track_stack();
73168 +
73169 if (ip6_mroute_opt(optname))
73170 return ip6_mroute_getsockopt(sk, optname, optval, optlen);
73171
73172 @@ -960,7 +964,7 @@ static int do_ipv6_getsockopt(struct soc
73173 if (sk->sk_type != SOCK_STREAM)
73174 return -ENOPROTOOPT;
73175
73176 - msg.msg_control = optval;
73177 + msg.msg_control = (void __force_kernel *)optval;
73178 msg.msg_controllen = len;
73179 msg.msg_flags = flags;
73180
73181 diff -urNp linux-3.0.8/net/ipv6/raw.c linux-3.0.8/net/ipv6/raw.c
73182 --- linux-3.0.8/net/ipv6/raw.c 2011-07-21 22:17:23.000000000 -0400
73183 +++ linux-3.0.8/net/ipv6/raw.c 2011-08-23 21:48:14.000000000 -0400
73184 @@ -376,7 +376,7 @@ static inline int rawv6_rcv_skb(struct s
73185 {
73186 if ((raw6_sk(sk)->checksum || rcu_dereference_raw(sk->sk_filter)) &&
73187 skb_checksum_complete(skb)) {
73188 - atomic_inc(&sk->sk_drops);
73189 + atomic_inc_unchecked(&sk->sk_drops);
73190 kfree_skb(skb);
73191 return NET_RX_DROP;
73192 }
73193 @@ -403,7 +403,7 @@ int rawv6_rcv(struct sock *sk, struct sk
73194 struct raw6_sock *rp = raw6_sk(sk);
73195
73196 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
73197 - atomic_inc(&sk->sk_drops);
73198 + atomic_inc_unchecked(&sk->sk_drops);
73199 kfree_skb(skb);
73200 return NET_RX_DROP;
73201 }
73202 @@ -427,7 +427,7 @@ int rawv6_rcv(struct sock *sk, struct sk
73203
73204 if (inet->hdrincl) {
73205 if (skb_checksum_complete(skb)) {
73206 - atomic_inc(&sk->sk_drops);
73207 + atomic_inc_unchecked(&sk->sk_drops);
73208 kfree_skb(skb);
73209 return NET_RX_DROP;
73210 }
73211 @@ -601,7 +601,7 @@ out:
73212 return err;
73213 }
73214
73215 -static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
73216 +static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
73217 struct flowi6 *fl6, struct dst_entry **dstp,
73218 unsigned int flags)
73219 {
73220 @@ -742,6 +742,8 @@ static int rawv6_sendmsg(struct kiocb *i
73221 u16 proto;
73222 int err;
73223
73224 + pax_track_stack();
73225 +
73226 /* Rough check on arithmetic overflow,
73227 better check is made in ip6_append_data().
73228 */
73229 @@ -909,12 +911,15 @@ do_confirm:
73230 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
73231 char __user *optval, int optlen)
73232 {
73233 + struct icmp6_filter filter;
73234 +
73235 switch (optname) {
73236 case ICMPV6_FILTER:
73237 if (optlen > sizeof(struct icmp6_filter))
73238 optlen = sizeof(struct icmp6_filter);
73239 - if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
73240 + if (copy_from_user(&filter, optval, optlen))
73241 return -EFAULT;
73242 + raw6_sk(sk)->filter = filter;
73243 return 0;
73244 default:
73245 return -ENOPROTOOPT;
73246 @@ -927,6 +932,7 @@ static int rawv6_geticmpfilter(struct so
73247 char __user *optval, int __user *optlen)
73248 {
73249 int len;
73250 + struct icmp6_filter filter;
73251
73252 switch (optname) {
73253 case ICMPV6_FILTER:
73254 @@ -938,7 +944,8 @@ static int rawv6_geticmpfilter(struct so
73255 len = sizeof(struct icmp6_filter);
73256 if (put_user(len, optlen))
73257 return -EFAULT;
73258 - if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
73259 + filter = raw6_sk(sk)->filter;
73260 + if (len > sizeof filter || copy_to_user(optval, &filter, len))
73261 return -EFAULT;
73262 return 0;
73263 default:
73264 @@ -1252,7 +1259,13 @@ static void raw6_sock_seq_show(struct se
73265 0, 0L, 0,
73266 sock_i_uid(sp), 0,
73267 sock_i_ino(sp),
73268 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
73269 + atomic_read(&sp->sk_refcnt),
73270 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73271 + NULL,
73272 +#else
73273 + sp,
73274 +#endif
73275 + atomic_read_unchecked(&sp->sk_drops));
73276 }
73277
73278 static int raw6_seq_show(struct seq_file *seq, void *v)
73279 diff -urNp linux-3.0.8/net/ipv6/tcp_ipv6.c linux-3.0.8/net/ipv6/tcp_ipv6.c
73280 --- linux-3.0.8/net/ipv6/tcp_ipv6.c 2011-10-24 08:05:21.000000000 -0400
73281 +++ linux-3.0.8/net/ipv6/tcp_ipv6.c 2011-08-23 21:48:14.000000000 -0400
73282 @@ -93,6 +93,10 @@ static struct tcp_md5sig_key *tcp_v6_md5
73283 }
73284 #endif
73285
73286 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73287 +extern int grsec_enable_blackhole;
73288 +#endif
73289 +
73290 static void tcp_v6_hash(struct sock *sk)
73291 {
73292 if (sk->sk_state != TCP_CLOSE) {
73293 @@ -1662,6 +1666,9 @@ static int tcp_v6_do_rcv(struct sock *sk
73294 return 0;
73295
73296 reset:
73297 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73298 + if (!grsec_enable_blackhole)
73299 +#endif
73300 tcp_v6_send_reset(sk, skb);
73301 discard:
73302 if (opt_skb)
73303 @@ -1741,12 +1748,20 @@ static int tcp_v6_rcv(struct sk_buff *sk
73304 TCP_SKB_CB(skb)->sacked = 0;
73305
73306 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
73307 - if (!sk)
73308 + if (!sk) {
73309 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73310 + ret = 1;
73311 +#endif
73312 goto no_tcp_socket;
73313 + }
73314
73315 process:
73316 - if (sk->sk_state == TCP_TIME_WAIT)
73317 + if (sk->sk_state == TCP_TIME_WAIT) {
73318 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73319 + ret = 2;
73320 +#endif
73321 goto do_time_wait;
73322 + }
73323
73324 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
73325 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
73326 @@ -1794,6 +1809,10 @@ no_tcp_socket:
73327 bad_packet:
73328 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
73329 } else {
73330 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73331 + if (!grsec_enable_blackhole || (ret == 1 &&
73332 + (skb->dev->flags & IFF_LOOPBACK)))
73333 +#endif
73334 tcp_v6_send_reset(NULL, skb);
73335 }
73336
73337 @@ -2054,7 +2073,13 @@ static void get_openreq6(struct seq_file
73338 uid,
73339 0, /* non standard timer */
73340 0, /* open_requests have no inode */
73341 - 0, req);
73342 + 0,
73343 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73344 + NULL
73345 +#else
73346 + req
73347 +#endif
73348 + );
73349 }
73350
73351 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
73352 @@ -2104,7 +2129,12 @@ static void get_tcp6_sock(struct seq_fil
73353 sock_i_uid(sp),
73354 icsk->icsk_probes_out,
73355 sock_i_ino(sp),
73356 - atomic_read(&sp->sk_refcnt), sp,
73357 + atomic_read(&sp->sk_refcnt),
73358 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73359 + NULL,
73360 +#else
73361 + sp,
73362 +#endif
73363 jiffies_to_clock_t(icsk->icsk_rto),
73364 jiffies_to_clock_t(icsk->icsk_ack.ato),
73365 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
73366 @@ -2139,7 +2169,13 @@ static void get_timewait6_sock(struct se
73367 dest->s6_addr32[2], dest->s6_addr32[3], destp,
73368 tw->tw_substate, 0, 0,
73369 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
73370 - atomic_read(&tw->tw_refcnt), tw);
73371 + atomic_read(&tw->tw_refcnt),
73372 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73373 + NULL
73374 +#else
73375 + tw
73376 +#endif
73377 + );
73378 }
73379
73380 static int tcp6_seq_show(struct seq_file *seq, void *v)
73381 diff -urNp linux-3.0.8/net/ipv6/udp.c linux-3.0.8/net/ipv6/udp.c
73382 --- linux-3.0.8/net/ipv6/udp.c 2011-10-24 08:05:32.000000000 -0400
73383 +++ linux-3.0.8/net/ipv6/udp.c 2011-10-17 23:17:19.000000000 -0400
73384 @@ -50,6 +50,10 @@
73385 #include <linux/seq_file.h>
73386 #include "udp_impl.h"
73387
73388 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73389 +extern int grsec_enable_blackhole;
73390 +#endif
73391 +
73392 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
73393 {
73394 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
73395 @@ -548,7 +552,7 @@ int udpv6_queue_rcv_skb(struct sock * sk
73396
73397 return 0;
73398 drop:
73399 - atomic_inc(&sk->sk_drops);
73400 + atomic_inc_unchecked(&sk->sk_drops);
73401 drop_no_sk_drops_inc:
73402 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
73403 kfree_skb(skb);
73404 @@ -624,7 +628,7 @@ static void flush_stack(struct sock **st
73405 continue;
73406 }
73407 drop:
73408 - atomic_inc(&sk->sk_drops);
73409 + atomic_inc_unchecked(&sk->sk_drops);
73410 UDP6_INC_STATS_BH(sock_net(sk),
73411 UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
73412 UDP6_INC_STATS_BH(sock_net(sk),
73413 @@ -779,6 +783,9 @@ int __udp6_lib_rcv(struct sk_buff *skb,
73414 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
73415 proto == IPPROTO_UDPLITE);
73416
73417 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73418 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
73419 +#endif
73420 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
73421
73422 kfree_skb(skb);
73423 @@ -795,7 +802,7 @@ int __udp6_lib_rcv(struct sk_buff *skb,
73424 if (!sock_owned_by_user(sk))
73425 udpv6_queue_rcv_skb(sk, skb);
73426 else if (sk_add_backlog(sk, skb)) {
73427 - atomic_inc(&sk->sk_drops);
73428 + atomic_inc_unchecked(&sk->sk_drops);
73429 bh_unlock_sock(sk);
73430 sock_put(sk);
73431 goto discard;
73432 @@ -1408,8 +1415,13 @@ static void udp6_sock_seq_show(struct se
73433 0, 0L, 0,
73434 sock_i_uid(sp), 0,
73435 sock_i_ino(sp),
73436 - atomic_read(&sp->sk_refcnt), sp,
73437 - atomic_read(&sp->sk_drops));
73438 + atomic_read(&sp->sk_refcnt),
73439 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73440 + NULL,
73441 +#else
73442 + sp,
73443 +#endif
73444 + atomic_read_unchecked(&sp->sk_drops));
73445 }
73446
73447 int udp6_seq_show(struct seq_file *seq, void *v)
73448 diff -urNp linux-3.0.8/net/irda/ircomm/ircomm_tty.c linux-3.0.8/net/irda/ircomm/ircomm_tty.c
73449 --- linux-3.0.8/net/irda/ircomm/ircomm_tty.c 2011-07-21 22:17:23.000000000 -0400
73450 +++ linux-3.0.8/net/irda/ircomm/ircomm_tty.c 2011-08-23 21:47:56.000000000 -0400
73451 @@ -282,16 +282,16 @@ static int ircomm_tty_block_til_ready(st
73452 add_wait_queue(&self->open_wait, &wait);
73453
73454 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
73455 - __FILE__,__LINE__, tty->driver->name, self->open_count );
73456 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
73457
73458 /* As far as I can see, we protect open_count - Jean II */
73459 spin_lock_irqsave(&self->spinlock, flags);
73460 if (!tty_hung_up_p(filp)) {
73461 extra_count = 1;
73462 - self->open_count--;
73463 + local_dec(&self->open_count);
73464 }
73465 spin_unlock_irqrestore(&self->spinlock, flags);
73466 - self->blocked_open++;
73467 + local_inc(&self->blocked_open);
73468
73469 while (1) {
73470 if (tty->termios->c_cflag & CBAUD) {
73471 @@ -331,7 +331,7 @@ static int ircomm_tty_block_til_ready(st
73472 }
73473
73474 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
73475 - __FILE__,__LINE__, tty->driver->name, self->open_count );
73476 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
73477
73478 schedule();
73479 }
73480 @@ -342,13 +342,13 @@ static int ircomm_tty_block_til_ready(st
73481 if (extra_count) {
73482 /* ++ is not atomic, so this should be protected - Jean II */
73483 spin_lock_irqsave(&self->spinlock, flags);
73484 - self->open_count++;
73485 + local_inc(&self->open_count);
73486 spin_unlock_irqrestore(&self->spinlock, flags);
73487 }
73488 - self->blocked_open--;
73489 + local_dec(&self->blocked_open);
73490
73491 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
73492 - __FILE__,__LINE__, tty->driver->name, self->open_count);
73493 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
73494
73495 if (!retval)
73496 self->flags |= ASYNC_NORMAL_ACTIVE;
73497 @@ -417,14 +417,14 @@ static int ircomm_tty_open(struct tty_st
73498 }
73499 /* ++ is not atomic, so this should be protected - Jean II */
73500 spin_lock_irqsave(&self->spinlock, flags);
73501 - self->open_count++;
73502 + local_inc(&self->open_count);
73503
73504 tty->driver_data = self;
73505 self->tty = tty;
73506 spin_unlock_irqrestore(&self->spinlock, flags);
73507
73508 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
73509 - self->line, self->open_count);
73510 + self->line, local_read(&self->open_count));
73511
73512 /* Not really used by us, but lets do it anyway */
73513 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
73514 @@ -510,7 +510,7 @@ static void ircomm_tty_close(struct tty_
73515 return;
73516 }
73517
73518 - if ((tty->count == 1) && (self->open_count != 1)) {
73519 + if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
73520 /*
73521 * Uh, oh. tty->count is 1, which means that the tty
73522 * structure will be freed. state->count should always
73523 @@ -520,16 +520,16 @@ static void ircomm_tty_close(struct tty_
73524 */
73525 IRDA_DEBUG(0, "%s(), bad serial port count; "
73526 "tty->count is 1, state->count is %d\n", __func__ ,
73527 - self->open_count);
73528 - self->open_count = 1;
73529 + local_read(&self->open_count));
73530 + local_set(&self->open_count, 1);
73531 }
73532
73533 - if (--self->open_count < 0) {
73534 + if (local_dec_return(&self->open_count) < 0) {
73535 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
73536 - __func__, self->line, self->open_count);
73537 - self->open_count = 0;
73538 + __func__, self->line, local_read(&self->open_count));
73539 + local_set(&self->open_count, 0);
73540 }
73541 - if (self->open_count) {
73542 + if (local_read(&self->open_count)) {
73543 spin_unlock_irqrestore(&self->spinlock, flags);
73544
73545 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
73546 @@ -561,7 +561,7 @@ static void ircomm_tty_close(struct tty_
73547 tty->closing = 0;
73548 self->tty = NULL;
73549
73550 - if (self->blocked_open) {
73551 + if (local_read(&self->blocked_open)) {
73552 if (self->close_delay)
73553 schedule_timeout_interruptible(self->close_delay);
73554 wake_up_interruptible(&self->open_wait);
73555 @@ -1013,7 +1013,7 @@ static void ircomm_tty_hangup(struct tty
73556 spin_lock_irqsave(&self->spinlock, flags);
73557 self->flags &= ~ASYNC_NORMAL_ACTIVE;
73558 self->tty = NULL;
73559 - self->open_count = 0;
73560 + local_set(&self->open_count, 0);
73561 spin_unlock_irqrestore(&self->spinlock, flags);
73562
73563 wake_up_interruptible(&self->open_wait);
73564 @@ -1360,7 +1360,7 @@ static void ircomm_tty_line_info(struct
73565 seq_putc(m, '\n');
73566
73567 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
73568 - seq_printf(m, "Open count: %d\n", self->open_count);
73569 + seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
73570 seq_printf(m, "Max data size: %d\n", self->max_data_size);
73571 seq_printf(m, "Max header size: %d\n", self->max_header_size);
73572
73573 diff -urNp linux-3.0.8/net/iucv/af_iucv.c linux-3.0.8/net/iucv/af_iucv.c
73574 --- linux-3.0.8/net/iucv/af_iucv.c 2011-07-21 22:17:23.000000000 -0400
73575 +++ linux-3.0.8/net/iucv/af_iucv.c 2011-08-23 21:47:56.000000000 -0400
73576 @@ -648,10 +648,10 @@ static int iucv_sock_autobind(struct soc
73577
73578 write_lock_bh(&iucv_sk_list.lock);
73579
73580 - sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
73581 + sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
73582 while (__iucv_get_sock_by_name(name)) {
73583 sprintf(name, "%08x",
73584 - atomic_inc_return(&iucv_sk_list.autobind_name));
73585 + atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
73586 }
73587
73588 write_unlock_bh(&iucv_sk_list.lock);
73589 diff -urNp linux-3.0.8/net/key/af_key.c linux-3.0.8/net/key/af_key.c
73590 --- linux-3.0.8/net/key/af_key.c 2011-07-21 22:17:23.000000000 -0400
73591 +++ linux-3.0.8/net/key/af_key.c 2011-08-23 21:48:14.000000000 -0400
73592 @@ -2481,6 +2481,8 @@ static int pfkey_migrate(struct sock *sk
73593 struct xfrm_migrate m[XFRM_MAX_DEPTH];
73594 struct xfrm_kmaddress k;
73595
73596 + pax_track_stack();
73597 +
73598 if (!present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC - 1],
73599 ext_hdrs[SADB_EXT_ADDRESS_DST - 1]) ||
73600 !ext_hdrs[SADB_X_EXT_POLICY - 1]) {
73601 @@ -3016,10 +3018,10 @@ static int pfkey_send_policy_notify(stru
73602 static u32 get_acqseq(void)
73603 {
73604 u32 res;
73605 - static atomic_t acqseq;
73606 + static atomic_unchecked_t acqseq;
73607
73608 do {
73609 - res = atomic_inc_return(&acqseq);
73610 + res = atomic_inc_return_unchecked(&acqseq);
73611 } while (!res);
73612 return res;
73613 }
73614 diff -urNp linux-3.0.8/net/lapb/lapb_iface.c linux-3.0.8/net/lapb/lapb_iface.c
73615 --- linux-3.0.8/net/lapb/lapb_iface.c 2011-07-21 22:17:23.000000000 -0400
73616 +++ linux-3.0.8/net/lapb/lapb_iface.c 2011-08-23 21:47:56.000000000 -0400
73617 @@ -158,7 +158,7 @@ int lapb_register(struct net_device *dev
73618 goto out;
73619
73620 lapb->dev = dev;
73621 - lapb->callbacks = *callbacks;
73622 + lapb->callbacks = callbacks;
73623
73624 __lapb_insert_cb(lapb);
73625
73626 @@ -380,32 +380,32 @@ int lapb_data_received(struct net_device
73627
73628 void lapb_connect_confirmation(struct lapb_cb *lapb, int reason)
73629 {
73630 - if (lapb->callbacks.connect_confirmation)
73631 - lapb->callbacks.connect_confirmation(lapb->dev, reason);
73632 + if (lapb->callbacks->connect_confirmation)
73633 + lapb->callbacks->connect_confirmation(lapb->dev, reason);
73634 }
73635
73636 void lapb_connect_indication(struct lapb_cb *lapb, int reason)
73637 {
73638 - if (lapb->callbacks.connect_indication)
73639 - lapb->callbacks.connect_indication(lapb->dev, reason);
73640 + if (lapb->callbacks->connect_indication)
73641 + lapb->callbacks->connect_indication(lapb->dev, reason);
73642 }
73643
73644 void lapb_disconnect_confirmation(struct lapb_cb *lapb, int reason)
73645 {
73646 - if (lapb->callbacks.disconnect_confirmation)
73647 - lapb->callbacks.disconnect_confirmation(lapb->dev, reason);
73648 + if (lapb->callbacks->disconnect_confirmation)
73649 + lapb->callbacks->disconnect_confirmation(lapb->dev, reason);
73650 }
73651
73652 void lapb_disconnect_indication(struct lapb_cb *lapb, int reason)
73653 {
73654 - if (lapb->callbacks.disconnect_indication)
73655 - lapb->callbacks.disconnect_indication(lapb->dev, reason);
73656 + if (lapb->callbacks->disconnect_indication)
73657 + lapb->callbacks->disconnect_indication(lapb->dev, reason);
73658 }
73659
73660 int lapb_data_indication(struct lapb_cb *lapb, struct sk_buff *skb)
73661 {
73662 - if (lapb->callbacks.data_indication)
73663 - return lapb->callbacks.data_indication(lapb->dev, skb);
73664 + if (lapb->callbacks->data_indication)
73665 + return lapb->callbacks->data_indication(lapb->dev, skb);
73666
73667 kfree_skb(skb);
73668 return NET_RX_SUCCESS; /* For now; must be != NET_RX_DROP */
73669 @@ -415,8 +415,8 @@ int lapb_data_transmit(struct lapb_cb *l
73670 {
73671 int used = 0;
73672
73673 - if (lapb->callbacks.data_transmit) {
73674 - lapb->callbacks.data_transmit(lapb->dev, skb);
73675 + if (lapb->callbacks->data_transmit) {
73676 + lapb->callbacks->data_transmit(lapb->dev, skb);
73677 used = 1;
73678 }
73679
73680 diff -urNp linux-3.0.8/net/mac80211/debugfs_sta.c linux-3.0.8/net/mac80211/debugfs_sta.c
73681 --- linux-3.0.8/net/mac80211/debugfs_sta.c 2011-07-21 22:17:23.000000000 -0400
73682 +++ linux-3.0.8/net/mac80211/debugfs_sta.c 2011-08-23 21:48:14.000000000 -0400
73683 @@ -140,6 +140,8 @@ static ssize_t sta_agg_status_read(struc
73684 struct tid_ampdu_rx *tid_rx;
73685 struct tid_ampdu_tx *tid_tx;
73686
73687 + pax_track_stack();
73688 +
73689 rcu_read_lock();
73690
73691 p += scnprintf(p, sizeof(buf) + buf - p, "next dialog_token: %#02x\n",
73692 @@ -240,6 +242,8 @@ static ssize_t sta_ht_capa_read(struct f
73693 struct sta_info *sta = file->private_data;
73694 struct ieee80211_sta_ht_cap *htc = &sta->sta.ht_cap;
73695
73696 + pax_track_stack();
73697 +
73698 p += scnprintf(p, sizeof(buf) + buf - p, "ht %ssupported\n",
73699 htc->ht_supported ? "" : "not ");
73700 if (htc->ht_supported) {
73701 diff -urNp linux-3.0.8/net/mac80211/ieee80211_i.h linux-3.0.8/net/mac80211/ieee80211_i.h
73702 --- linux-3.0.8/net/mac80211/ieee80211_i.h 2011-07-21 22:17:23.000000000 -0400
73703 +++ linux-3.0.8/net/mac80211/ieee80211_i.h 2011-08-23 21:47:56.000000000 -0400
73704 @@ -27,6 +27,7 @@
73705 #include <net/ieee80211_radiotap.h>
73706 #include <net/cfg80211.h>
73707 #include <net/mac80211.h>
73708 +#include <asm/local.h>
73709 #include "key.h"
73710 #include "sta_info.h"
73711
73712 @@ -721,7 +722,7 @@ struct ieee80211_local {
73713 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
73714 spinlock_t queue_stop_reason_lock;
73715
73716 - int open_count;
73717 + local_t open_count;
73718 int monitors, cooked_mntrs;
73719 /* number of interfaces with corresponding FIF_ flags */
73720 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
73721 diff -urNp linux-3.0.8/net/mac80211/iface.c linux-3.0.8/net/mac80211/iface.c
73722 --- linux-3.0.8/net/mac80211/iface.c 2011-10-24 08:05:21.000000000 -0400
73723 +++ linux-3.0.8/net/mac80211/iface.c 2011-08-23 21:47:56.000000000 -0400
73724 @@ -211,7 +211,7 @@ static int ieee80211_do_open(struct net_
73725 break;
73726 }
73727
73728 - if (local->open_count == 0) {
73729 + if (local_read(&local->open_count) == 0) {
73730 res = drv_start(local);
73731 if (res)
73732 goto err_del_bss;
73733 @@ -235,7 +235,7 @@ static int ieee80211_do_open(struct net_
73734 memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
73735
73736 if (!is_valid_ether_addr(dev->dev_addr)) {
73737 - if (!local->open_count)
73738 + if (!local_read(&local->open_count))
73739 drv_stop(local);
73740 return -EADDRNOTAVAIL;
73741 }
73742 @@ -327,7 +327,7 @@ static int ieee80211_do_open(struct net_
73743 mutex_unlock(&local->mtx);
73744
73745 if (coming_up)
73746 - local->open_count++;
73747 + local_inc(&local->open_count);
73748
73749 if (hw_reconf_flags) {
73750 ieee80211_hw_config(local, hw_reconf_flags);
73751 @@ -347,7 +347,7 @@ static int ieee80211_do_open(struct net_
73752 err_del_interface:
73753 drv_remove_interface(local, &sdata->vif);
73754 err_stop:
73755 - if (!local->open_count)
73756 + if (!local_read(&local->open_count))
73757 drv_stop(local);
73758 err_del_bss:
73759 sdata->bss = NULL;
73760 @@ -475,7 +475,7 @@ static void ieee80211_do_stop(struct iee
73761 }
73762
73763 if (going_down)
73764 - local->open_count--;
73765 + local_dec(&local->open_count);
73766
73767 switch (sdata->vif.type) {
73768 case NL80211_IFTYPE_AP_VLAN:
73769 @@ -534,7 +534,7 @@ static void ieee80211_do_stop(struct iee
73770
73771 ieee80211_recalc_ps(local, -1);
73772
73773 - if (local->open_count == 0) {
73774 + if (local_read(&local->open_count) == 0) {
73775 if (local->ops->napi_poll)
73776 napi_disable(&local->napi);
73777 ieee80211_clear_tx_pending(local);
73778 diff -urNp linux-3.0.8/net/mac80211/main.c linux-3.0.8/net/mac80211/main.c
73779 --- linux-3.0.8/net/mac80211/main.c 2011-07-21 22:17:23.000000000 -0400
73780 +++ linux-3.0.8/net/mac80211/main.c 2011-08-23 21:47:56.000000000 -0400
73781 @@ -209,7 +209,7 @@ int ieee80211_hw_config(struct ieee80211
73782 local->hw.conf.power_level = power;
73783 }
73784
73785 - if (changed && local->open_count) {
73786 + if (changed && local_read(&local->open_count)) {
73787 ret = drv_config(local, changed);
73788 /*
73789 * Goal:
73790 diff -urNp linux-3.0.8/net/mac80211/mlme.c linux-3.0.8/net/mac80211/mlme.c
73791 --- linux-3.0.8/net/mac80211/mlme.c 2011-10-24 08:05:21.000000000 -0400
73792 +++ linux-3.0.8/net/mac80211/mlme.c 2011-08-23 21:48:14.000000000 -0400
73793 @@ -1444,6 +1444,8 @@ static bool ieee80211_assoc_success(stru
73794 bool have_higher_than_11mbit = false;
73795 u16 ap_ht_cap_flags;
73796
73797 + pax_track_stack();
73798 +
73799 /* AssocResp and ReassocResp have identical structure */
73800
73801 aid = le16_to_cpu(mgmt->u.assoc_resp.aid);
73802 diff -urNp linux-3.0.8/net/mac80211/pm.c linux-3.0.8/net/mac80211/pm.c
73803 --- linux-3.0.8/net/mac80211/pm.c 2011-07-21 22:17:23.000000000 -0400
73804 +++ linux-3.0.8/net/mac80211/pm.c 2011-08-23 21:47:56.000000000 -0400
73805 @@ -47,7 +47,7 @@ int __ieee80211_suspend(struct ieee80211
73806 cancel_work_sync(&local->dynamic_ps_enable_work);
73807 del_timer_sync(&local->dynamic_ps_timer);
73808
73809 - local->wowlan = wowlan && local->open_count;
73810 + local->wowlan = wowlan && local_read(&local->open_count);
73811 if (local->wowlan) {
73812 int err = drv_suspend(local, wowlan);
73813 if (err) {
73814 @@ -111,7 +111,7 @@ int __ieee80211_suspend(struct ieee80211
73815 }
73816
73817 /* stop hardware - this must stop RX */
73818 - if (local->open_count)
73819 + if (local_read(&local->open_count))
73820 ieee80211_stop_device(local);
73821
73822 suspend:
73823 diff -urNp linux-3.0.8/net/mac80211/rate.c linux-3.0.8/net/mac80211/rate.c
73824 --- linux-3.0.8/net/mac80211/rate.c 2011-07-21 22:17:23.000000000 -0400
73825 +++ linux-3.0.8/net/mac80211/rate.c 2011-08-23 21:47:56.000000000 -0400
73826 @@ -371,7 +371,7 @@ int ieee80211_init_rate_ctrl_alg(struct
73827
73828 ASSERT_RTNL();
73829
73830 - if (local->open_count)
73831 + if (local_read(&local->open_count))
73832 return -EBUSY;
73833
73834 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
73835 diff -urNp linux-3.0.8/net/mac80211/rc80211_pid_debugfs.c linux-3.0.8/net/mac80211/rc80211_pid_debugfs.c
73836 --- linux-3.0.8/net/mac80211/rc80211_pid_debugfs.c 2011-07-21 22:17:23.000000000 -0400
73837 +++ linux-3.0.8/net/mac80211/rc80211_pid_debugfs.c 2011-08-23 21:47:56.000000000 -0400
73838 @@ -192,7 +192,7 @@ static ssize_t rate_control_pid_events_r
73839
73840 spin_unlock_irqrestore(&events->lock, status);
73841
73842 - if (copy_to_user(buf, pb, p))
73843 + if (p > sizeof(pb) || copy_to_user(buf, pb, p))
73844 return -EFAULT;
73845
73846 return p;
73847 diff -urNp linux-3.0.8/net/mac80211/util.c linux-3.0.8/net/mac80211/util.c
73848 --- linux-3.0.8/net/mac80211/util.c 2011-07-21 22:17:23.000000000 -0400
73849 +++ linux-3.0.8/net/mac80211/util.c 2011-08-23 21:47:56.000000000 -0400
73850 @@ -1147,7 +1147,7 @@ int ieee80211_reconfig(struct ieee80211_
73851 #endif
73852
73853 /* restart hardware */
73854 - if (local->open_count) {
73855 + if (local_read(&local->open_count)) {
73856 /*
73857 * Upon resume hardware can sometimes be goofy due to
73858 * various platform / driver / bus issues, so restarting
73859 diff -urNp linux-3.0.8/net/netfilter/ipvs/ip_vs_conn.c linux-3.0.8/net/netfilter/ipvs/ip_vs_conn.c
73860 --- linux-3.0.8/net/netfilter/ipvs/ip_vs_conn.c 2011-07-21 22:17:23.000000000 -0400
73861 +++ linux-3.0.8/net/netfilter/ipvs/ip_vs_conn.c 2011-08-23 21:47:56.000000000 -0400
73862 @@ -556,7 +556,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, s
73863 /* Increase the refcnt counter of the dest */
73864 atomic_inc(&dest->refcnt);
73865
73866 - conn_flags = atomic_read(&dest->conn_flags);
73867 + conn_flags = atomic_read_unchecked(&dest->conn_flags);
73868 if (cp->protocol != IPPROTO_UDP)
73869 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
73870 /* Bind with the destination and its corresponding transmitter */
73871 @@ -869,7 +869,7 @@ ip_vs_conn_new(const struct ip_vs_conn_p
73872 atomic_set(&cp->refcnt, 1);
73873
73874 atomic_set(&cp->n_control, 0);
73875 - atomic_set(&cp->in_pkts, 0);
73876 + atomic_set_unchecked(&cp->in_pkts, 0);
73877
73878 atomic_inc(&ipvs->conn_count);
73879 if (flags & IP_VS_CONN_F_NO_CPORT)
73880 @@ -1149,7 +1149,7 @@ static inline int todrop_entry(struct ip
73881
73882 /* Don't drop the entry if its number of incoming packets is not
73883 located in [0, 8] */
73884 - i = atomic_read(&cp->in_pkts);
73885 + i = atomic_read_unchecked(&cp->in_pkts);
73886 if (i > 8 || i < 0) return 0;
73887
73888 if (!todrop_rate[i]) return 0;
73889 diff -urNp linux-3.0.8/net/netfilter/ipvs/ip_vs_core.c linux-3.0.8/net/netfilter/ipvs/ip_vs_core.c
73890 --- linux-3.0.8/net/netfilter/ipvs/ip_vs_core.c 2011-07-21 22:17:23.000000000 -0400
73891 +++ linux-3.0.8/net/netfilter/ipvs/ip_vs_core.c 2011-08-23 21:47:56.000000000 -0400
73892 @@ -563,7 +563,7 @@ int ip_vs_leave(struct ip_vs_service *sv
73893 ret = cp->packet_xmit(skb, cp, pd->pp);
73894 /* do not touch skb anymore */
73895
73896 - atomic_inc(&cp->in_pkts);
73897 + atomic_inc_unchecked(&cp->in_pkts);
73898 ip_vs_conn_put(cp);
73899 return ret;
73900 }
73901 @@ -1613,7 +1613,7 @@ ip_vs_in(unsigned int hooknum, struct sk
73902 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
73903 pkts = sysctl_sync_threshold(ipvs);
73904 else
73905 - pkts = atomic_add_return(1, &cp->in_pkts);
73906 + pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
73907
73908 if ((ipvs->sync_state & IP_VS_STATE_MASTER) &&
73909 cp->protocol == IPPROTO_SCTP) {
73910 diff -urNp linux-3.0.8/net/netfilter/ipvs/ip_vs_ctl.c linux-3.0.8/net/netfilter/ipvs/ip_vs_ctl.c
73911 --- linux-3.0.8/net/netfilter/ipvs/ip_vs_ctl.c 2011-10-24 08:05:21.000000000 -0400
73912 +++ linux-3.0.8/net/netfilter/ipvs/ip_vs_ctl.c 2011-08-23 21:48:14.000000000 -0400
73913 @@ -782,7 +782,7 @@ __ip_vs_update_dest(struct ip_vs_service
73914 ip_vs_rs_hash(ipvs, dest);
73915 write_unlock_bh(&ipvs->rs_lock);
73916 }
73917 - atomic_set(&dest->conn_flags, conn_flags);
73918 + atomic_set_unchecked(&dest->conn_flags, conn_flags);
73919
73920 /* bind the service */
73921 if (!dest->svc) {
73922 @@ -2027,7 +2027,7 @@ static int ip_vs_info_seq_show(struct se
73923 " %-7s %-6d %-10d %-10d\n",
73924 &dest->addr.in6,
73925 ntohs(dest->port),
73926 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
73927 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
73928 atomic_read(&dest->weight),
73929 atomic_read(&dest->activeconns),
73930 atomic_read(&dest->inactconns));
73931 @@ -2038,7 +2038,7 @@ static int ip_vs_info_seq_show(struct se
73932 "%-7s %-6d %-10d %-10d\n",
73933 ntohl(dest->addr.ip),
73934 ntohs(dest->port),
73935 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
73936 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
73937 atomic_read(&dest->weight),
73938 atomic_read(&dest->activeconns),
73939 atomic_read(&dest->inactconns));
73940 @@ -2284,6 +2284,8 @@ do_ip_vs_set_ctl(struct sock *sk, int cm
73941 struct ip_vs_dest_user *udest_compat;
73942 struct ip_vs_dest_user_kern udest;
73943
73944 + pax_track_stack();
73945 +
73946 if (!capable(CAP_NET_ADMIN))
73947 return -EPERM;
73948
73949 @@ -2498,7 +2500,7 @@ __ip_vs_get_dest_entries(struct net *net
73950
73951 entry.addr = dest->addr.ip;
73952 entry.port = dest->port;
73953 - entry.conn_flags = atomic_read(&dest->conn_flags);
73954 + entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
73955 entry.weight = atomic_read(&dest->weight);
73956 entry.u_threshold = dest->u_threshold;
73957 entry.l_threshold = dest->l_threshold;
73958 @@ -3026,7 +3028,7 @@ static int ip_vs_genl_fill_dest(struct s
73959 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
73960
73961 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
73962 - atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
73963 + atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
73964 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
73965 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
73966 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
73967 diff -urNp linux-3.0.8/net/netfilter/ipvs/ip_vs_sync.c linux-3.0.8/net/netfilter/ipvs/ip_vs_sync.c
73968 --- linux-3.0.8/net/netfilter/ipvs/ip_vs_sync.c 2011-07-21 22:17:23.000000000 -0400
73969 +++ linux-3.0.8/net/netfilter/ipvs/ip_vs_sync.c 2011-08-23 21:47:56.000000000 -0400
73970 @@ -648,7 +648,7 @@ control:
73971 * i.e only increment in_pkts for Templates.
73972 */
73973 if (cp->flags & IP_VS_CONN_F_TEMPLATE) {
73974 - int pkts = atomic_add_return(1, &cp->in_pkts);
73975 + int pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
73976
73977 if (pkts % sysctl_sync_period(ipvs) != 1)
73978 return;
73979 @@ -794,7 +794,7 @@ static void ip_vs_proc_conn(struct net *
73980
73981 if (opt)
73982 memcpy(&cp->in_seq, opt, sizeof(*opt));
73983 - atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
73984 + atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
73985 cp->state = state;
73986 cp->old_state = cp->state;
73987 /*
73988 diff -urNp linux-3.0.8/net/netfilter/ipvs/ip_vs_xmit.c linux-3.0.8/net/netfilter/ipvs/ip_vs_xmit.c
73989 --- linux-3.0.8/net/netfilter/ipvs/ip_vs_xmit.c 2011-07-21 22:17:23.000000000 -0400
73990 +++ linux-3.0.8/net/netfilter/ipvs/ip_vs_xmit.c 2011-08-23 21:47:56.000000000 -0400
73991 @@ -1151,7 +1151,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, str
73992 else
73993 rc = NF_ACCEPT;
73994 /* do not touch skb anymore */
73995 - atomic_inc(&cp->in_pkts);
73996 + atomic_inc_unchecked(&cp->in_pkts);
73997 goto out;
73998 }
73999
74000 @@ -1272,7 +1272,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb,
74001 else
74002 rc = NF_ACCEPT;
74003 /* do not touch skb anymore */
74004 - atomic_inc(&cp->in_pkts);
74005 + atomic_inc_unchecked(&cp->in_pkts);
74006 goto out;
74007 }
74008
74009 diff -urNp linux-3.0.8/net/netfilter/Kconfig linux-3.0.8/net/netfilter/Kconfig
74010 --- linux-3.0.8/net/netfilter/Kconfig 2011-07-21 22:17:23.000000000 -0400
74011 +++ linux-3.0.8/net/netfilter/Kconfig 2011-08-23 21:48:14.000000000 -0400
74012 @@ -781,6 +781,16 @@ config NETFILTER_XT_MATCH_ESP
74013
74014 To compile it as a module, choose M here. If unsure, say N.
74015
74016 +config NETFILTER_XT_MATCH_GRADM
74017 + tristate '"gradm" match support'
74018 + depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
74019 + depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
74020 + ---help---
74021 + The gradm match allows to match on grsecurity RBAC being enabled.
74022 + It is useful when iptables rules are applied early on bootup to
74023 + prevent connections to the machine (except from a trusted host)
74024 + while the RBAC system is disabled.
74025 +
74026 config NETFILTER_XT_MATCH_HASHLIMIT
74027 tristate '"hashlimit" match support'
74028 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
74029 diff -urNp linux-3.0.8/net/netfilter/Makefile linux-3.0.8/net/netfilter/Makefile
74030 --- linux-3.0.8/net/netfilter/Makefile 2011-07-21 22:17:23.000000000 -0400
74031 +++ linux-3.0.8/net/netfilter/Makefile 2011-08-23 21:48:14.000000000 -0400
74032 @@ -81,6 +81,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) +=
74033 obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
74034 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
74035 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
74036 +obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
74037 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
74038 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
74039 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
74040 diff -urNp linux-3.0.8/net/netfilter/nfnetlink_log.c linux-3.0.8/net/netfilter/nfnetlink_log.c
74041 --- linux-3.0.8/net/netfilter/nfnetlink_log.c 2011-07-21 22:17:23.000000000 -0400
74042 +++ linux-3.0.8/net/netfilter/nfnetlink_log.c 2011-08-23 21:47:56.000000000 -0400
74043 @@ -70,7 +70,7 @@ struct nfulnl_instance {
74044 };
74045
74046 static DEFINE_SPINLOCK(instances_lock);
74047 -static atomic_t global_seq;
74048 +static atomic_unchecked_t global_seq;
74049
74050 #define INSTANCE_BUCKETS 16
74051 static struct hlist_head instance_table[INSTANCE_BUCKETS];
74052 @@ -505,7 +505,7 @@ __build_packet_message(struct nfulnl_ins
74053 /* global sequence number */
74054 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
74055 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
74056 - htonl(atomic_inc_return(&global_seq)));
74057 + htonl(atomic_inc_return_unchecked(&global_seq)));
74058
74059 if (data_len) {
74060 struct nlattr *nla;
74061 diff -urNp linux-3.0.8/net/netfilter/nfnetlink_queue.c linux-3.0.8/net/netfilter/nfnetlink_queue.c
74062 --- linux-3.0.8/net/netfilter/nfnetlink_queue.c 2011-07-21 22:17:23.000000000 -0400
74063 +++ linux-3.0.8/net/netfilter/nfnetlink_queue.c 2011-08-23 21:47:56.000000000 -0400
74064 @@ -58,7 +58,7 @@ struct nfqnl_instance {
74065 */
74066 spinlock_t lock;
74067 unsigned int queue_total;
74068 - atomic_t id_sequence; /* 'sequence' of pkt ids */
74069 + atomic_unchecked_t id_sequence; /* 'sequence' of pkt ids */
74070 struct list_head queue_list; /* packets in queue */
74071 };
74072
74073 @@ -272,7 +272,7 @@ nfqnl_build_packet_message(struct nfqnl_
74074 nfmsg->version = NFNETLINK_V0;
74075 nfmsg->res_id = htons(queue->queue_num);
74076
74077 - entry->id = atomic_inc_return(&queue->id_sequence);
74078 + entry->id = atomic_inc_return_unchecked(&queue->id_sequence);
74079 pmsg.packet_id = htonl(entry->id);
74080 pmsg.hw_protocol = entskb->protocol;
74081 pmsg.hook = entry->hook;
74082 @@ -870,7 +870,7 @@ static int seq_show(struct seq_file *s,
74083 inst->peer_pid, inst->queue_total,
74084 inst->copy_mode, inst->copy_range,
74085 inst->queue_dropped, inst->queue_user_dropped,
74086 - atomic_read(&inst->id_sequence), 1);
74087 + atomic_read_unchecked(&inst->id_sequence), 1);
74088 }
74089
74090 static const struct seq_operations nfqnl_seq_ops = {
74091 diff -urNp linux-3.0.8/net/netfilter/xt_gradm.c linux-3.0.8/net/netfilter/xt_gradm.c
74092 --- linux-3.0.8/net/netfilter/xt_gradm.c 1969-12-31 19:00:00.000000000 -0500
74093 +++ linux-3.0.8/net/netfilter/xt_gradm.c 2011-08-23 21:48:14.000000000 -0400
74094 @@ -0,0 +1,51 @@
74095 +/*
74096 + * gradm match for netfilter
74097 + * Copyright © Zbigniew Krzystolik, 2010
74098 + *
74099 + * This program is free software; you can redistribute it and/or modify
74100 + * it under the terms of the GNU General Public License; either version
74101 + * 2 or 3 as published by the Free Software Foundation.
74102 + */
74103 +#include <linux/module.h>
74104 +#include <linux/moduleparam.h>
74105 +#include <linux/skbuff.h>
74106 +#include <linux/netfilter/x_tables.h>
74107 +#include <linux/grsecurity.h>
74108 +#include <linux/netfilter/xt_gradm.h>
74109 +
74110 +static bool
74111 +gradm_mt(const struct sk_buff *skb, struct xt_action_param *par)
74112 +{
74113 + const struct xt_gradm_mtinfo *info = par->matchinfo;
74114 + bool retval = false;
74115 + if (gr_acl_is_enabled())
74116 + retval = true;
74117 + return retval ^ info->invflags;
74118 +}
74119 +
74120 +static struct xt_match gradm_mt_reg __read_mostly = {
74121 + .name = "gradm",
74122 + .revision = 0,
74123 + .family = NFPROTO_UNSPEC,
74124 + .match = gradm_mt,
74125 + .matchsize = XT_ALIGN(sizeof(struct xt_gradm_mtinfo)),
74126 + .me = THIS_MODULE,
74127 +};
74128 +
74129 +static int __init gradm_mt_init(void)
74130 +{
74131 + return xt_register_match(&gradm_mt_reg);
74132 +}
74133 +
74134 +static void __exit gradm_mt_exit(void)
74135 +{
74136 + xt_unregister_match(&gradm_mt_reg);
74137 +}
74138 +
74139 +module_init(gradm_mt_init);
74140 +module_exit(gradm_mt_exit);
74141 +MODULE_AUTHOR("Zbigniew Krzystolik <zbyniu@destrukcja.pl>");
74142 +MODULE_DESCRIPTION("Xtables: Grsecurity RBAC match");
74143 +MODULE_LICENSE("GPL");
74144 +MODULE_ALIAS("ipt_gradm");
74145 +MODULE_ALIAS("ip6t_gradm");
74146 diff -urNp linux-3.0.8/net/netfilter/xt_statistic.c linux-3.0.8/net/netfilter/xt_statistic.c
74147 --- linux-3.0.8/net/netfilter/xt_statistic.c 2011-07-21 22:17:23.000000000 -0400
74148 +++ linux-3.0.8/net/netfilter/xt_statistic.c 2011-08-23 21:47:56.000000000 -0400
74149 @@ -18,7 +18,7 @@
74150 #include <linux/netfilter/x_tables.h>
74151
74152 struct xt_statistic_priv {
74153 - atomic_t count;
74154 + atomic_unchecked_t count;
74155 } ____cacheline_aligned_in_smp;
74156
74157 MODULE_LICENSE("GPL");
74158 @@ -41,9 +41,9 @@ statistic_mt(const struct sk_buff *skb,
74159 break;
74160 case XT_STATISTIC_MODE_NTH:
74161 do {
74162 - oval = atomic_read(&info->master->count);
74163 + oval = atomic_read_unchecked(&info->master->count);
74164 nval = (oval == info->u.nth.every) ? 0 : oval + 1;
74165 - } while (atomic_cmpxchg(&info->master->count, oval, nval) != oval);
74166 + } while (atomic_cmpxchg_unchecked(&info->master->count, oval, nval) != oval);
74167 if (nval == 0)
74168 ret = !ret;
74169 break;
74170 @@ -63,7 +63,7 @@ static int statistic_mt_check(const stru
74171 info->master = kzalloc(sizeof(*info->master), GFP_KERNEL);
74172 if (info->master == NULL)
74173 return -ENOMEM;
74174 - atomic_set(&info->master->count, info->u.nth.count);
74175 + atomic_set_unchecked(&info->master->count, info->u.nth.count);
74176
74177 return 0;
74178 }
74179 diff -urNp linux-3.0.8/net/netlink/af_netlink.c linux-3.0.8/net/netlink/af_netlink.c
74180 --- linux-3.0.8/net/netlink/af_netlink.c 2011-07-21 22:17:23.000000000 -0400
74181 +++ linux-3.0.8/net/netlink/af_netlink.c 2011-08-23 21:47:56.000000000 -0400
74182 @@ -742,7 +742,7 @@ static void netlink_overrun(struct sock
74183 sk->sk_error_report(sk);
74184 }
74185 }
74186 - atomic_inc(&sk->sk_drops);
74187 + atomic_inc_unchecked(&sk->sk_drops);
74188 }
74189
74190 static struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid)
74191 @@ -1994,7 +1994,7 @@ static int netlink_seq_show(struct seq_f
74192 sk_wmem_alloc_get(s),
74193 nlk->cb,
74194 atomic_read(&s->sk_refcnt),
74195 - atomic_read(&s->sk_drops),
74196 + atomic_read_unchecked(&s->sk_drops),
74197 sock_i_ino(s)
74198 );
74199
74200 diff -urNp linux-3.0.8/net/netrom/af_netrom.c linux-3.0.8/net/netrom/af_netrom.c
74201 --- linux-3.0.8/net/netrom/af_netrom.c 2011-07-21 22:17:23.000000000 -0400
74202 +++ linux-3.0.8/net/netrom/af_netrom.c 2011-08-23 21:48:14.000000000 -0400
74203 @@ -839,6 +839,7 @@ static int nr_getname(struct socket *soc
74204 struct sock *sk = sock->sk;
74205 struct nr_sock *nr = nr_sk(sk);
74206
74207 + memset(sax, 0, sizeof(*sax));
74208 lock_sock(sk);
74209 if (peer != 0) {
74210 if (sk->sk_state != TCP_ESTABLISHED) {
74211 @@ -853,7 +854,6 @@ static int nr_getname(struct socket *soc
74212 *uaddr_len = sizeof(struct full_sockaddr_ax25);
74213 } else {
74214 sax->fsa_ax25.sax25_family = AF_NETROM;
74215 - sax->fsa_ax25.sax25_ndigis = 0;
74216 sax->fsa_ax25.sax25_call = nr->source_addr;
74217 *uaddr_len = sizeof(struct sockaddr_ax25);
74218 }
74219 diff -urNp linux-3.0.8/net/packet/af_packet.c linux-3.0.8/net/packet/af_packet.c
74220 --- linux-3.0.8/net/packet/af_packet.c 2011-07-21 22:17:23.000000000 -0400
74221 +++ linux-3.0.8/net/packet/af_packet.c 2011-08-23 21:47:56.000000000 -0400
74222 @@ -647,14 +647,14 @@ static int packet_rcv(struct sk_buff *sk
74223
74224 spin_lock(&sk->sk_receive_queue.lock);
74225 po->stats.tp_packets++;
74226 - skb->dropcount = atomic_read(&sk->sk_drops);
74227 + skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
74228 __skb_queue_tail(&sk->sk_receive_queue, skb);
74229 spin_unlock(&sk->sk_receive_queue.lock);
74230 sk->sk_data_ready(sk, skb->len);
74231 return 0;
74232
74233 drop_n_acct:
74234 - po->stats.tp_drops = atomic_inc_return(&sk->sk_drops);
74235 + po->stats.tp_drops = atomic_inc_return_unchecked(&sk->sk_drops);
74236
74237 drop_n_restore:
74238 if (skb_head != skb->data && skb_shared(skb)) {
74239 @@ -2168,7 +2168,7 @@ static int packet_getsockopt(struct sock
74240 case PACKET_HDRLEN:
74241 if (len > sizeof(int))
74242 len = sizeof(int);
74243 - if (copy_from_user(&val, optval, len))
74244 + if (len > sizeof(val) || copy_from_user(&val, optval, len))
74245 return -EFAULT;
74246 switch (val) {
74247 case TPACKET_V1:
74248 @@ -2206,7 +2206,7 @@ static int packet_getsockopt(struct sock
74249
74250 if (put_user(len, optlen))
74251 return -EFAULT;
74252 - if (copy_to_user(optval, data, len))
74253 + if (len > sizeof(st) || copy_to_user(optval, data, len))
74254 return -EFAULT;
74255 return 0;
74256 }
74257 diff -urNp linux-3.0.8/net/phonet/af_phonet.c linux-3.0.8/net/phonet/af_phonet.c
74258 --- linux-3.0.8/net/phonet/af_phonet.c 2011-07-21 22:17:23.000000000 -0400
74259 +++ linux-3.0.8/net/phonet/af_phonet.c 2011-08-23 21:48:14.000000000 -0400
74260 @@ -41,7 +41,7 @@ static struct phonet_protocol *phonet_pr
74261 {
74262 struct phonet_protocol *pp;
74263
74264 - if (protocol >= PHONET_NPROTO)
74265 + if (protocol < 0 || protocol >= PHONET_NPROTO)
74266 return NULL;
74267
74268 rcu_read_lock();
74269 @@ -469,7 +469,7 @@ int __init_or_module phonet_proto_regist
74270 {
74271 int err = 0;
74272
74273 - if (protocol >= PHONET_NPROTO)
74274 + if (protocol < 0 || protocol >= PHONET_NPROTO)
74275 return -EINVAL;
74276
74277 err = proto_register(pp->prot, 1);
74278 diff -urNp linux-3.0.8/net/phonet/pep.c linux-3.0.8/net/phonet/pep.c
74279 --- linux-3.0.8/net/phonet/pep.c 2011-07-21 22:17:23.000000000 -0400
74280 +++ linux-3.0.8/net/phonet/pep.c 2011-08-23 21:47:56.000000000 -0400
74281 @@ -387,7 +387,7 @@ static int pipe_do_rcv(struct sock *sk,
74282
74283 case PNS_PEP_CTRL_REQ:
74284 if (skb_queue_len(&pn->ctrlreq_queue) >= PNPIPE_CTRLREQ_MAX) {
74285 - atomic_inc(&sk->sk_drops);
74286 + atomic_inc_unchecked(&sk->sk_drops);
74287 break;
74288 }
74289 __skb_pull(skb, 4);
74290 @@ -408,7 +408,7 @@ static int pipe_do_rcv(struct sock *sk,
74291 }
74292
74293 if (pn->rx_credits == 0) {
74294 - atomic_inc(&sk->sk_drops);
74295 + atomic_inc_unchecked(&sk->sk_drops);
74296 err = -ENOBUFS;
74297 break;
74298 }
74299 @@ -556,7 +556,7 @@ static int pipe_handler_do_rcv(struct so
74300 }
74301
74302 if (pn->rx_credits == 0) {
74303 - atomic_inc(&sk->sk_drops);
74304 + atomic_inc_unchecked(&sk->sk_drops);
74305 err = NET_RX_DROP;
74306 break;
74307 }
74308 diff -urNp linux-3.0.8/net/phonet/socket.c linux-3.0.8/net/phonet/socket.c
74309 --- linux-3.0.8/net/phonet/socket.c 2011-07-21 22:17:23.000000000 -0400
74310 +++ linux-3.0.8/net/phonet/socket.c 2011-08-23 21:48:14.000000000 -0400
74311 @@ -612,8 +612,13 @@ static int pn_sock_seq_show(struct seq_f
74312 pn->resource, sk->sk_state,
74313 sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk),
74314 sock_i_uid(sk), sock_i_ino(sk),
74315 - atomic_read(&sk->sk_refcnt), sk,
74316 - atomic_read(&sk->sk_drops), &len);
74317 + atomic_read(&sk->sk_refcnt),
74318 +#ifdef CONFIG_GRKERNSEC_HIDESYM
74319 + NULL,
74320 +#else
74321 + sk,
74322 +#endif
74323 + atomic_read_unchecked(&sk->sk_drops), &len);
74324 }
74325 seq_printf(seq, "%*s\n", 127 - len, "");
74326 return 0;
74327 diff -urNp linux-3.0.8/net/rds/cong.c linux-3.0.8/net/rds/cong.c
74328 --- linux-3.0.8/net/rds/cong.c 2011-07-21 22:17:23.000000000 -0400
74329 +++ linux-3.0.8/net/rds/cong.c 2011-08-23 21:47:56.000000000 -0400
74330 @@ -77,7 +77,7 @@
74331 * finds that the saved generation number is smaller than the global generation
74332 * number, it wakes up the process.
74333 */
74334 -static atomic_t rds_cong_generation = ATOMIC_INIT(0);
74335 +static atomic_unchecked_t rds_cong_generation = ATOMIC_INIT(0);
74336
74337 /*
74338 * Congestion monitoring
74339 @@ -232,7 +232,7 @@ void rds_cong_map_updated(struct rds_con
74340 rdsdebug("waking map %p for %pI4\n",
74341 map, &map->m_addr);
74342 rds_stats_inc(s_cong_update_received);
74343 - atomic_inc(&rds_cong_generation);
74344 + atomic_inc_unchecked(&rds_cong_generation);
74345 if (waitqueue_active(&map->m_waitq))
74346 wake_up(&map->m_waitq);
74347 if (waitqueue_active(&rds_poll_waitq))
74348 @@ -258,7 +258,7 @@ EXPORT_SYMBOL_GPL(rds_cong_map_updated);
74349
74350 int rds_cong_updated_since(unsigned long *recent)
74351 {
74352 - unsigned long gen = atomic_read(&rds_cong_generation);
74353 + unsigned long gen = atomic_read_unchecked(&rds_cong_generation);
74354
74355 if (likely(*recent == gen))
74356 return 0;
74357 diff -urNp linux-3.0.8/net/rds/ib_cm.c linux-3.0.8/net/rds/ib_cm.c
74358 --- linux-3.0.8/net/rds/ib_cm.c 2011-07-21 22:17:23.000000000 -0400
74359 +++ linux-3.0.8/net/rds/ib_cm.c 2011-08-23 21:47:56.000000000 -0400
74360 @@ -720,7 +720,7 @@ void rds_ib_conn_shutdown(struct rds_con
74361 /* Clear the ACK state */
74362 clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
74363 #ifdef KERNEL_HAS_ATOMIC64
74364 - atomic64_set(&ic->i_ack_next, 0);
74365 + atomic64_set_unchecked(&ic->i_ack_next, 0);
74366 #else
74367 ic->i_ack_next = 0;
74368 #endif
74369 diff -urNp linux-3.0.8/net/rds/ib.h linux-3.0.8/net/rds/ib.h
74370 --- linux-3.0.8/net/rds/ib.h 2011-07-21 22:17:23.000000000 -0400
74371 +++ linux-3.0.8/net/rds/ib.h 2011-08-23 21:47:56.000000000 -0400
74372 @@ -127,7 +127,7 @@ struct rds_ib_connection {
74373 /* sending acks */
74374 unsigned long i_ack_flags;
74375 #ifdef KERNEL_HAS_ATOMIC64
74376 - atomic64_t i_ack_next; /* next ACK to send */
74377 + atomic64_unchecked_t i_ack_next; /* next ACK to send */
74378 #else
74379 spinlock_t i_ack_lock; /* protect i_ack_next */
74380 u64 i_ack_next; /* next ACK to send */
74381 diff -urNp linux-3.0.8/net/rds/ib_recv.c linux-3.0.8/net/rds/ib_recv.c
74382 --- linux-3.0.8/net/rds/ib_recv.c 2011-07-21 22:17:23.000000000 -0400
74383 +++ linux-3.0.8/net/rds/ib_recv.c 2011-08-23 21:47:56.000000000 -0400
74384 @@ -592,7 +592,7 @@ static u64 rds_ib_get_ack(struct rds_ib_
74385 static void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq,
74386 int ack_required)
74387 {
74388 - atomic64_set(&ic->i_ack_next, seq);
74389 + atomic64_set_unchecked(&ic->i_ack_next, seq);
74390 if (ack_required) {
74391 smp_mb__before_clear_bit();
74392 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
74393 @@ -604,7 +604,7 @@ static u64 rds_ib_get_ack(struct rds_ib_
74394 clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
74395 smp_mb__after_clear_bit();
74396
74397 - return atomic64_read(&ic->i_ack_next);
74398 + return atomic64_read_unchecked(&ic->i_ack_next);
74399 }
74400 #endif
74401
74402 diff -urNp linux-3.0.8/net/rds/iw_cm.c linux-3.0.8/net/rds/iw_cm.c
74403 --- linux-3.0.8/net/rds/iw_cm.c 2011-07-21 22:17:23.000000000 -0400
74404 +++ linux-3.0.8/net/rds/iw_cm.c 2011-08-23 21:47:56.000000000 -0400
74405 @@ -664,7 +664,7 @@ void rds_iw_conn_shutdown(struct rds_con
74406 /* Clear the ACK state */
74407 clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
74408 #ifdef KERNEL_HAS_ATOMIC64
74409 - atomic64_set(&ic->i_ack_next, 0);
74410 + atomic64_set_unchecked(&ic->i_ack_next, 0);
74411 #else
74412 ic->i_ack_next = 0;
74413 #endif
74414 diff -urNp linux-3.0.8/net/rds/iw.h linux-3.0.8/net/rds/iw.h
74415 --- linux-3.0.8/net/rds/iw.h 2011-07-21 22:17:23.000000000 -0400
74416 +++ linux-3.0.8/net/rds/iw.h 2011-08-23 21:47:56.000000000 -0400
74417 @@ -133,7 +133,7 @@ struct rds_iw_connection {
74418 /* sending acks */
74419 unsigned long i_ack_flags;
74420 #ifdef KERNEL_HAS_ATOMIC64
74421 - atomic64_t i_ack_next; /* next ACK to send */
74422 + atomic64_unchecked_t i_ack_next; /* next ACK to send */
74423 #else
74424 spinlock_t i_ack_lock; /* protect i_ack_next */
74425 u64 i_ack_next; /* next ACK to send */
74426 diff -urNp linux-3.0.8/net/rds/iw_rdma.c linux-3.0.8/net/rds/iw_rdma.c
74427 --- linux-3.0.8/net/rds/iw_rdma.c 2011-07-21 22:17:23.000000000 -0400
74428 +++ linux-3.0.8/net/rds/iw_rdma.c 2011-08-23 21:48:14.000000000 -0400
74429 @@ -182,6 +182,8 @@ int rds_iw_update_cm_id(struct rds_iw_de
74430 struct rdma_cm_id *pcm_id;
74431 int rc;
74432
74433 + pax_track_stack();
74434 +
74435 src_addr = (struct sockaddr_in *)&cm_id->route.addr.src_addr;
74436 dst_addr = (struct sockaddr_in *)&cm_id->route.addr.dst_addr;
74437
74438 diff -urNp linux-3.0.8/net/rds/iw_recv.c linux-3.0.8/net/rds/iw_recv.c
74439 --- linux-3.0.8/net/rds/iw_recv.c 2011-07-21 22:17:23.000000000 -0400
74440 +++ linux-3.0.8/net/rds/iw_recv.c 2011-08-23 21:47:56.000000000 -0400
74441 @@ -427,7 +427,7 @@ static u64 rds_iw_get_ack(struct rds_iw_
74442 static void rds_iw_set_ack(struct rds_iw_connection *ic, u64 seq,
74443 int ack_required)
74444 {
74445 - atomic64_set(&ic->i_ack_next, seq);
74446 + atomic64_set_unchecked(&ic->i_ack_next, seq);
74447 if (ack_required) {
74448 smp_mb__before_clear_bit();
74449 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
74450 @@ -439,7 +439,7 @@ static u64 rds_iw_get_ack(struct rds_iw_
74451 clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
74452 smp_mb__after_clear_bit();
74453
74454 - return atomic64_read(&ic->i_ack_next);
74455 + return atomic64_read_unchecked(&ic->i_ack_next);
74456 }
74457 #endif
74458
74459 diff -urNp linux-3.0.8/net/rds/tcp.c linux-3.0.8/net/rds/tcp.c
74460 --- linux-3.0.8/net/rds/tcp.c 2011-07-21 22:17:23.000000000 -0400
74461 +++ linux-3.0.8/net/rds/tcp.c 2011-10-06 04:17:55.000000000 -0400
74462 @@ -58,7 +58,7 @@ void rds_tcp_nonagle(struct socket *sock
74463 int val = 1;
74464
74465 set_fs(KERNEL_DS);
74466 - sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __user *)&val,
74467 + sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __force_user *)&val,
74468 sizeof(val));
74469 set_fs(oldfs);
74470 }
74471 diff -urNp linux-3.0.8/net/rds/tcp_send.c linux-3.0.8/net/rds/tcp_send.c
74472 --- linux-3.0.8/net/rds/tcp_send.c 2011-07-21 22:17:23.000000000 -0400
74473 +++ linux-3.0.8/net/rds/tcp_send.c 2011-10-06 04:17:55.000000000 -0400
74474 @@ -43,7 +43,7 @@ static void rds_tcp_cork(struct socket *
74475
74476 oldfs = get_fs();
74477 set_fs(KERNEL_DS);
74478 - sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK, (char __user *)&val,
74479 + sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK, (char __force_user *)&val,
74480 sizeof(val));
74481 set_fs(oldfs);
74482 }
74483 diff -urNp linux-3.0.8/net/rxrpc/af_rxrpc.c linux-3.0.8/net/rxrpc/af_rxrpc.c
74484 --- linux-3.0.8/net/rxrpc/af_rxrpc.c 2011-07-21 22:17:23.000000000 -0400
74485 +++ linux-3.0.8/net/rxrpc/af_rxrpc.c 2011-08-23 21:47:56.000000000 -0400
74486 @@ -39,7 +39,7 @@ static const struct proto_ops rxrpc_rpc_
74487 __be32 rxrpc_epoch;
74488
74489 /* current debugging ID */
74490 -atomic_t rxrpc_debug_id;
74491 +atomic_unchecked_t rxrpc_debug_id;
74492
74493 /* count of skbs currently in use */
74494 atomic_t rxrpc_n_skbs;
74495 diff -urNp linux-3.0.8/net/rxrpc/ar-ack.c linux-3.0.8/net/rxrpc/ar-ack.c
74496 --- linux-3.0.8/net/rxrpc/ar-ack.c 2011-07-21 22:17:23.000000000 -0400
74497 +++ linux-3.0.8/net/rxrpc/ar-ack.c 2011-08-23 21:48:14.000000000 -0400
74498 @@ -175,7 +175,7 @@ static void rxrpc_resend(struct rxrpc_ca
74499
74500 _enter("{%d,%d,%d,%d},",
74501 call->acks_hard, call->acks_unacked,
74502 - atomic_read(&call->sequence),
74503 + atomic_read_unchecked(&call->sequence),
74504 CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz));
74505
74506 stop = 0;
74507 @@ -199,7 +199,7 @@ static void rxrpc_resend(struct rxrpc_ca
74508
74509 /* each Tx packet has a new serial number */
74510 sp->hdr.serial =
74511 - htonl(atomic_inc_return(&call->conn->serial));
74512 + htonl(atomic_inc_return_unchecked(&call->conn->serial));
74513
74514 hdr = (struct rxrpc_header *) txb->head;
74515 hdr->serial = sp->hdr.serial;
74516 @@ -403,7 +403,7 @@ static void rxrpc_rotate_tx_window(struc
74517 */
74518 static void rxrpc_clear_tx_window(struct rxrpc_call *call)
74519 {
74520 - rxrpc_rotate_tx_window(call, atomic_read(&call->sequence));
74521 + rxrpc_rotate_tx_window(call, atomic_read_unchecked(&call->sequence));
74522 }
74523
74524 /*
74525 @@ -629,7 +629,7 @@ process_further:
74526
74527 latest = ntohl(sp->hdr.serial);
74528 hard = ntohl(ack.firstPacket);
74529 - tx = atomic_read(&call->sequence);
74530 + tx = atomic_read_unchecked(&call->sequence);
74531
74532 _proto("Rx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
74533 latest,
74534 @@ -842,6 +842,8 @@ void rxrpc_process_call(struct work_stru
74535 u32 abort_code = RX_PROTOCOL_ERROR;
74536 u8 *acks = NULL;
74537
74538 + pax_track_stack();
74539 +
74540 //printk("\n--------------------\n");
74541 _enter("{%d,%s,%lx} [%lu]",
74542 call->debug_id, rxrpc_call_states[call->state], call->events,
74543 @@ -1161,7 +1163,7 @@ void rxrpc_process_call(struct work_stru
74544 goto maybe_reschedule;
74545
74546 send_ACK_with_skew:
74547 - ack.maxSkew = htons(atomic_read(&call->conn->hi_serial) -
74548 + ack.maxSkew = htons(atomic_read_unchecked(&call->conn->hi_serial) -
74549 ntohl(ack.serial));
74550 send_ACK:
74551 mtu = call->conn->trans->peer->if_mtu;
74552 @@ -1173,7 +1175,7 @@ send_ACK:
74553 ackinfo.rxMTU = htonl(5692);
74554 ackinfo.jumbo_max = htonl(4);
74555
74556 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
74557 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
74558 _proto("Tx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
74559 ntohl(hdr.serial),
74560 ntohs(ack.maxSkew),
74561 @@ -1191,7 +1193,7 @@ send_ACK:
74562 send_message:
74563 _debug("send message");
74564
74565 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
74566 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
74567 _proto("Tx %s %%%u", rxrpc_pkts[hdr.type], ntohl(hdr.serial));
74568 send_message_2:
74569
74570 diff -urNp linux-3.0.8/net/rxrpc/ar-call.c linux-3.0.8/net/rxrpc/ar-call.c
74571 --- linux-3.0.8/net/rxrpc/ar-call.c 2011-07-21 22:17:23.000000000 -0400
74572 +++ linux-3.0.8/net/rxrpc/ar-call.c 2011-08-23 21:47:56.000000000 -0400
74573 @@ -83,7 +83,7 @@ static struct rxrpc_call *rxrpc_alloc_ca
74574 spin_lock_init(&call->lock);
74575 rwlock_init(&call->state_lock);
74576 atomic_set(&call->usage, 1);
74577 - call->debug_id = atomic_inc_return(&rxrpc_debug_id);
74578 + call->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
74579 call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
74580
74581 memset(&call->sock_node, 0xed, sizeof(call->sock_node));
74582 diff -urNp linux-3.0.8/net/rxrpc/ar-connection.c linux-3.0.8/net/rxrpc/ar-connection.c
74583 --- linux-3.0.8/net/rxrpc/ar-connection.c 2011-07-21 22:17:23.000000000 -0400
74584 +++ linux-3.0.8/net/rxrpc/ar-connection.c 2011-08-23 21:47:56.000000000 -0400
74585 @@ -206,7 +206,7 @@ static struct rxrpc_connection *rxrpc_al
74586 rwlock_init(&conn->lock);
74587 spin_lock_init(&conn->state_lock);
74588 atomic_set(&conn->usage, 1);
74589 - conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
74590 + conn->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
74591 conn->avail_calls = RXRPC_MAXCALLS;
74592 conn->size_align = 4;
74593 conn->header_size = sizeof(struct rxrpc_header);
74594 diff -urNp linux-3.0.8/net/rxrpc/ar-connevent.c linux-3.0.8/net/rxrpc/ar-connevent.c
74595 --- linux-3.0.8/net/rxrpc/ar-connevent.c 2011-07-21 22:17:23.000000000 -0400
74596 +++ linux-3.0.8/net/rxrpc/ar-connevent.c 2011-08-23 21:47:56.000000000 -0400
74597 @@ -109,7 +109,7 @@ static int rxrpc_abort_connection(struct
74598
74599 len = iov[0].iov_len + iov[1].iov_len;
74600
74601 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
74602 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
74603 _proto("Tx CONN ABORT %%%u { %d }", ntohl(hdr.serial), abort_code);
74604
74605 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
74606 diff -urNp linux-3.0.8/net/rxrpc/ar-input.c linux-3.0.8/net/rxrpc/ar-input.c
74607 --- linux-3.0.8/net/rxrpc/ar-input.c 2011-07-21 22:17:23.000000000 -0400
74608 +++ linux-3.0.8/net/rxrpc/ar-input.c 2011-08-23 21:47:56.000000000 -0400
74609 @@ -340,9 +340,9 @@ void rxrpc_fast_process_packet(struct rx
74610 /* track the latest serial number on this connection for ACK packet
74611 * information */
74612 serial = ntohl(sp->hdr.serial);
74613 - hi_serial = atomic_read(&call->conn->hi_serial);
74614 + hi_serial = atomic_read_unchecked(&call->conn->hi_serial);
74615 while (serial > hi_serial)
74616 - hi_serial = atomic_cmpxchg(&call->conn->hi_serial, hi_serial,
74617 + hi_serial = atomic_cmpxchg_unchecked(&call->conn->hi_serial, hi_serial,
74618 serial);
74619
74620 /* request ACK generation for any ACK or DATA packet that requests
74621 diff -urNp linux-3.0.8/net/rxrpc/ar-internal.h linux-3.0.8/net/rxrpc/ar-internal.h
74622 --- linux-3.0.8/net/rxrpc/ar-internal.h 2011-07-21 22:17:23.000000000 -0400
74623 +++ linux-3.0.8/net/rxrpc/ar-internal.h 2011-08-23 21:47:56.000000000 -0400
74624 @@ -272,8 +272,8 @@ struct rxrpc_connection {
74625 int error; /* error code for local abort */
74626 int debug_id; /* debug ID for printks */
74627 unsigned call_counter; /* call ID counter */
74628 - atomic_t serial; /* packet serial number counter */
74629 - atomic_t hi_serial; /* highest serial number received */
74630 + atomic_unchecked_t serial; /* packet serial number counter */
74631 + atomic_unchecked_t hi_serial; /* highest serial number received */
74632 u8 avail_calls; /* number of calls available */
74633 u8 size_align; /* data size alignment (for security) */
74634 u8 header_size; /* rxrpc + security header size */
74635 @@ -346,7 +346,7 @@ struct rxrpc_call {
74636 spinlock_t lock;
74637 rwlock_t state_lock; /* lock for state transition */
74638 atomic_t usage;
74639 - atomic_t sequence; /* Tx data packet sequence counter */
74640 + atomic_unchecked_t sequence; /* Tx data packet sequence counter */
74641 u32 abort_code; /* local/remote abort code */
74642 enum { /* current state of call */
74643 RXRPC_CALL_CLIENT_SEND_REQUEST, /* - client sending request phase */
74644 @@ -420,7 +420,7 @@ static inline void rxrpc_abort_call(stru
74645 */
74646 extern atomic_t rxrpc_n_skbs;
74647 extern __be32 rxrpc_epoch;
74648 -extern atomic_t rxrpc_debug_id;
74649 +extern atomic_unchecked_t rxrpc_debug_id;
74650 extern struct workqueue_struct *rxrpc_workqueue;
74651
74652 /*
74653 diff -urNp linux-3.0.8/net/rxrpc/ar-local.c linux-3.0.8/net/rxrpc/ar-local.c
74654 --- linux-3.0.8/net/rxrpc/ar-local.c 2011-07-21 22:17:23.000000000 -0400
74655 +++ linux-3.0.8/net/rxrpc/ar-local.c 2011-08-23 21:47:56.000000000 -0400
74656 @@ -45,7 +45,7 @@ struct rxrpc_local *rxrpc_alloc_local(st
74657 spin_lock_init(&local->lock);
74658 rwlock_init(&local->services_lock);
74659 atomic_set(&local->usage, 1);
74660 - local->debug_id = atomic_inc_return(&rxrpc_debug_id);
74661 + local->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
74662 memcpy(&local->srx, srx, sizeof(*srx));
74663 }
74664
74665 diff -urNp linux-3.0.8/net/rxrpc/ar-output.c linux-3.0.8/net/rxrpc/ar-output.c
74666 --- linux-3.0.8/net/rxrpc/ar-output.c 2011-07-21 22:17:23.000000000 -0400
74667 +++ linux-3.0.8/net/rxrpc/ar-output.c 2011-08-23 21:47:56.000000000 -0400
74668 @@ -681,9 +681,9 @@ static int rxrpc_send_data(struct kiocb
74669 sp->hdr.cid = call->cid;
74670 sp->hdr.callNumber = call->call_id;
74671 sp->hdr.seq =
74672 - htonl(atomic_inc_return(&call->sequence));
74673 + htonl(atomic_inc_return_unchecked(&call->sequence));
74674 sp->hdr.serial =
74675 - htonl(atomic_inc_return(&conn->serial));
74676 + htonl(atomic_inc_return_unchecked(&conn->serial));
74677 sp->hdr.type = RXRPC_PACKET_TYPE_DATA;
74678 sp->hdr.userStatus = 0;
74679 sp->hdr.securityIndex = conn->security_ix;
74680 diff -urNp linux-3.0.8/net/rxrpc/ar-peer.c linux-3.0.8/net/rxrpc/ar-peer.c
74681 --- linux-3.0.8/net/rxrpc/ar-peer.c 2011-07-21 22:17:23.000000000 -0400
74682 +++ linux-3.0.8/net/rxrpc/ar-peer.c 2011-08-23 21:47:56.000000000 -0400
74683 @@ -72,7 +72,7 @@ static struct rxrpc_peer *rxrpc_alloc_pe
74684 INIT_LIST_HEAD(&peer->error_targets);
74685 spin_lock_init(&peer->lock);
74686 atomic_set(&peer->usage, 1);
74687 - peer->debug_id = atomic_inc_return(&rxrpc_debug_id);
74688 + peer->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
74689 memcpy(&peer->srx, srx, sizeof(*srx));
74690
74691 rxrpc_assess_MTU_size(peer);
74692 diff -urNp linux-3.0.8/net/rxrpc/ar-proc.c linux-3.0.8/net/rxrpc/ar-proc.c
74693 --- linux-3.0.8/net/rxrpc/ar-proc.c 2011-07-21 22:17:23.000000000 -0400
74694 +++ linux-3.0.8/net/rxrpc/ar-proc.c 2011-08-23 21:47:56.000000000 -0400
74695 @@ -164,8 +164,8 @@ static int rxrpc_connection_seq_show(str
74696 atomic_read(&conn->usage),
74697 rxrpc_conn_states[conn->state],
74698 key_serial(conn->key),
74699 - atomic_read(&conn->serial),
74700 - atomic_read(&conn->hi_serial));
74701 + atomic_read_unchecked(&conn->serial),
74702 + atomic_read_unchecked(&conn->hi_serial));
74703
74704 return 0;
74705 }
74706 diff -urNp linux-3.0.8/net/rxrpc/ar-transport.c linux-3.0.8/net/rxrpc/ar-transport.c
74707 --- linux-3.0.8/net/rxrpc/ar-transport.c 2011-07-21 22:17:23.000000000 -0400
74708 +++ linux-3.0.8/net/rxrpc/ar-transport.c 2011-08-23 21:47:56.000000000 -0400
74709 @@ -47,7 +47,7 @@ static struct rxrpc_transport *rxrpc_all
74710 spin_lock_init(&trans->client_lock);
74711 rwlock_init(&trans->conn_lock);
74712 atomic_set(&trans->usage, 1);
74713 - trans->debug_id = atomic_inc_return(&rxrpc_debug_id);
74714 + trans->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
74715
74716 if (peer->srx.transport.family == AF_INET) {
74717 switch (peer->srx.transport_type) {
74718 diff -urNp linux-3.0.8/net/rxrpc/rxkad.c linux-3.0.8/net/rxrpc/rxkad.c
74719 --- linux-3.0.8/net/rxrpc/rxkad.c 2011-07-21 22:17:23.000000000 -0400
74720 +++ linux-3.0.8/net/rxrpc/rxkad.c 2011-08-23 21:48:14.000000000 -0400
74721 @@ -211,6 +211,8 @@ static int rxkad_secure_packet_encrypt(c
74722 u16 check;
74723 int nsg;
74724
74725 + pax_track_stack();
74726 +
74727 sp = rxrpc_skb(skb);
74728
74729 _enter("");
74730 @@ -338,6 +340,8 @@ static int rxkad_verify_packet_auth(cons
74731 u16 check;
74732 int nsg;
74733
74734 + pax_track_stack();
74735 +
74736 _enter("");
74737
74738 sp = rxrpc_skb(skb);
74739 @@ -610,7 +614,7 @@ static int rxkad_issue_challenge(struct
74740
74741 len = iov[0].iov_len + iov[1].iov_len;
74742
74743 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
74744 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
74745 _proto("Tx CHALLENGE %%%u", ntohl(hdr.serial));
74746
74747 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
74748 @@ -660,7 +664,7 @@ static int rxkad_send_response(struct rx
74749
74750 len = iov[0].iov_len + iov[1].iov_len + iov[2].iov_len;
74751
74752 - hdr->serial = htonl(atomic_inc_return(&conn->serial));
74753 + hdr->serial = htonl(atomic_inc_return_unchecked(&conn->serial));
74754 _proto("Tx RESPONSE %%%u", ntohl(hdr->serial));
74755
74756 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 3, len);
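
Several hunks in this patch (iw_rdma.c and ar-ack.c above, rxkad.c here, and later __sys_sendmsg and the xfrm_user.c functions) insert pax_track_stack() at the top of functions with large local frames. This appears to support PaX's kernel stack sanitization: the call records how deep the stack has grown so the used region can be cleared later. The fragment below is an assumption-laden, userspace-only stand-in for that idea, not the PaX implementation.

#include <stdint.h>
#include <stdio.h>

static uintptr_t lowest_stack = UINTPTR_MAX;    /* per-thread state in the real feature */

static void track_stack(void)
{
	volatile char marker;                   /* its address approximates the stack pointer */
	uintptr_t sp = (uintptr_t)&marker;

	if (sp < lowest_stack)                  /* stacks grow downwards on the usual arches */
		lowest_stack = sp;
}

static void function_with_big_frame(void)
{
	volatile char scratch[4096];            /* the kind of frame that attracts the call */

	track_stack();                          /* plays the role of pax_track_stack() */
	scratch[0] = 0;
}

int main(void)
{
	function_with_big_frame();
	printf("deepest stack address observed: %#lx\n", (unsigned long)lowest_stack);
	return 0;
}
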
74757 diff -urNp linux-3.0.8/net/sctp/proc.c linux-3.0.8/net/sctp/proc.c
74758 --- linux-3.0.8/net/sctp/proc.c 2011-07-21 22:17:23.000000000 -0400
74759 +++ linux-3.0.8/net/sctp/proc.c 2011-08-23 21:48:14.000000000 -0400
74760 @@ -318,7 +318,8 @@ static int sctp_assocs_seq_show(struct s
74761 seq_printf(seq,
74762 "%8pK %8pK %-3d %-3d %-2d %-4d "
74763 "%4d %8d %8d %7d %5lu %-5d %5d ",
74764 - assoc, sk, sctp_sk(sk)->type, sk->sk_state,
74765 + assoc, sk,
74766 + sctp_sk(sk)->type, sk->sk_state,
74767 assoc->state, hash,
74768 assoc->assoc_id,
74769 assoc->sndbuf_used,
74770 diff -urNp linux-3.0.8/net/sctp/socket.c linux-3.0.8/net/sctp/socket.c
74771 --- linux-3.0.8/net/sctp/socket.c 2011-07-21 22:17:23.000000000 -0400
74772 +++ linux-3.0.8/net/sctp/socket.c 2011-08-23 21:47:56.000000000 -0400
74773 @@ -4452,7 +4452,7 @@ static int sctp_getsockopt_peer_addrs(st
74774 addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
74775 if (space_left < addrlen)
74776 return -ENOMEM;
74777 - if (copy_to_user(to, &temp, addrlen))
74778 + if (addrlen > sizeof(temp) || copy_to_user(to, &temp, addrlen))
74779 return -EFAULT;
74780 to += addrlen;
74781 cnt++;
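
The sctp_getsockopt_peer_addrs() change above bounds addrlen by the size of the on-stack temp buffer before copy_to_user(), so an oversized address length can no longer copy adjacent stack memory out to userspace. A minimal userspace rendering of that pattern, with made-up types and memcpy() standing in for copy_to_user():

#include <stdio.h>
#include <string.h>

struct addr_buf { char data[128]; };            /* plays the role of the on-stack temp */

static int copy_addr_out(char *to, size_t addrlen)
{
	struct addr_buf temp = { {0} };

	if (addrlen > sizeof(temp))             /* reject before copying, as the hunk does */
		return -1;                      /* the kernel path returns -EFAULT here */

	memcpy(to, &temp, addrlen);             /* stand-in for copy_to_user() */
	return 0;
}

int main(void)
{
	char user_buf[256];

	printf("addrlen 64:  %d\n", copy_addr_out(user_buf, 64));
	printf("addrlen 512: %d\n", copy_addr_out(user_buf, 512));
	return 0;
}
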
74782 diff -urNp linux-3.0.8/net/socket.c linux-3.0.8/net/socket.c
74783 --- linux-3.0.8/net/socket.c 2011-10-24 08:05:30.000000000 -0400
74784 +++ linux-3.0.8/net/socket.c 2011-10-16 21:55:28.000000000 -0400
74785 @@ -88,6 +88,7 @@
74786 #include <linux/nsproxy.h>
74787 #include <linux/magic.h>
74788 #include <linux/slab.h>
74789 +#include <linux/in.h>
74790
74791 #include <asm/uaccess.h>
74792 #include <asm/unistd.h>
74793 @@ -105,6 +106,8 @@
74794 #include <linux/sockios.h>
74795 #include <linux/atalk.h>
74796
74797 +#include <linux/grsock.h>
74798 +
74799 static int sock_no_open(struct inode *irrelevant, struct file *dontcare);
74800 static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
74801 unsigned long nr_segs, loff_t pos);
74802 @@ -321,7 +324,7 @@ static struct dentry *sockfs_mount(struc
74803 &sockfs_dentry_operations, SOCKFS_MAGIC);
74804 }
74805
74806 -static struct vfsmount *sock_mnt __read_mostly;
74807 +struct vfsmount *sock_mnt __read_mostly;
74808
74809 static struct file_system_type sock_fs_type = {
74810 .name = "sockfs",
74811 @@ -1187,6 +1190,8 @@ int __sock_create(struct net *net, int f
74812 return -EAFNOSUPPORT;
74813 if (type < 0 || type >= SOCK_MAX)
74814 return -EINVAL;
74815 + if (protocol < 0)
74816 + return -EINVAL;
74817
74818 /* Compatibility.
74819
74820 @@ -1319,6 +1324,16 @@ SYSCALL_DEFINE3(socket, int, family, int
74821 if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
74822 flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
74823
74824 + if(!gr_search_socket(family, type, protocol)) {
74825 + retval = -EACCES;
74826 + goto out;
74827 + }
74828 +
74829 + if (gr_handle_sock_all(family, type, protocol)) {
74830 + retval = -EACCES;
74831 + goto out;
74832 + }
74833 +
74834 retval = sock_create(family, type, protocol, &sock);
74835 if (retval < 0)
74836 goto out;
74837 @@ -1431,6 +1446,14 @@ SYSCALL_DEFINE3(bind, int, fd, struct so
74838 if (sock) {
74839 err = move_addr_to_kernel(umyaddr, addrlen, (struct sockaddr *)&address);
74840 if (err >= 0) {
74841 + if (gr_handle_sock_server((struct sockaddr *)&address)) {
74842 + err = -EACCES;
74843 + goto error;
74844 + }
74845 + err = gr_search_bind(sock, (struct sockaddr_in *)&address);
74846 + if (err)
74847 + goto error;
74848 +
74849 err = security_socket_bind(sock,
74850 (struct sockaddr *)&address,
74851 addrlen);
74852 @@ -1439,6 +1462,7 @@ SYSCALL_DEFINE3(bind, int, fd, struct so
74853 (struct sockaddr *)
74854 &address, addrlen);
74855 }
74856 +error:
74857 fput_light(sock->file, fput_needed);
74858 }
74859 return err;
74860 @@ -1462,10 +1486,20 @@ SYSCALL_DEFINE2(listen, int, fd, int, ba
74861 if ((unsigned)backlog > somaxconn)
74862 backlog = somaxconn;
74863
74864 + if (gr_handle_sock_server_other(sock->sk)) {
74865 + err = -EPERM;
74866 + goto error;
74867 + }
74868 +
74869 + err = gr_search_listen(sock);
74870 + if (err)
74871 + goto error;
74872 +
74873 err = security_socket_listen(sock, backlog);
74874 if (!err)
74875 err = sock->ops->listen(sock, backlog);
74876
74877 +error:
74878 fput_light(sock->file, fput_needed);
74879 }
74880 return err;
74881 @@ -1509,6 +1543,18 @@ SYSCALL_DEFINE4(accept4, int, fd, struct
74882 newsock->type = sock->type;
74883 newsock->ops = sock->ops;
74884
74885 + if (gr_handle_sock_server_other(sock->sk)) {
74886 + err = -EPERM;
74887 + sock_release(newsock);
74888 + goto out_put;
74889 + }
74890 +
74891 + err = gr_search_accept(sock);
74892 + if (err) {
74893 + sock_release(newsock);
74894 + goto out_put;
74895 + }
74896 +
74897 /*
74898 * We don't need try_module_get here, as the listening socket (sock)
74899 * has the protocol module (sock->ops->owner) held.
74900 @@ -1547,6 +1593,8 @@ SYSCALL_DEFINE4(accept4, int, fd, struct
74901 fd_install(newfd, newfile);
74902 err = newfd;
74903
74904 + gr_attach_curr_ip(newsock->sk);
74905 +
74906 out_put:
74907 fput_light(sock->file, fput_needed);
74908 out:
74909 @@ -1579,6 +1627,7 @@ SYSCALL_DEFINE3(connect, int, fd, struct
74910 int, addrlen)
74911 {
74912 struct socket *sock;
74913 + struct sockaddr *sck;
74914 struct sockaddr_storage address;
74915 int err, fput_needed;
74916
74917 @@ -1589,6 +1638,17 @@ SYSCALL_DEFINE3(connect, int, fd, struct
74918 if (err < 0)
74919 goto out_put;
74920
74921 + sck = (struct sockaddr *)&address;
74922 +
74923 + if (gr_handle_sock_client(sck)) {
74924 + err = -EACCES;
74925 + goto out_put;
74926 + }
74927 +
74928 + err = gr_search_connect(sock, (struct sockaddr_in *)sck);
74929 + if (err)
74930 + goto out_put;
74931 +
74932 err =
74933 security_socket_connect(sock, (struct sockaddr *)&address, addrlen);
74934 if (err)
74935 @@ -1890,6 +1950,8 @@ static int __sys_sendmsg(struct socket *
74936 unsigned char *ctl_buf = ctl;
74937 int err, ctl_len, iov_size, total_len;
74938
74939 + pax_track_stack();
74940 +
74941 err = -EFAULT;
74942 if (MSG_CMSG_COMPAT & flags) {
74943 if (get_compat_msghdr(msg_sys, msg_compat))
74944 @@ -1950,7 +2012,7 @@ static int __sys_sendmsg(struct socket *
74945 * checking falls down on this.
74946 */
74947 if (copy_from_user(ctl_buf,
74948 - (void __user __force *)msg_sys->msg_control,
74949 + (void __force_user *)msg_sys->msg_control,
74950 ctl_len))
74951 goto out_freectl;
74952 msg_sys->msg_control = ctl_buf;
74953 @@ -2120,7 +2182,7 @@ static int __sys_recvmsg(struct socket *
74954 * kernel msghdr to use the kernel address space)
74955 */
74956
74957 - uaddr = (__force void __user *)msg_sys->msg_name;
74958 + uaddr = (void __force_user *)msg_sys->msg_name;
74959 uaddr_len = COMPAT_NAMELEN(msg);
74960 if (MSG_CMSG_COMPAT & flags) {
74961 err = verify_compat_iovec(msg_sys, iov,
74962 @@ -2748,7 +2810,7 @@ static int ethtool_ioctl(struct net *net
74963 }
74964
74965 ifr = compat_alloc_user_space(buf_size);
74966 - rxnfc = (void *)ifr + ALIGN(sizeof(struct ifreq), 8);
74967 + rxnfc = (void __user *)ifr + ALIGN(sizeof(struct ifreq), 8);
74968
74969 if (copy_in_user(&ifr->ifr_name, &ifr32->ifr_name, IFNAMSIZ))
74970 return -EFAULT;
74971 @@ -2772,12 +2834,12 @@ static int ethtool_ioctl(struct net *net
74972 offsetof(struct ethtool_rxnfc, fs.ring_cookie));
74973
74974 if (copy_in_user(rxnfc, compat_rxnfc,
74975 - (void *)(&rxnfc->fs.m_ext + 1) -
74976 - (void *)rxnfc) ||
74977 + (void __user *)(&rxnfc->fs.m_ext + 1) -
74978 + (void __user *)rxnfc) ||
74979 copy_in_user(&rxnfc->fs.ring_cookie,
74980 &compat_rxnfc->fs.ring_cookie,
74981 - (void *)(&rxnfc->fs.location + 1) -
74982 - (void *)&rxnfc->fs.ring_cookie) ||
74983 + (void __user *)(&rxnfc->fs.location + 1) -
74984 + (void __user *)&rxnfc->fs.ring_cookie) ||
74985 copy_in_user(&rxnfc->rule_cnt, &compat_rxnfc->rule_cnt,
74986 sizeof(rxnfc->rule_cnt)))
74987 return -EFAULT;
74988 @@ -2789,12 +2851,12 @@ static int ethtool_ioctl(struct net *net
74989
74990 if (convert_out) {
74991 if (copy_in_user(compat_rxnfc, rxnfc,
74992 - (const void *)(&rxnfc->fs.m_ext + 1) -
74993 - (const void *)rxnfc) ||
74994 + (const void __user *)(&rxnfc->fs.m_ext + 1) -
74995 + (const void __user *)rxnfc) ||
74996 copy_in_user(&compat_rxnfc->fs.ring_cookie,
74997 &rxnfc->fs.ring_cookie,
74998 - (const void *)(&rxnfc->fs.location + 1) -
74999 - (const void *)&rxnfc->fs.ring_cookie) ||
75000 + (const void __user *)(&rxnfc->fs.location + 1) -
75001 + (const void __user *)&rxnfc->fs.ring_cookie) ||
75002 copy_in_user(&compat_rxnfc->rule_cnt, &rxnfc->rule_cnt,
75003 sizeof(rxnfc->rule_cnt)))
75004 return -EFAULT;
75005 @@ -2864,7 +2926,7 @@ static int bond_ioctl(struct net *net, u
75006 old_fs = get_fs();
75007 set_fs(KERNEL_DS);
75008 err = dev_ioctl(net, cmd,
75009 - (struct ifreq __user __force *) &kifr);
75010 + (struct ifreq __force_user *) &kifr);
75011 set_fs(old_fs);
75012
75013 return err;
75014 @@ -2973,7 +3035,7 @@ static int compat_sioc_ifmap(struct net
75015
75016 old_fs = get_fs();
75017 set_fs(KERNEL_DS);
75018 - err = dev_ioctl(net, cmd, (void __user __force *)&ifr);
75019 + err = dev_ioctl(net, cmd, (void __force_user *)&ifr);
75020 set_fs(old_fs);
75021
75022 if (cmd == SIOCGIFMAP && !err) {
75023 @@ -3078,7 +3140,7 @@ static int routing_ioctl(struct net *net
75024 ret |= __get_user(rtdev, &(ur4->rt_dev));
75025 if (rtdev) {
75026 ret |= copy_from_user(devname, compat_ptr(rtdev), 15);
75027 - r4.rt_dev = (char __user __force *)devname;
75028 + r4.rt_dev = (char __force_user *)devname;
75029 devname[15] = 0;
75030 } else
75031 r4.rt_dev = NULL;
75032 @@ -3318,8 +3380,8 @@ int kernel_getsockopt(struct socket *soc
75033 int __user *uoptlen;
75034 int err;
75035
75036 - uoptval = (char __user __force *) optval;
75037 - uoptlen = (int __user __force *) optlen;
75038 + uoptval = (char __force_user *) optval;
75039 + uoptlen = (int __force_user *) optlen;
75040
75041 set_fs(KERNEL_DS);
75042 if (level == SOL_SOCKET)
75043 @@ -3339,7 +3401,7 @@ int kernel_setsockopt(struct socket *soc
75044 char __user *uoptval;
75045 int err;
75046
75047 - uoptval = (char __user __force *) optval;
75048 + uoptval = (char __force_user *) optval;
75049
75050 set_fs(KERNEL_DS);
75051 if (level == SOL_SOCKET)
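
The net/socket.c hunks above all follow one shape: a grsecurity policy check runs before the real operation (socket creation, bind, listen, accept, connect), and a "deny" result short-circuits the syscall with -EACCES or -EPERM. As the error paths show, the gr_search_* helpers return zero to deny while the gr_handle_* helpers return nonzero to deny. The sketch below illustrates only that call-and-bail shape in userspace; the helper names are invented for the example and are not the grsecurity functions.

#include <errno.h>
#include <stdio.h>
#include <sys/socket.h>

/* Invented stand-in for a gr_handle_sock_all()-style check: nonzero denies. */
static int example_deny_socket(int family, int type, int protocol)
{
	(void)protocol;
	return family == AF_INET && type == SOCK_RAW;   /* e.g. a policy that blocks raw sockets */
}

/* Skeleton of a socket() entry point with the check bolted on in front. */
static int example_sys_socket(int family, int type, int protocol)
{
	if (example_deny_socket(family, type, protocol))
		return -EACCES;                 /* deny before any socket is created */

	/* ... the real code would call sock_create() and install an fd here ... */
	return 42;                              /* pretend fd for the example */
}

int main(void)
{
	printf("raw IPv4 socket    -> %d\n", example_sys_socket(AF_INET, SOCK_RAW, 0));
	printf("stream IPv4 socket -> %d\n", example_sys_socket(AF_INET, SOCK_STREAM, 0));
	return 0;
}
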
75052 diff -urNp linux-3.0.8/net/sunrpc/sched.c linux-3.0.8/net/sunrpc/sched.c
75053 --- linux-3.0.8/net/sunrpc/sched.c 2011-07-21 22:17:23.000000000 -0400
75054 +++ linux-3.0.8/net/sunrpc/sched.c 2011-08-23 21:47:56.000000000 -0400
75055 @@ -234,9 +234,9 @@ static int rpc_wait_bit_killable(void *w
75056 #ifdef RPC_DEBUG
75057 static void rpc_task_set_debuginfo(struct rpc_task *task)
75058 {
75059 - static atomic_t rpc_pid;
75060 + static atomic_unchecked_t rpc_pid;
75061
75062 - task->tk_pid = atomic_inc_return(&rpc_pid);
75063 + task->tk_pid = atomic_inc_return_unchecked(&rpc_pid);
75064 }
75065 #else
75066 static inline void rpc_task_set_debuginfo(struct rpc_task *task)
75067 diff -urNp linux-3.0.8/net/sunrpc/svcsock.c linux-3.0.8/net/sunrpc/svcsock.c
75068 --- linux-3.0.8/net/sunrpc/svcsock.c 2011-07-21 22:17:23.000000000 -0400
75069 +++ linux-3.0.8/net/sunrpc/svcsock.c 2011-10-06 04:17:55.000000000 -0400
75070 @@ -392,7 +392,7 @@ static int svc_partial_recvfrom(struct s
75071 int buflen, unsigned int base)
75072 {
75073 size_t save_iovlen;
75074 - void __user *save_iovbase;
75075 + void *save_iovbase;
75076 unsigned int i;
75077 int ret;
75078
75079 diff -urNp linux-3.0.8/net/sunrpc/xprtrdma/svc_rdma.c linux-3.0.8/net/sunrpc/xprtrdma/svc_rdma.c
75080 --- linux-3.0.8/net/sunrpc/xprtrdma/svc_rdma.c 2011-07-21 22:17:23.000000000 -0400
75081 +++ linux-3.0.8/net/sunrpc/xprtrdma/svc_rdma.c 2011-08-23 21:47:56.000000000 -0400
75082 @@ -61,15 +61,15 @@ unsigned int svcrdma_max_req_size = RPCR
75083 static unsigned int min_max_inline = 4096;
75084 static unsigned int max_max_inline = 65536;
75085
75086 -atomic_t rdma_stat_recv;
75087 -atomic_t rdma_stat_read;
75088 -atomic_t rdma_stat_write;
75089 -atomic_t rdma_stat_sq_starve;
75090 -atomic_t rdma_stat_rq_starve;
75091 -atomic_t rdma_stat_rq_poll;
75092 -atomic_t rdma_stat_rq_prod;
75093 -atomic_t rdma_stat_sq_poll;
75094 -atomic_t rdma_stat_sq_prod;
75095 +atomic_unchecked_t rdma_stat_recv;
75096 +atomic_unchecked_t rdma_stat_read;
75097 +atomic_unchecked_t rdma_stat_write;
75098 +atomic_unchecked_t rdma_stat_sq_starve;
75099 +atomic_unchecked_t rdma_stat_rq_starve;
75100 +atomic_unchecked_t rdma_stat_rq_poll;
75101 +atomic_unchecked_t rdma_stat_rq_prod;
75102 +atomic_unchecked_t rdma_stat_sq_poll;
75103 +atomic_unchecked_t rdma_stat_sq_prod;
75104
75105 /* Temporary NFS request map and context caches */
75106 struct kmem_cache *svc_rdma_map_cachep;
75107 @@ -109,7 +109,7 @@ static int read_reset_stat(ctl_table *ta
75108 len -= *ppos;
75109 if (len > *lenp)
75110 len = *lenp;
75111 - if (len && copy_to_user(buffer, str_buf, len))
75112 + if (len > sizeof str_buf || (len && copy_to_user(buffer, str_buf, len)))
75113 return -EFAULT;
75114 *lenp = len;
75115 *ppos += len;
75116 @@ -150,63 +150,63 @@ static ctl_table svcrdma_parm_table[] =
75117 {
75118 .procname = "rdma_stat_read",
75119 .data = &rdma_stat_read,
75120 - .maxlen = sizeof(atomic_t),
75121 + .maxlen = sizeof(atomic_unchecked_t),
75122 .mode = 0644,
75123 .proc_handler = read_reset_stat,
75124 },
75125 {
75126 .procname = "rdma_stat_recv",
75127 .data = &rdma_stat_recv,
75128 - .maxlen = sizeof(atomic_t),
75129 + .maxlen = sizeof(atomic_unchecked_t),
75130 .mode = 0644,
75131 .proc_handler = read_reset_stat,
75132 },
75133 {
75134 .procname = "rdma_stat_write",
75135 .data = &rdma_stat_write,
75136 - .maxlen = sizeof(atomic_t),
75137 + .maxlen = sizeof(atomic_unchecked_t),
75138 .mode = 0644,
75139 .proc_handler = read_reset_stat,
75140 },
75141 {
75142 .procname = "rdma_stat_sq_starve",
75143 .data = &rdma_stat_sq_starve,
75144 - .maxlen = sizeof(atomic_t),
75145 + .maxlen = sizeof(atomic_unchecked_t),
75146 .mode = 0644,
75147 .proc_handler = read_reset_stat,
75148 },
75149 {
75150 .procname = "rdma_stat_rq_starve",
75151 .data = &rdma_stat_rq_starve,
75152 - .maxlen = sizeof(atomic_t),
75153 + .maxlen = sizeof(atomic_unchecked_t),
75154 .mode = 0644,
75155 .proc_handler = read_reset_stat,
75156 },
75157 {
75158 .procname = "rdma_stat_rq_poll",
75159 .data = &rdma_stat_rq_poll,
75160 - .maxlen = sizeof(atomic_t),
75161 + .maxlen = sizeof(atomic_unchecked_t),
75162 .mode = 0644,
75163 .proc_handler = read_reset_stat,
75164 },
75165 {
75166 .procname = "rdma_stat_rq_prod",
75167 .data = &rdma_stat_rq_prod,
75168 - .maxlen = sizeof(atomic_t),
75169 + .maxlen = sizeof(atomic_unchecked_t),
75170 .mode = 0644,
75171 .proc_handler = read_reset_stat,
75172 },
75173 {
75174 .procname = "rdma_stat_sq_poll",
75175 .data = &rdma_stat_sq_poll,
75176 - .maxlen = sizeof(atomic_t),
75177 + .maxlen = sizeof(atomic_unchecked_t),
75178 .mode = 0644,
75179 .proc_handler = read_reset_stat,
75180 },
75181 {
75182 .procname = "rdma_stat_sq_prod",
75183 .data = &rdma_stat_sq_prod,
75184 - .maxlen = sizeof(atomic_t),
75185 + .maxlen = sizeof(atomic_unchecked_t),
75186 .mode = 0644,
75187 .proc_handler = read_reset_stat,
75188 },
75189 diff -urNp linux-3.0.8/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c linux-3.0.8/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
75190 --- linux-3.0.8/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 2011-07-21 22:17:23.000000000 -0400
75191 +++ linux-3.0.8/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 2011-08-23 21:47:56.000000000 -0400
75192 @@ -499,7 +499,7 @@ next_sge:
75193 svc_rdma_put_context(ctxt, 0);
75194 goto out;
75195 }
75196 - atomic_inc(&rdma_stat_read);
75197 + atomic_inc_unchecked(&rdma_stat_read);
75198
75199 if (read_wr.num_sge < chl_map->ch[ch_no].count) {
75200 chl_map->ch[ch_no].count -= read_wr.num_sge;
75201 @@ -609,7 +609,7 @@ int svc_rdma_recvfrom(struct svc_rqst *r
75202 dto_q);
75203 list_del_init(&ctxt->dto_q);
75204 } else {
75205 - atomic_inc(&rdma_stat_rq_starve);
75206 + atomic_inc_unchecked(&rdma_stat_rq_starve);
75207 clear_bit(XPT_DATA, &xprt->xpt_flags);
75208 ctxt = NULL;
75209 }
75210 @@ -629,7 +629,7 @@ int svc_rdma_recvfrom(struct svc_rqst *r
75211 dprintk("svcrdma: processing ctxt=%p on xprt=%p, rqstp=%p, status=%d\n",
75212 ctxt, rdma_xprt, rqstp, ctxt->wc_status);
75213 BUG_ON(ctxt->wc_status != IB_WC_SUCCESS);
75214 - atomic_inc(&rdma_stat_recv);
75215 + atomic_inc_unchecked(&rdma_stat_recv);
75216
75217 /* Build up the XDR from the receive buffers. */
75218 rdma_build_arg_xdr(rqstp, ctxt, ctxt->byte_len);
75219 diff -urNp linux-3.0.8/net/sunrpc/xprtrdma/svc_rdma_sendto.c linux-3.0.8/net/sunrpc/xprtrdma/svc_rdma_sendto.c
75220 --- linux-3.0.8/net/sunrpc/xprtrdma/svc_rdma_sendto.c 2011-07-21 22:17:23.000000000 -0400
75221 +++ linux-3.0.8/net/sunrpc/xprtrdma/svc_rdma_sendto.c 2011-08-23 21:47:56.000000000 -0400
75222 @@ -362,7 +362,7 @@ static int send_write(struct svcxprt_rdm
75223 write_wr.wr.rdma.remote_addr = to;
75224
75225 /* Post It */
75226 - atomic_inc(&rdma_stat_write);
75227 + atomic_inc_unchecked(&rdma_stat_write);
75228 if (svc_rdma_send(xprt, &write_wr))
75229 goto err;
75230 return 0;
75231 diff -urNp linux-3.0.8/net/sunrpc/xprtrdma/svc_rdma_transport.c linux-3.0.8/net/sunrpc/xprtrdma/svc_rdma_transport.c
75232 --- linux-3.0.8/net/sunrpc/xprtrdma/svc_rdma_transport.c 2011-07-21 22:17:23.000000000 -0400
75233 +++ linux-3.0.8/net/sunrpc/xprtrdma/svc_rdma_transport.c 2011-08-23 21:47:56.000000000 -0400
75234 @@ -298,7 +298,7 @@ static void rq_cq_reap(struct svcxprt_rd
75235 return;
75236
75237 ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP);
75238 - atomic_inc(&rdma_stat_rq_poll);
75239 + atomic_inc_unchecked(&rdma_stat_rq_poll);
75240
75241 while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) {
75242 ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
75243 @@ -320,7 +320,7 @@ static void rq_cq_reap(struct svcxprt_rd
75244 }
75245
75246 if (ctxt)
75247 - atomic_inc(&rdma_stat_rq_prod);
75248 + atomic_inc_unchecked(&rdma_stat_rq_prod);
75249
75250 set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
75251 /*
75252 @@ -392,7 +392,7 @@ static void sq_cq_reap(struct svcxprt_rd
75253 return;
75254
75255 ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP);
75256 - atomic_inc(&rdma_stat_sq_poll);
75257 + atomic_inc_unchecked(&rdma_stat_sq_poll);
75258 while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) {
75259 if (wc.status != IB_WC_SUCCESS)
75260 /* Close the transport */
75261 @@ -410,7 +410,7 @@ static void sq_cq_reap(struct svcxprt_rd
75262 }
75263
75264 if (ctxt)
75265 - atomic_inc(&rdma_stat_sq_prod);
75266 + atomic_inc_unchecked(&rdma_stat_sq_prod);
75267 }
75268
75269 static void sq_comp_handler(struct ib_cq *cq, void *cq_context)
75270 @@ -1272,7 +1272,7 @@ int svc_rdma_send(struct svcxprt_rdma *x
75271 spin_lock_bh(&xprt->sc_lock);
75272 if (xprt->sc_sq_depth < atomic_read(&xprt->sc_sq_count) + wr_count) {
75273 spin_unlock_bh(&xprt->sc_lock);
75274 - atomic_inc(&rdma_stat_sq_starve);
75275 + atomic_inc_unchecked(&rdma_stat_sq_starve);
75276
75277 /* See if we can opportunistically reap SQ WR to make room */
75278 sq_cq_reap(xprt);
75279 diff -urNp linux-3.0.8/net/sysctl_net.c linux-3.0.8/net/sysctl_net.c
75280 --- linux-3.0.8/net/sysctl_net.c 2011-07-21 22:17:23.000000000 -0400
75281 +++ linux-3.0.8/net/sysctl_net.c 2011-08-23 21:48:14.000000000 -0400
75282 @@ -46,7 +46,7 @@ static int net_ctl_permissions(struct ct
75283 struct ctl_table *table)
75284 {
75285 /* Allow network administrator to have same access as root. */
75286 - if (capable(CAP_NET_ADMIN)) {
75287 + if (capable_nolog(CAP_NET_ADMIN)) {
75288 int mode = (table->mode >> 6) & 7;
75289 return (mode << 6) | (mode << 3) | mode;
75290 }
75291 diff -urNp linux-3.0.8/net/tipc/link.c linux-3.0.8/net/tipc/link.c
75292 --- linux-3.0.8/net/tipc/link.c 2011-07-21 22:17:23.000000000 -0400
75293 +++ linux-3.0.8/net/tipc/link.c 2011-10-06 04:17:55.000000000 -0400
75294 @@ -1170,7 +1170,7 @@ static int link_send_sections_long(struc
75295 struct tipc_msg fragm_hdr;
75296 struct sk_buff *buf, *buf_chain, *prev;
75297 u32 fragm_crs, fragm_rest, hsz, sect_rest;
75298 - const unchar *sect_crs;
75299 + const unchar __user *sect_crs;
75300 int curr_sect;
75301 u32 fragm_no;
75302
75303 @@ -1214,7 +1214,7 @@ again:
75304
75305 if (!sect_rest) {
75306 sect_rest = msg_sect[++curr_sect].iov_len;
75307 - sect_crs = (const unchar *)msg_sect[curr_sect].iov_base;
75308 + sect_crs = (const unchar __user *)msg_sect[curr_sect].iov_base;
75309 }
75310
75311 if (sect_rest < fragm_rest)
75312 @@ -1233,7 +1233,7 @@ error:
75313 }
75314 } else
75315 skb_copy_to_linear_data_offset(buf, fragm_crs,
75316 - sect_crs, sz);
75317 + (const void __force_kernel *)sect_crs, sz);
75318 sect_crs += sz;
75319 sect_rest -= sz;
75320 fragm_crs += sz;
75321 diff -urNp linux-3.0.8/net/tipc/msg.c linux-3.0.8/net/tipc/msg.c
75322 --- linux-3.0.8/net/tipc/msg.c 2011-07-21 22:17:23.000000000 -0400
75323 +++ linux-3.0.8/net/tipc/msg.c 2011-10-06 04:17:55.000000000 -0400
75324 @@ -101,7 +101,7 @@ int tipc_msg_build(struct tipc_msg *hdr,
75325 msg_sect[cnt].iov_len);
75326 else
75327 skb_copy_to_linear_data_offset(*buf, pos,
75328 - msg_sect[cnt].iov_base,
75329 + (const void __force_kernel *)msg_sect[cnt].iov_base,
75330 msg_sect[cnt].iov_len);
75331 pos += msg_sect[cnt].iov_len;
75332 }
75333 diff -urNp linux-3.0.8/net/tipc/subscr.c linux-3.0.8/net/tipc/subscr.c
75334 --- linux-3.0.8/net/tipc/subscr.c 2011-07-21 22:17:23.000000000 -0400
75335 +++ linux-3.0.8/net/tipc/subscr.c 2011-10-06 04:17:55.000000000 -0400
75336 @@ -101,7 +101,7 @@ static void subscr_send_event(struct sub
75337 {
75338 struct iovec msg_sect;
75339
75340 - msg_sect.iov_base = (void *)&sub->evt;
75341 + msg_sect.iov_base = (void __force_user *)&sub->evt;
75342 msg_sect.iov_len = sizeof(struct tipc_event);
75343
75344 sub->evt.event = htohl(event, sub->swap);
75345 diff -urNp linux-3.0.8/net/unix/af_unix.c linux-3.0.8/net/unix/af_unix.c
75346 --- linux-3.0.8/net/unix/af_unix.c 2011-07-21 22:17:23.000000000 -0400
75347 +++ linux-3.0.8/net/unix/af_unix.c 2011-08-23 21:48:14.000000000 -0400
75348 @@ -767,6 +767,12 @@ static struct sock *unix_find_other(stru
75349 err = -ECONNREFUSED;
75350 if (!S_ISSOCK(inode->i_mode))
75351 goto put_fail;
75352 +
75353 + if (!gr_acl_handle_unix(path.dentry, path.mnt)) {
75354 + err = -EACCES;
75355 + goto put_fail;
75356 + }
75357 +
75358 u = unix_find_socket_byinode(inode);
75359 if (!u)
75360 goto put_fail;
75361 @@ -787,6 +793,13 @@ static struct sock *unix_find_other(stru
75362 if (u) {
75363 struct dentry *dentry;
75364 dentry = unix_sk(u)->dentry;
75365 +
75366 + if (!gr_handle_chroot_unix(pid_vnr(u->sk_peer_pid))) {
75367 + err = -EPERM;
75368 + sock_put(u);
75369 + goto fail;
75370 + }
75371 +
75372 if (dentry)
75373 touch_atime(unix_sk(u)->mnt, dentry);
75374 } else
75375 @@ -872,11 +885,18 @@ static int unix_bind(struct socket *sock
75376 err = security_path_mknod(&nd.path, dentry, mode, 0);
75377 if (err)
75378 goto out_mknod_drop_write;
75379 + if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
75380 + err = -EACCES;
75381 + goto out_mknod_drop_write;
75382 + }
75383 err = vfs_mknod(nd.path.dentry->d_inode, dentry, mode, 0);
75384 out_mknod_drop_write:
75385 mnt_drop_write(nd.path.mnt);
75386 if (err)
75387 goto out_mknod_dput;
75388 +
75389 + gr_handle_create(dentry, nd.path.mnt);
75390 +
75391 mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
75392 dput(nd.path.dentry);
75393 nd.path.dentry = dentry;
75394 diff -urNp linux-3.0.8/net/wireless/core.h linux-3.0.8/net/wireless/core.h
75395 --- linux-3.0.8/net/wireless/core.h 2011-07-21 22:17:23.000000000 -0400
75396 +++ linux-3.0.8/net/wireless/core.h 2011-08-23 21:47:56.000000000 -0400
75397 @@ -27,7 +27,7 @@ struct cfg80211_registered_device {
75398 struct mutex mtx;
75399
75400 /* rfkill support */
75401 - struct rfkill_ops rfkill_ops;
75402 + rfkill_ops_no_const rfkill_ops;
75403 struct rfkill *rfkill;
75404 struct work_struct rfkill_sync;
75405
75406 diff -urNp linux-3.0.8/net/wireless/wext-core.c linux-3.0.8/net/wireless/wext-core.c
75407 --- linux-3.0.8/net/wireless/wext-core.c 2011-07-21 22:17:23.000000000 -0400
75408 +++ linux-3.0.8/net/wireless/wext-core.c 2011-08-23 21:47:56.000000000 -0400
75409 @@ -746,8 +746,7 @@ static int ioctl_standard_iw_point(struc
75410 */
75411
75412 /* Support for very large requests */
75413 - if ((descr->flags & IW_DESCR_FLAG_NOMAX) &&
75414 - (user_length > descr->max_tokens)) {
75415 + if (user_length > descr->max_tokens) {
75416 /* Allow userspace to GET more than max so
75417 * we can support any size GET requests.
75418 * There is still a limit : -ENOMEM.
75419 @@ -784,22 +783,6 @@ static int ioctl_standard_iw_point(struc
75420 }
75421 }
75422
75423 - if (IW_IS_GET(cmd) && !(descr->flags & IW_DESCR_FLAG_NOMAX)) {
75424 - /*
75425 - * If this is a GET, but not NOMAX, it means that the extra
75426 - * data is not bounded by userspace, but by max_tokens. Thus
75427 - * set the length to max_tokens. This matches the extra data
75428 - * allocation.
75429 - * The driver should fill it with the number of tokens it
75430 - * provided, and it may check iwp->length rather than having
75431 - * knowledge of max_tokens. If the driver doesn't change the
75432 - * iwp->length, this ioctl just copies back max_token tokens
75433 - * filled with zeroes. Hopefully the driver isn't claiming
75434 - * them to be valid data.
75435 - */
75436 - iwp->length = descr->max_tokens;
75437 - }
75438 -
75439 err = handler(dev, info, (union iwreq_data *) iwp, extra);
75440
75441 iwp->length += essid_compat;
75442 diff -urNp linux-3.0.8/net/xfrm/xfrm_policy.c linux-3.0.8/net/xfrm/xfrm_policy.c
75443 --- linux-3.0.8/net/xfrm/xfrm_policy.c 2011-07-21 22:17:23.000000000 -0400
75444 +++ linux-3.0.8/net/xfrm/xfrm_policy.c 2011-08-23 21:47:56.000000000 -0400
75445 @@ -299,7 +299,7 @@ static void xfrm_policy_kill(struct xfrm
75446 {
75447 policy->walk.dead = 1;
75448
75449 - atomic_inc(&policy->genid);
75450 + atomic_inc_unchecked(&policy->genid);
75451
75452 if (del_timer(&policy->timer))
75453 xfrm_pol_put(policy);
75454 @@ -583,7 +583,7 @@ int xfrm_policy_insert(int dir, struct x
75455 hlist_add_head(&policy->bydst, chain);
75456 xfrm_pol_hold(policy);
75457 net->xfrm.policy_count[dir]++;
75458 - atomic_inc(&flow_cache_genid);
75459 + atomic_inc_unchecked(&flow_cache_genid);
75460 if (delpol)
75461 __xfrm_policy_unlink(delpol, dir);
75462 policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir);
75463 @@ -1528,7 +1528,7 @@ free_dst:
75464 goto out;
75465 }
75466
75467 -static int inline
75468 +static inline int
75469 xfrm_dst_alloc_copy(void **target, const void *src, int size)
75470 {
75471 if (!*target) {
75472 @@ -1540,7 +1540,7 @@ xfrm_dst_alloc_copy(void **target, const
75473 return 0;
75474 }
75475
75476 -static int inline
75477 +static inline int
75478 xfrm_dst_update_parent(struct dst_entry *dst, const struct xfrm_selector *sel)
75479 {
75480 #ifdef CONFIG_XFRM_SUB_POLICY
75481 @@ -1552,7 +1552,7 @@ xfrm_dst_update_parent(struct dst_entry
75482 #endif
75483 }
75484
75485 -static int inline
75486 +static inline int
75487 xfrm_dst_update_origin(struct dst_entry *dst, const struct flowi *fl)
75488 {
75489 #ifdef CONFIG_XFRM_SUB_POLICY
75490 @@ -1646,7 +1646,7 @@ xfrm_resolve_and_create_bundle(struct xf
75491
75492 xdst->num_pols = num_pols;
75493 memcpy(xdst->pols, pols, sizeof(struct xfrm_policy*) * num_pols);
75494 - xdst->policy_genid = atomic_read(&pols[0]->genid);
75495 + xdst->policy_genid = atomic_read_unchecked(&pols[0]->genid);
75496
75497 return xdst;
75498 }
75499 @@ -2333,7 +2333,7 @@ static int xfrm_bundle_ok(struct xfrm_ds
75500 if (xdst->xfrm_genid != dst->xfrm->genid)
75501 return 0;
75502 if (xdst->num_pols > 0 &&
75503 - xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
75504 + xdst->policy_genid != atomic_read_unchecked(&xdst->pols[0]->genid))
75505 return 0;
75506
75507 mtu = dst_mtu(dst->child);
75508 @@ -2861,7 +2861,7 @@ static int xfrm_policy_migrate(struct xf
75509 sizeof(pol->xfrm_vec[i].saddr));
75510 pol->xfrm_vec[i].encap_family = mp->new_family;
75511 /* flush bundles */
75512 - atomic_inc(&pol->genid);
75513 + atomic_inc_unchecked(&pol->genid);
75514 }
75515 }
75516
75517 diff -urNp linux-3.0.8/net/xfrm/xfrm_user.c linux-3.0.8/net/xfrm/xfrm_user.c
75518 --- linux-3.0.8/net/xfrm/xfrm_user.c 2011-07-21 22:17:23.000000000 -0400
75519 +++ linux-3.0.8/net/xfrm/xfrm_user.c 2011-08-23 21:48:14.000000000 -0400
75520 @@ -1394,6 +1394,8 @@ static int copy_to_user_tmpl(struct xfrm
75521 struct xfrm_user_tmpl vec[XFRM_MAX_DEPTH];
75522 int i;
75523
75524 + pax_track_stack();
75525 +
75526 if (xp->xfrm_nr == 0)
75527 return 0;
75528
75529 @@ -2062,6 +2064,8 @@ static int xfrm_do_migrate(struct sk_buf
75530 int err;
75531 int n = 0;
75532
75533 + pax_track_stack();
75534 +
75535 if (attrs[XFRMA_MIGRATE] == NULL)
75536 return -EINVAL;
75537
75538 diff -urNp linux-3.0.8/scripts/basic/fixdep.c linux-3.0.8/scripts/basic/fixdep.c
75539 --- linux-3.0.8/scripts/basic/fixdep.c 2011-07-21 22:17:23.000000000 -0400
75540 +++ linux-3.0.8/scripts/basic/fixdep.c 2011-10-06 04:17:55.000000000 -0400
75541 @@ -161,7 +161,7 @@ static unsigned int strhash(const char *
75542 /*
75543 * Lookup a value in the configuration string.
75544 */
75545 -static int is_defined_config(const char *name, int len, unsigned int hash)
75546 +static int is_defined_config(const char *name, unsigned int len, unsigned int hash)
75547 {
75548 struct item *aux;
75549
75550 @@ -211,10 +211,10 @@ static void clear_config(void)
75551 /*
75552 * Record the use of a CONFIG_* word.
75553 */
75554 -static void use_config(const char *m, int slen)
75555 +static void use_config(const char *m, unsigned int slen)
75556 {
75557 unsigned int hash = strhash(m, slen);
75558 - int c, i;
75559 + unsigned int c, i;
75560
75561 if (is_defined_config(m, slen, hash))
75562 return;
75563 @@ -235,9 +235,9 @@ static void use_config(const char *m, in
75564
75565 static void parse_config_file(const char *map, size_t len)
75566 {
75567 - const int *end = (const int *) (map + len);
75568 + const unsigned int *end = (const unsigned int *) (map + len);
75569 /* start at +1, so that p can never be < map */
75570 - const int *m = (const int *) map + 1;
75571 + const unsigned int *m = (const unsigned int *) map + 1;
75572 const char *p, *q;
75573
75574 for (; m < end; m++) {
75575 @@ -405,7 +405,7 @@ static void print_deps(void)
75576 static void traps(void)
75577 {
75578 static char test[] __attribute__((aligned(sizeof(int)))) = "CONF";
75579 - int *p = (int *)test;
75580 + unsigned int *p = (unsigned int *)test;
75581
75582 if (*p != INT_CONF) {
75583 fprintf(stderr, "fixdep: sizeof(int) != 4 or wrong endianess? %#x\n",
75584 diff -urNp linux-3.0.8/scripts/gcc-plugin.sh linux-3.0.8/scripts/gcc-plugin.sh
75585 --- linux-3.0.8/scripts/gcc-plugin.sh 1969-12-31 19:00:00.000000000 -0500
75586 +++ linux-3.0.8/scripts/gcc-plugin.sh 2011-09-14 09:08:05.000000000 -0400
75587 @@ -0,0 +1,2 @@
75588 +#!/bin/sh
75589 +echo "#include \"gcc-plugin.h\"\n#include \"rtl.h\"" | $1 -x c -shared - -o /dev/null -I`$2 -print-file-name=plugin`/include >/dev/null 2>&1 && echo "y"
75590 diff -urNp linux-3.0.8/scripts/Makefile.build linux-3.0.8/scripts/Makefile.build
75591 --- linux-3.0.8/scripts/Makefile.build 2011-07-21 22:17:23.000000000 -0400
75592 +++ linux-3.0.8/scripts/Makefile.build 2011-11-01 05:23:50.000000000 -0400
75593 @@ -62,7 +62,7 @@ endif
75594 ifdef KBUILD_ENABLE_EXTRA_GCC_CHECKS
75595 warning- := $(empty)
75596
75597 -warning-1 := -Wextra -Wunused -Wno-unused-parameter
75598 +warning-1 := -Wextra -Wunused -Wno-unused-parameter -Wno-missing-field-initializers
75599 warning-1 += -Wmissing-declarations
75600 warning-1 += -Wmissing-format-attribute
75601 warning-1 += -Wmissing-prototypes
75602 @@ -109,7 +109,7 @@ endif
75603 endif
75604
75605 # Do not include host rules unless needed
75606 -ifneq ($(hostprogs-y)$(hostprogs-m),)
75607 +ifneq ($(hostprogs-y)$(hostprogs-m)$(hostlibs-y)$(hostlibs-m),)
75608 include scripts/Makefile.host
75609 endif
75610
75611 diff -urNp linux-3.0.8/scripts/Makefile.clean linux-3.0.8/scripts/Makefile.clean
75612 --- linux-3.0.8/scripts/Makefile.clean 2011-07-21 22:17:23.000000000 -0400
75613 +++ linux-3.0.8/scripts/Makefile.clean 2011-08-23 21:47:56.000000000 -0400
75614 @@ -43,7 +43,8 @@ subdir-ymn := $(addprefix $(obj)/,$(subd
75615 __clean-files := $(extra-y) $(always) \
75616 $(targets) $(clean-files) \
75617 $(host-progs) \
75618 - $(hostprogs-y) $(hostprogs-m) $(hostprogs-)
75619 + $(hostprogs-y) $(hostprogs-m) $(hostprogs-) \
75620 + $(hostlibs-y) $(hostlibs-m) $(hostlibs-)
75621
75622 __clean-files := $(filter-out $(no-clean-files), $(__clean-files))
75623
75624 diff -urNp linux-3.0.8/scripts/Makefile.host linux-3.0.8/scripts/Makefile.host
75625 --- linux-3.0.8/scripts/Makefile.host 2011-07-21 22:17:23.000000000 -0400
75626 +++ linux-3.0.8/scripts/Makefile.host 2011-08-23 21:47:56.000000000 -0400
75627 @@ -31,6 +31,7 @@
75628 # Note: Shared libraries consisting of C++ files are not supported
75629
75630 __hostprogs := $(sort $(hostprogs-y) $(hostprogs-m))
75631 +__hostlibs := $(sort $(hostlibs-y) $(hostlibs-m))
75632
75633 # C code
75634 # Executables compiled from a single .c file
75635 @@ -54,6 +55,7 @@ host-cxxobjs := $(sort $(foreach m,$(hos
75636 # Shared libaries (only .c supported)
75637 # Shared libraries (.so) - all .so files referenced in "xxx-objs"
75638 host-cshlib := $(sort $(filter %.so, $(host-cobjs)))
75639 +host-cshlib += $(sort $(filter %.so, $(__hostlibs)))
75640 # Remove .so files from "xxx-objs"
75641 host-cobjs := $(filter-out %.so,$(host-cobjs))
75642
75643 diff -urNp linux-3.0.8/scripts/mod/file2alias.c linux-3.0.8/scripts/mod/file2alias.c
75644 --- linux-3.0.8/scripts/mod/file2alias.c 2011-07-21 22:17:23.000000000 -0400
75645 +++ linux-3.0.8/scripts/mod/file2alias.c 2011-10-06 04:17:55.000000000 -0400
75646 @@ -72,7 +72,7 @@ static void device_id_check(const char *
75647 unsigned long size, unsigned long id_size,
75648 void *symval)
75649 {
75650 - int i;
75651 + unsigned int i;
75652
75653 if (size % id_size || size < id_size) {
75654 if (cross_build != 0)
75655 @@ -102,7 +102,7 @@ static void device_id_check(const char *
75656 /* USB is special because the bcdDevice can be matched against a numeric range */
75657 /* Looks like "usb:vNpNdNdcNdscNdpNicNiscNipN" */
75658 static void do_usb_entry(struct usb_device_id *id,
75659 - unsigned int bcdDevice_initial, int bcdDevice_initial_digits,
75660 + unsigned int bcdDevice_initial, unsigned int bcdDevice_initial_digits,
75661 unsigned char range_lo, unsigned char range_hi,
75662 unsigned char max, struct module *mod)
75663 {
75664 @@ -203,7 +203,7 @@ static void do_usb_entry_multi(struct us
75665 {
75666 unsigned int devlo, devhi;
75667 unsigned char chi, clo, max;
75668 - int ndigits;
75669 + unsigned int ndigits;
75670
75671 id->match_flags = TO_NATIVE(id->match_flags);
75672 id->idVendor = TO_NATIVE(id->idVendor);
75673 @@ -437,7 +437,7 @@ static void do_pnp_device_entry(void *sy
75674 for (i = 0; i < count; i++) {
75675 const char *id = (char *)devs[i].id;
75676 char acpi_id[sizeof(devs[0].id)];
75677 - int j;
75678 + unsigned int j;
75679
75680 buf_printf(&mod->dev_table_buf,
75681 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
75682 @@ -467,7 +467,7 @@ static void do_pnp_card_entries(void *sy
75683
75684 for (j = 0; j < PNP_MAX_DEVICES; j++) {
75685 const char *id = (char *)card->devs[j].id;
75686 - int i2, j2;
75687 + unsigned int i2, j2;
75688 int dup = 0;
75689
75690 if (!id[0])
75691 @@ -493,7 +493,7 @@ static void do_pnp_card_entries(void *sy
75692 /* add an individual alias for every device entry */
75693 if (!dup) {
75694 char acpi_id[sizeof(card->devs[0].id)];
75695 - int k;
75696 + unsigned int k;
75697
75698 buf_printf(&mod->dev_table_buf,
75699 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
75700 @@ -786,7 +786,7 @@ static void dmi_ascii_filter(char *d, co
75701 static int do_dmi_entry(const char *filename, struct dmi_system_id *id,
75702 char *alias)
75703 {
75704 - int i, j;
75705 + unsigned int i, j;
75706
75707 sprintf(alias, "dmi*");
75708
75709 diff -urNp linux-3.0.8/scripts/mod/modpost.c linux-3.0.8/scripts/mod/modpost.c
75710 --- linux-3.0.8/scripts/mod/modpost.c 2011-07-21 22:17:23.000000000 -0400
75711 +++ linux-3.0.8/scripts/mod/modpost.c 2011-08-23 21:47:56.000000000 -0400
75712 @@ -892,6 +892,7 @@ enum mismatch {
75713 ANY_INIT_TO_ANY_EXIT,
75714 ANY_EXIT_TO_ANY_INIT,
75715 EXPORT_TO_INIT_EXIT,
75716 + DATA_TO_TEXT
75717 };
75718
75719 struct sectioncheck {
75720 @@ -1000,6 +1001,12 @@ const struct sectioncheck sectioncheck[]
75721 .tosec = { INIT_SECTIONS, EXIT_SECTIONS, NULL },
75722 .mismatch = EXPORT_TO_INIT_EXIT,
75723 .symbol_white_list = { DEFAULT_SYMBOL_WHITE_LIST, NULL },
75724 +},
75725 +/* Do not reference code from writable data */
75726 +{
75727 + .fromsec = { DATA_SECTIONS, NULL },
75728 + .tosec = { TEXT_SECTIONS, NULL },
75729 + .mismatch = DATA_TO_TEXT
75730 }
75731 };
75732
75733 @@ -1122,10 +1129,10 @@ static Elf_Sym *find_elf_symbol(struct e
75734 continue;
75735 if (ELF_ST_TYPE(sym->st_info) == STT_SECTION)
75736 continue;
75737 - if (sym->st_value == addr)
75738 - return sym;
75739 /* Find a symbol nearby - addr are maybe negative */
75740 d = sym->st_value - addr;
75741 + if (d == 0)
75742 + return sym;
75743 if (d < 0)
75744 d = addr - sym->st_value;
75745 if (d < distance) {
75746 @@ -1404,6 +1411,14 @@ static void report_sec_mismatch(const ch
75747 tosym, prl_to, prl_to, tosym);
75748 free(prl_to);
75749 break;
75750 + case DATA_TO_TEXT:
75751 +/*
75752 + fprintf(stderr,
75753 + "The variable %s references\n"
75754 + "the %s %s%s%s\n",
75755 + fromsym, to, sec2annotation(tosec), tosym, to_p);
75756 +*/
75757 + break;
75758 }
75759 fprintf(stderr, "\n");
75760 }
75761 @@ -1629,7 +1644,7 @@ static void section_rel(const char *modn
75762 static void check_sec_ref(struct module *mod, const char *modname,
75763 struct elf_info *elf)
75764 {
75765 - int i;
75766 + unsigned int i;
75767 Elf_Shdr *sechdrs = elf->sechdrs;
75768
75769 /* Walk through all sections */
75770 @@ -1727,7 +1742,7 @@ void __attribute__((format(printf, 2, 3)
75771 va_end(ap);
75772 }
75773
75774 -void buf_write(struct buffer *buf, const char *s, int len)
75775 +void buf_write(struct buffer *buf, const char *s, unsigned int len)
75776 {
75777 if (buf->size - buf->pos < len) {
75778 buf->size += len + SZ;
75779 @@ -1939,7 +1954,7 @@ static void write_if_changed(struct buff
75780 if (fstat(fileno(file), &st) < 0)
75781 goto close_write;
75782
75783 - if (st.st_size != b->pos)
75784 + if (st.st_size != (off_t)b->pos)
75785 goto close_write;
75786
75787 tmp = NOFAIL(malloc(b->pos));
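
The find_elf_symbol() hunk above folds the exact-match test into the distance computation: the signed distance is calculated first, a distance of zero returns immediately, and otherwise the closest symbol seen so far is kept. A small self-contained version of that search over made-up data:

#include <limits.h>
#include <stdio.h>

struct sym { const char *name; long value; };

static const struct sym *find_nearest(const struct sym *tab, int n, long addr)
{
	const struct sym *best = NULL;
	long best_d = LONG_MAX;

	for (int i = 0; i < n; i++) {
		long d = tab[i].value - addr;   /* may be negative, as in the modpost code */

		if (d == 0)
			return &tab[i];         /* exact hit wins immediately */
		if (d < 0)
			d = -d;
		if (d < best_d) {
			best_d = d;
			best = &tab[i];
		}
	}
	return best;
}

int main(void)
{
	static const struct sym tab[] = {
		{ "init_once", 0x1000 }, { "do_work", 0x1040 }, { "cleanup", 0x10c0 },
	};

	const struct sym *s = find_nearest(tab, 3, 0x1044);
	printf("nearest symbol: %s\n", s ? s->name : "none");
	return 0;
}
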
75788 diff -urNp linux-3.0.8/scripts/mod/modpost.h linux-3.0.8/scripts/mod/modpost.h
75789 --- linux-3.0.8/scripts/mod/modpost.h 2011-07-21 22:17:23.000000000 -0400
75790 +++ linux-3.0.8/scripts/mod/modpost.h 2011-08-23 21:47:56.000000000 -0400
75791 @@ -92,15 +92,15 @@ void *do_nofail(void *ptr, const char *e
75792
75793 struct buffer {
75794 char *p;
75795 - int pos;
75796 - int size;
75797 + unsigned int pos;
75798 + unsigned int size;
75799 };
75800
75801 void __attribute__((format(printf, 2, 3)))
75802 buf_printf(struct buffer *buf, const char *fmt, ...);
75803
75804 void
75805 -buf_write(struct buffer *buf, const char *s, int len);
75806 +buf_write(struct buffer *buf, const char *s, unsigned int len);
75807
75808 struct module {
75809 struct module *next;
75810 diff -urNp linux-3.0.8/scripts/mod/sumversion.c linux-3.0.8/scripts/mod/sumversion.c
75811 --- linux-3.0.8/scripts/mod/sumversion.c 2011-07-21 22:17:23.000000000 -0400
75812 +++ linux-3.0.8/scripts/mod/sumversion.c 2011-08-23 21:47:56.000000000 -0400
75813 @@ -470,7 +470,7 @@ static void write_version(const char *fi
75814 goto out;
75815 }
75816
75817 - if (write(fd, sum, strlen(sum)+1) != strlen(sum)+1) {
75818 + if (write(fd, sum, strlen(sum)+1) != (ssize_t)strlen(sum)+1) {
75819 warn("writing sum in %s failed: %s\n",
75820 filename, strerror(errno));
75821 goto out;
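
The sumversion.c hunk compares write()'s ssize_t return value against strlen(sum)+1, a size_t expression; the added (ssize_t) cast keeps the comparison between two signed values instead of letting the return value be converted to unsigned. Functionally a -1 error still compares unequal either way, so the point is a clean like-signed comparison (it avoids a -Wsign-compare warning). A userspace version of the same check:

#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <unistd.h>

int main(void)
{
	const char *sum = "0123456789abcdef";
	size_t want = strlen(sum) + 1;          /* include the terminating NUL, like the original */

	ssize_t ret = write(STDOUT_FILENO, sum, want);
	if (ret < 0)
		perror("write");
	else if (ret != (ssize_t)want)          /* cast keeps both sides signed */
		fprintf(stderr, "short write: %zd of %zu bytes\n", ret, want);
	return 0;
}
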
75822 diff -urNp linux-3.0.8/scripts/pnmtologo.c linux-3.0.8/scripts/pnmtologo.c
75823 --- linux-3.0.8/scripts/pnmtologo.c 2011-07-21 22:17:23.000000000 -0400
75824 +++ linux-3.0.8/scripts/pnmtologo.c 2011-08-23 21:47:56.000000000 -0400
75825 @@ -237,14 +237,14 @@ static void write_header(void)
75826 fprintf(out, " * Linux logo %s\n", logoname);
75827 fputs(" */\n\n", out);
75828 fputs("#include <linux/linux_logo.h>\n\n", out);
75829 - fprintf(out, "static unsigned char %s_data[] __initdata = {\n",
75830 + fprintf(out, "static unsigned char %s_data[] = {\n",
75831 logoname);
75832 }
75833
75834 static void write_footer(void)
75835 {
75836 fputs("\n};\n\n", out);
75837 - fprintf(out, "const struct linux_logo %s __initconst = {\n", logoname);
75838 + fprintf(out, "const struct linux_logo %s = {\n", logoname);
75839 fprintf(out, "\t.type\t\t= %s,\n", logo_types[logo_type]);
75840 fprintf(out, "\t.width\t\t= %d,\n", logo_width);
75841 fprintf(out, "\t.height\t\t= %d,\n", logo_height);
75842 @@ -374,7 +374,7 @@ static void write_logo_clut224(void)
75843 fputs("\n};\n\n", out);
75844
75845 /* write logo clut */
75846 - fprintf(out, "static unsigned char %s_clut[] __initdata = {\n",
75847 + fprintf(out, "static unsigned char %s_clut[] = {\n",
75848 logoname);
75849 write_hex_cnt = 0;
75850 for (i = 0; i < logo_clutsize; i++) {
75851 diff -urNp linux-3.0.8/security/apparmor/lsm.c linux-3.0.8/security/apparmor/lsm.c
75852 --- linux-3.0.8/security/apparmor/lsm.c 2011-10-24 08:05:21.000000000 -0400
75853 +++ linux-3.0.8/security/apparmor/lsm.c 2011-08-23 21:48:14.000000000 -0400
75854 @@ -621,7 +621,7 @@ static int apparmor_task_setrlimit(struc
75855 return error;
75856 }
75857
75858 -static struct security_operations apparmor_ops = {
75859 +static struct security_operations apparmor_ops __read_only = {
75860 .name = "apparmor",
75861
75862 .ptrace_access_check = apparmor_ptrace_access_check,
75863 diff -urNp linux-3.0.8/security/commoncap.c linux-3.0.8/security/commoncap.c
75864 --- linux-3.0.8/security/commoncap.c 2011-07-21 22:17:23.000000000 -0400
75865 +++ linux-3.0.8/security/commoncap.c 2011-08-23 21:48:14.000000000 -0400
75866 @@ -28,6 +28,7 @@
75867 #include <linux/prctl.h>
75868 #include <linux/securebits.h>
75869 #include <linux/user_namespace.h>
75870 +#include <net/sock.h>
75871
75872 /*
75873 * If a non-root user executes a setuid-root binary in
75874 @@ -58,7 +59,7 @@ int cap_netlink_send(struct sock *sk, st
75875
75876 int cap_netlink_recv(struct sk_buff *skb, int cap)
75877 {
75878 - if (!cap_raised(current_cap(), cap))
75879 + if (!cap_raised(current_cap(), cap) || !gr_is_capable(cap))
75880 return -EPERM;
75881 return 0;
75882 }
75883 @@ -575,6 +576,9 @@ int cap_bprm_secureexec(struct linux_bin
75884 {
75885 const struct cred *cred = current_cred();
75886
75887 + if (gr_acl_enable_at_secure())
75888 + return 1;
75889 +
75890 if (cred->uid != 0) {
75891 if (bprm->cap_effective)
75892 return 1;
75893 diff -urNp linux-3.0.8/security/integrity/ima/ima_api.c linux-3.0.8/security/integrity/ima/ima_api.c
75894 --- linux-3.0.8/security/integrity/ima/ima_api.c 2011-07-21 22:17:23.000000000 -0400
75895 +++ linux-3.0.8/security/integrity/ima/ima_api.c 2011-08-23 21:47:56.000000000 -0400
75896 @@ -75,7 +75,7 @@ void ima_add_violation(struct inode *ino
75897 int result;
75898
75899 /* can overflow, only indicator */
75900 - atomic_long_inc(&ima_htable.violations);
75901 + atomic_long_inc_unchecked(&ima_htable.violations);
75902
75903 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
75904 if (!entry) {
75905 diff -urNp linux-3.0.8/security/integrity/ima/ima_fs.c linux-3.0.8/security/integrity/ima/ima_fs.c
75906 --- linux-3.0.8/security/integrity/ima/ima_fs.c 2011-07-21 22:17:23.000000000 -0400
75907 +++ linux-3.0.8/security/integrity/ima/ima_fs.c 2011-08-23 21:47:56.000000000 -0400
75908 @@ -28,12 +28,12 @@
75909 static int valid_policy = 1;
75910 #define TMPBUFLEN 12
75911 static ssize_t ima_show_htable_value(char __user *buf, size_t count,
75912 - loff_t *ppos, atomic_long_t *val)
75913 + loff_t *ppos, atomic_long_unchecked_t *val)
75914 {
75915 char tmpbuf[TMPBUFLEN];
75916 ssize_t len;
75917
75918 - len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read(val));
75919 + len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read_unchecked(val));
75920 return simple_read_from_buffer(buf, count, ppos, tmpbuf, len);
75921 }
75922
75923 diff -urNp linux-3.0.8/security/integrity/ima/ima.h linux-3.0.8/security/integrity/ima/ima.h
75924 --- linux-3.0.8/security/integrity/ima/ima.h 2011-07-21 22:17:23.000000000 -0400
75925 +++ linux-3.0.8/security/integrity/ima/ima.h 2011-08-23 21:47:56.000000000 -0400
75926 @@ -85,8 +85,8 @@ void ima_add_violation(struct inode *ino
75927 extern spinlock_t ima_queue_lock;
75928
75929 struct ima_h_table {
75930 - atomic_long_t len; /* number of stored measurements in the list */
75931 - atomic_long_t violations;
75932 + atomic_long_unchecked_t len; /* number of stored measurements in the list */
75933 + atomic_long_unchecked_t violations;
75934 struct hlist_head queue[IMA_MEASURE_HTABLE_SIZE];
75935 };
75936 extern struct ima_h_table ima_htable;
75937 diff -urNp linux-3.0.8/security/integrity/ima/ima_queue.c linux-3.0.8/security/integrity/ima/ima_queue.c
75938 --- linux-3.0.8/security/integrity/ima/ima_queue.c 2011-07-21 22:17:23.000000000 -0400
75939 +++ linux-3.0.8/security/integrity/ima/ima_queue.c 2011-08-23 21:47:56.000000000 -0400
75940 @@ -79,7 +79,7 @@ static int ima_add_digest_entry(struct i
75941 INIT_LIST_HEAD(&qe->later);
75942 list_add_tail_rcu(&qe->later, &ima_measurements);
75943
75944 - atomic_long_inc(&ima_htable.len);
75945 + atomic_long_inc_unchecked(&ima_htable.len);
75946 key = ima_hash_key(entry->digest);
75947 hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]);
75948 return 0;
75949 diff -urNp linux-3.0.8/security/Kconfig linux-3.0.8/security/Kconfig
75950 --- linux-3.0.8/security/Kconfig 2011-07-21 22:17:23.000000000 -0400
75951 +++ linux-3.0.8/security/Kconfig 2011-10-06 04:19:25.000000000 -0400
75952 @@ -4,6 +4,558 @@
75953
75954 menu "Security options"
75955
75956 +source grsecurity/Kconfig
75957 +
75958 +menu "PaX"
75959 +
75960 + config ARCH_TRACK_EXEC_LIMIT
75961 + bool
75962 +
75963 + config PAX_KERNEXEC_PLUGIN
75964 + bool
75965 +
75966 + config PAX_PER_CPU_PGD
75967 + bool
75968 +
75969 + config TASK_SIZE_MAX_SHIFT
75970 + int
75971 + depends on X86_64
75972 + default 47 if !PAX_PER_CPU_PGD
75973 + default 42 if PAX_PER_CPU_PGD
75974 +
75975 + config PAX_ENABLE_PAE
75976 + bool
75977 + default y if (X86_32 && (MPENTIUM4 || MK8 || MPSC || MCORE2 || MATOM))
75978 +
75979 +config PAX
75980 + bool "Enable various PaX features"
75981 + depends on GRKERNSEC && (ALPHA || ARM || AVR32 || IA64 || MIPS || PARISC || PPC || SPARC || X86)
75982 + help
75983 + This allows you to enable various PaX features. PaX adds
75984 + intrusion prevention mechanisms to the kernel that reduce
75985 + the risks posed by exploitable memory corruption bugs.
75986 +
75987 +menu "PaX Control"
75988 + depends on PAX
75989 +
75990 +config PAX_SOFTMODE
75991 + bool 'Support soft mode'
75992 + select PAX_PT_PAX_FLAGS
75993 + help
75994 + Enabling this option will allow you to run PaX in soft mode, that
75995 + is, PaX features will not be enforced by default, only on executables
75996 + marked explicitly. You must also enable PT_PAX_FLAGS support as it
75997 + is the only way to mark executables for soft mode use.
75998 +
75999 + Soft mode can be activated by using the "pax_softmode=1" kernel command
76000 + line option on boot. Furthermore you can control various PaX features
76001 + at runtime via the entries in /proc/sys/kernel/pax.
76002 +
76003 +config PAX_EI_PAX
76004 + bool 'Use legacy ELF header marking'
76005 + help
76006 + Enabling this option will allow you to control PaX features on
76007 + a per executable basis via the 'chpax' utility available at
76008 + http://pax.grsecurity.net/. The control flags will be read from
76009 + an otherwise reserved part of the ELF header. This marking has
76010 +	  numerous drawbacks (no support for soft mode, the toolchain does
76011 +	  not know about the non-standard use of the ELF header); it has
76012 +	  therefore been deprecated in favour of PT_PAX_FLAGS support.
76013 +
76014 + Note that if you enable PT_PAX_FLAGS marking support as well,
76015 +	  the PT_PAX_FLAGS marks will override the legacy EI_PAX marks.
76016 +
76017 +config PAX_PT_PAX_FLAGS
76018 + bool 'Use ELF program header marking'
76019 + help
76020 + Enabling this option will allow you to control PaX features on
76021 + a per executable basis via the 'paxctl' utility available at
76022 + http://pax.grsecurity.net/. The control flags will be read from
76023 + a PaX specific ELF program header (PT_PAX_FLAGS). This marking
76024 + has the benefits of supporting both soft mode and being fully
76025 + integrated into the toolchain (the binutils patch is available
76026 + from http://pax.grsecurity.net).
76027 +
76028 + If your toolchain does not support PT_PAX_FLAGS markings,
76029 + you can create one in most cases with 'paxctl -C'.
76030 +
76031 + Note that if you enable the legacy EI_PAX marking support as well,
76032 + the EI_PAX marks will be overridden by the PT_PAX_FLAGS marks.
76033 +
76034 +choice
76035 + prompt 'MAC system integration'
76036 + default PAX_HAVE_ACL_FLAGS
76037 + help
76038 + Mandatory Access Control systems have the option of controlling
76039 +	  PaX flags on a per executable basis; choose the method supported
76040 + by your particular system.
76041 +
76042 + - "none": if your MAC system does not interact with PaX,
76043 + - "direct": if your MAC system defines pax_set_initial_flags() itself,
76044 + - "hook": if your MAC system uses the pax_set_initial_flags_func callback.
76045 +
76046 + NOTE: this option is for developers/integrators only.
76047 +
76048 + config PAX_NO_ACL_FLAGS
76049 + bool 'none'
76050 +
76051 + config PAX_HAVE_ACL_FLAGS
76052 + bool 'direct'
76053 +
76054 + config PAX_HOOK_ACL_FLAGS
76055 + bool 'hook'
76056 +endchoice
76057 +
76058 +endmenu
76059 +
76060 +menu "Non-executable pages"
76061 + depends on PAX
76062 +
76063 +config PAX_NOEXEC
76064 + bool "Enforce non-executable pages"
76065 + depends on (PAX_EI_PAX || PAX_PT_PAX_FLAGS || PAX_HAVE_ACL_FLAGS || PAX_HOOK_ACL_FLAGS) && (ALPHA || (ARM && (CPU_V6 || CPU_V7)) || IA64 || MIPS || PARISC || PPC || S390 || SPARC || X86)
76066 + help
76067 +	  By design some architectures do not allow memory pages to be
76068 +	  protected against execution, or, even if they do, Linux does not
76069 +	  make use of this feature. In practice this means that if a page
76070 +	  is readable (such as the stack or heap) it is also executable.
76071 +
76072 + There is a well known exploit technique that makes use of this
76073 + fact and a common programming mistake where an attacker can
76074 + introduce code of his choice somewhere in the attacked program's
76075 + memory (typically the stack or the heap) and then execute it.
76076 +
76077 +	  If the attacked program was running with different (typically
76078 +	  higher) privileges than those of the attacker, then he can elevate
76079 +	  his own privilege level (e.g. get a root shell, write to files to
76080 +	  which he does not otherwise have write access, etc).
76081 +
76082 + Enabling this option will let you choose from various features
76083 + that prevent the injection and execution of 'foreign' code in
76084 + a program.
76085 +
76086 +	  This will also break programs that rely on the old behaviour and
76087 +	  expect memory dynamically allocated via the malloc() family of
76088 +	  functions to be executable (which it is not). Notable examples are
76089 +	  the XFree86 4.x server, the Java runtime and Wine (see the sketch below).
76090 +
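To make the problem described above concrete, here is a minimal userland sketch (an illustrative aside, not part of the patch itself, and x86-specific since it hard-codes a "ret" opcode): it copies a single instruction into an anonymous read-write mapping and tries to call it. On a kernel and CPU that enforce non-executable pages the indirect call is expected to die with SIGSEGV instead of executing the injected byte.

    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>

    int main(void)
    {
        /* 0xc3 is the x86 "ret" opcode; the mapping is RW, not RX */
        static const unsigned char code[] = { 0xc3 };
        void *buf = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (buf == MAP_FAILED) {
            perror("mmap");
            return 1;
        }
        memcpy(buf, code, sizeof(code));

        /* expected to fault with SIGSEGV when NX is enforced */
        ((void (*)(void))buf)();
        puts("executed code from a non-executable mapping");
        return 0;
    }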
76091 +config PAX_PAGEEXEC
76092 + bool "Paging based non-executable pages"
76093 + depends on PAX_NOEXEC && (!X86_32 || M586 || M586TSC || M586MMX || M686 || MPENTIUMII || MPENTIUMIII || MPENTIUMM || MCORE2 || MATOM || MPENTIUM4 || MPSC || MK7 || MK8 || MWINCHIPC6 || MWINCHIP2 || MWINCHIP3D || MVIAC3_2 || MVIAC7)
76094 + select S390_SWITCH_AMODE if S390
76095 + select S390_EXEC_PROTECT if S390
76096 + select ARCH_TRACK_EXEC_LIMIT if X86_32
76097 + help
76098 +	  This implementation is based on the paging feature of the CPU.
76099 +	  On i386 without hardware non-executable bit support there is a
76100 +	  variable but usually low performance impact; however, on Intel's
76101 +	  P4-core-based CPUs it is very high, so you should not enable this
76102 +	  for kernels meant to be used on such CPUs.
76103 +
76104 + On alpha, avr32, ia64, parisc, sparc, sparc64, x86_64 and i386
76105 + with hardware non-executable bit support there is no performance
76106 + impact, on ppc the impact is negligible.
76107 +
76108 +	  Note that several architectures require various emulations due to
76109 +	  badly designed userland ABIs; this causes a performance impact
76110 +	  that will disappear as soon as userland is fixed. For example, ppc
76111 +	  userland MUST have been built with secure-plt by a recent toolchain.
76112 +
76113 +config PAX_SEGMEXEC
76114 + bool "Segmentation based non-executable pages"
76115 + depends on PAX_NOEXEC && X86_32
76116 + help
76117 +	  This implementation is based on the segmentation feature of the
76118 +	  CPU and has a very small performance impact; however, applications
76119 +	  will be limited to a 1.5 GB address space instead of the normal
76120 +	  3 GB.
76121 +
76122 +config PAX_EMUTRAMP
76123 + bool "Emulate trampolines" if (PAX_PAGEEXEC || PAX_SEGMEXEC) && (PARISC || X86)
76124 + default y if PARISC
76125 + help
76126 + There are some programs and libraries that for one reason or
76127 + another attempt to execute special small code snippets from
76128 +	  non-executable memory pages. The most notable examples are the
76129 +	  signal handler return code generated by the kernel itself and the
76130 +	  GCC trampolines (see the sketch after this help text).
76131 +
76132 + If you enabled CONFIG_PAX_PAGEEXEC or CONFIG_PAX_SEGMEXEC then
76133 + such programs will no longer work under your kernel.
76134 +
76135 + As a remedy you can say Y here and use the 'chpax' or 'paxctl'
76136 +	  utilities to enable trampoline emulation for the affected programs,
76137 +	  yet still have the protection provided by the non-executable pages.
76138 +
76139 + On parisc you MUST enable this option and EMUSIGRT as well, otherwise
76140 + your system will not even boot.
76141 +
76142 + Alternatively you can say N here and use the 'chpax' or 'paxctl'
76143 + utilities to disable CONFIG_PAX_PAGEEXEC and CONFIG_PAX_SEGMEXEC
76144 + for the affected files.
76145 +
76146 + NOTE: enabling this feature *may* open up a loophole in the
76147 + protection provided by non-executable pages that an attacker
76148 + could abuse. Therefore the best solution is to not have any
76149 + files on your system that would require this option. This can
76150 + be achieved by not using libc5 (which relies on the kernel
76151 + signal handler return code) and not using or rewriting programs
76152 + that make use of the nested function implementation of GCC.
76153 + Skilled users can just fix GCC itself so that it implements
76154 + nested function calls in a way that does not interfere with PaX.
76155 +
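For reference, a GCC nested function is the kind of construct that makes the compiler emit a small trampoline on the stack, which is exactly what EMUTRAMP has to emulate once the stack is non-executable. A minimal sketch, not part of the patch itself and relying on the GCC nested-function extension:

    #include <stdio.h>

    /* invokes the callback once with the given argument */
    static void run(void (*cb)(int), int arg)
    {
        cb(arg);
    }

    int main(void)
    {
        int total = 0;

        /* GCC extension: passing add's address forces GCC to build a
         * trampoline on the stack so the callback can reach the
         * enclosing frame's "total" variable. */
        void add(int x) { total += x; }

        run(add, 41);
        run(add, 1);
        printf("total = %d\n", total);
        return 0;
    }

With PAGEEXEC/SEGMEXEC active and EMUTRAMP disabled for the binary, the call through that on-stack trampoline is what gets the task killed.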
76156 +config PAX_EMUSIGRT
76157 + bool "Automatically emulate sigreturn trampolines"
76158 + depends on PAX_EMUTRAMP && PARISC
76159 + default y
76160 + help
76161 + Enabling this option will have the kernel automatically detect
76162 + and emulate signal return trampolines executing on the stack
76163 + that would otherwise lead to task termination.
76164 +
76165 + This solution is intended as a temporary one for users with
76166 + legacy versions of libc (libc5, glibc 2.0, uClibc before 0.9.17,
76167 +	  Modula-3 runtime, etc) or executables linked to such: basically
76168 + everything that does not specify its own SA_RESTORER function in
76169 + normal executable memory like glibc 2.1+ does.
76170 +
76171 + On parisc you MUST enable this option, otherwise your system will
76172 + not even boot.
76173 +
76174 +	  NOTE: this feature cannot be disabled on a per executable basis,
76175 + and since it *does* open up a loophole in the protection provided
76176 + by non-executable pages, the best solution is to not have any
76177 + files on your system that would require this option.
76178 +
76179 +config PAX_MPROTECT
76180 + bool "Restrict mprotect()"
76181 + depends on (PAX_PAGEEXEC || PAX_SEGMEXEC)
76182 + help
76183 + Enabling this option will prevent programs from
76184 + - changing the executable status of memory pages that were
76185 + not originally created as executable,
76186 + - making read-only executable pages writable again,
76187 + - creating executable pages from anonymous memory,
76188 + - making read-only-after-relocations (RELRO) data pages writable again.
76189 +
76190 +	  You should say Y here to complete the protection provided by the
76191 +	  enforcement of non-executable pages (see the sketch below).
76192 +
76193 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
76194 + this feature on a per file basis.
76195 +
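As a rough illustration of the restrictions listed above (a sketch, not part of the patch itself): the mprotect() call below tries to make an anonymous read-write mapping executable. On a vanilla kernel it succeeds, while under PAX_MPROTECT it is expected to fail with an error.

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>

    int main(void)
    {
        void *buf = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (buf == MAP_FAILED) {
            perror("mmap");
            return 1;
        }

        /* turning writable anonymous memory into executable memory is
         * one of the transitions PAX_MPROTECT refuses */
        if (mprotect(buf, 4096, PROT_READ | PROT_EXEC) != 0)
            printf("mprotect denied: %s\n", strerror(errno));
        else
            puts("mprotect succeeded (no PAX_MPROTECT restriction)");
        return 0;
    }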
76196 +config PAX_MPROTECT_COMPAT
76197 + bool "Use legacy/compat protection demoting (read help)"
76198 + depends on PAX_MPROTECT
76199 + default n
76200 + help
76201 + The current implementation of PAX_MPROTECT denies RWX allocations/mprotects
76202 + by sending the proper error code to the application. For some broken
76203 + userland, this can cause problems with Python or other applications. The
76204 +	  current implementation, however, allows applications like clamav to
76205 +	  detect whether JIT compilation/execution is allowed and to fall back
76206 +	  gracefully to an interpreter-based mode if it is not. While we encourage everyone
76207 + to use the current implementation as-is and push upstream to fix broken
76208 + userland (note that the RWX logging option can assist with this), in some
76209 + environments this may not be possible. Having to disable MPROTECT
76210 + completely on certain binaries reduces the security benefit of PaX,
76211 + so this option is provided for those environments to revert to the old
76212 + behavior.
76213 +
76214 +config PAX_ELFRELOCS
76215 + bool "Allow ELF text relocations (read help)"
76216 + depends on PAX_MPROTECT
76217 + default n
76218 + help
76219 + Non-executable pages and mprotect() restrictions are effective
76220 + in preventing the introduction of new executable code into an
76221 +	  attacked task's address space. There remain only two avenues
76222 + for this kind of attack: if the attacker can execute already
76223 + existing code in the attacked task then he can either have it
76224 + create and mmap() a file containing his code or have it mmap()
76225 + an already existing ELF library that does not have position
76226 + independent code in it and use mprotect() on it to make it
76227 + writable and copy his code there. While protecting against
76228 + the former approach is beyond PaX, the latter can be prevented
76229 + by having only PIC ELF libraries on one's system (which do not
76230 + need to relocate their code). If you are sure this is your case,
76231 + as is the case with all modern Linux distributions, then leave
76232 + this option disabled. You should say 'n' here.
76233 +
76234 +config PAX_ETEXECRELOCS
76235 + bool "Allow ELF ET_EXEC text relocations"
76236 + depends on PAX_MPROTECT && (ALPHA || IA64 || PARISC)
76237 + select PAX_ELFRELOCS
76238 + default y
76239 + help
76240 + On some architectures there are incorrectly created applications
76241 + that require text relocations and would not work without enabling
76242 + this option. If you are an alpha, ia64 or parisc user, you should
76243 + enable this option and disable it once you have made sure that
76244 + none of your applications need it.
76245 +
76246 +config PAX_EMUPLT
76247 + bool "Automatically emulate ELF PLT"
76248 + depends on PAX_MPROTECT && (ALPHA || PARISC || SPARC)
76249 + default y
76250 + help
76251 + Enabling this option will have the kernel automatically detect
76252 + and emulate the Procedure Linkage Table entries in ELF files.
76253 +	  On some architectures such entries are in writable memory, and
76254 +	  so become non-executable, leading to task termination. Therefore
76255 +	  it is mandatory that you enable this option on alpha, parisc,
76256 +	  sparc and sparc64; otherwise your system will not even boot.
76257 +
76258 + NOTE: this feature *does* open up a loophole in the protection
76259 + provided by the non-executable pages, therefore the proper
76260 + solution is to modify the toolchain to produce a PLT that does
76261 + not need to be writable.
76262 +
76263 +config PAX_DLRESOLVE
76264 + bool 'Emulate old glibc resolver stub'
76265 + depends on PAX_EMUPLT && SPARC
76266 + default n
76267 + help
76268 + This option is needed if userland has an old glibc (before 2.4)
76269 + that puts a 'save' instruction into the runtime generated resolver
76270 + stub that needs special emulation.
76271 +
76272 +config PAX_KERNEXEC
76273 + bool "Enforce non-executable kernel pages"
76274 + depends on (PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN
76275 + select PAX_PER_CPU_PGD if X86_64 || (X86_32 && X86_PAE)
76276 + select PAX_KERNEXEC_PLUGIN if X86_64
76277 + help
76278 + This is the kernel land equivalent of PAGEEXEC and MPROTECT,
76279 + that is, enabling this option will make it harder to inject
76280 + and execute 'foreign' code in kernel memory itself.
76281 +
76282 + Note that on x86_64 kernels there is a known regression when
76283 + this feature and KVM/VMX are both enabled in the host kernel.
76284 +
76285 +config PAX_KERNEXEC_MODULE_TEXT
76286 + int "Minimum amount of memory reserved for module code"
76287 + default "4"
76288 + depends on PAX_KERNEXEC && X86_32 && MODULES
76289 + help
76290 + Due to implementation details the kernel must reserve a fixed
76291 + amount of memory for module code at compile time that cannot be
76292 + changed at runtime. Here you can specify the minimum amount
76293 + in MB that will be reserved. Due to the same implementation
76294 +	  details this size will always be rounded up to the next 2/4 MB
76295 +	  boundary (depending on PAE), so the memory actually available for
76296 +	  module code will usually be more than this minimum.
76297 +
76298 + The default 4 MB should be enough for most users but if you have
76299 + an excessive number of modules (e.g., most distribution configs
76300 + compile many drivers as modules) or use huge modules such as
76301 + nvidia's kernel driver, you will need to adjust this amount.
76302 + A good rule of thumb is to look at your currently loaded kernel
76303 + modules and add up their sizes.
76304 +
76305 +endmenu
76306 +
76307 +menu "Address Space Layout Randomization"
76308 + depends on PAX
76309 +
76310 +config PAX_ASLR
76311 + bool "Address Space Layout Randomization"
76312 + depends on PAX_EI_PAX || PAX_PT_PAX_FLAGS || PAX_HAVE_ACL_FLAGS || PAX_HOOK_ACL_FLAGS
76313 + help
76314 + Many if not most exploit techniques rely on the knowledge of
76315 + certain addresses in the attacked program. The following options
76316 + will allow the kernel to apply a certain amount of randomization
76317 +	  to specific parts of the program, thereby forcing an attacker to
76318 +	  guess them in most cases. Any failed guess will most likely crash
76319 +	  the attacked program, which allows the kernel to detect such attempts
76320 +	  and react to them. PaX itself provides no reaction mechanisms;
76321 +	  instead, it is strongly encouraged that you make use of Nergal's
76322 + segvguard (ftp://ftp.pl.openwall.com/misc/segvguard/) or grsecurity's
76323 + (http://www.grsecurity.net/) built-in crash detection features or
76324 + develop one yourself.
76325 +
76326 + By saying Y here you can choose to randomize the following areas:
76327 + - top of the task's kernel stack
76328 + - top of the task's userland stack
76329 + - base address for mmap() requests that do not specify one
76330 + (this includes all libraries)
76331 + - base address of the main executable
76332 +
76333 +	  It is strongly recommended to say Y here, as address space layout
76334 +	  randomization has a negligible impact on performance yet provides
76335 +	  very effective protection.
76336 +
76337 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
76338 + this feature on a per file basis.
76339 +
76340 +config PAX_RANDKSTACK
76341 + bool "Randomize kernel stack base"
76342 + depends on X86_TSC && X86
76343 + help
76344 + By saying Y here the kernel will randomize every task's kernel
76345 + stack on every system call. This will not only force an attacker
76346 + to guess it but also prevent him from making use of possible
76347 + leaked information about it.
76348 +
76349 +	  Since the kernel stack is a rather scarce resource, randomization
76350 +	  may cause unexpected stack overflows; therefore you should test
76351 +	  your system very carefully. Note that once enabled in the kernel
76352 +	  configuration, this feature cannot be disabled on a per file basis.
76353 +
76354 +config PAX_RANDUSTACK
76355 + bool "Randomize user stack base"
76356 + depends on PAX_ASLR
76357 + help
76358 + By saying Y here the kernel will randomize every task's userland
76359 +	  stack. The randomization is done in two steps, where the second
76360 +	  one may shift the top of the stack by a large amount and may
76361 +	  cause problems for programs that want to use lots of memory (more
76362 + than 2.5 GB if SEGMEXEC is not active, or 1.25 GB when it is).
76363 + For this reason the second step can be controlled by 'chpax' or
76364 + 'paxctl' on a per file basis.
76365 +
76366 +config PAX_RANDMMAP
76367 + bool "Randomize mmap() base"
76368 + depends on PAX_ASLR
76369 + help
76370 + By saying Y here the kernel will use a randomized base address for
76371 + mmap() requests that do not specify one themselves. As a result
76372 + all dynamically loaded libraries will appear at random addresses
76373 + and therefore be harder to exploit by a technique where an attacker
76374 + attempts to execute library code for his purposes (e.g. spawn a
76375 + shell from an exploited program that is running at an elevated
76376 + privilege level).
76377 +
76378 + Furthermore, if a program is relinked as a dynamic ELF file, its
76379 + base address will be randomized as well, completing the full
76380 +	  randomization of the address space layout. Attacking such programs
76381 +	  becomes a guessing game. You can find an example of doing this at
76382 + http://pax.grsecurity.net/et_dyn.tar.gz and practical samples at
76383 + http://www.grsecurity.net/grsec-gcc-specs.tar.gz .
76384 +
76385 +	  NOTE: you can use the 'chpax' or 'paxctl' utilities to control this
76386 +	  feature on a per file basis (see the sketch below).
76387 +
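The effect of the mmap() and stack randomization can be observed from userland with a trivial sketch like the one below (not part of the patch itself). Run it several times and compare the printed addresses: with RANDMMAP/RANDUSTACK (or vanilla kernel ASLR) they change between runs, while without any randomization they stay constant.

    #include <stdio.h>
    #include <sys/mman.h>

    int main(void)
    {
        /* an anonymous mapping with no address hint lands in the
         * (possibly randomized) mmap() area */
        void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        int local;

        if (p == MAP_FAILED) {
            perror("mmap");
            return 1;
        }
        /* main's address only moves between runs for ET_DYN executables */
        printf("mmap: %p  stack: %p  main: %p\n",
               p, (void *)&local, (void *)main);
        return 0;
    }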
76388 +endmenu
76389 +
76390 +menu "Miscellaneous hardening features"
76391 +
76392 +config PAX_MEMORY_SANITIZE
76393 + bool "Sanitize all freed memory"
76394 + help
76395 + By saying Y here the kernel will erase memory pages as soon as they
76396 + are freed. This in turn reduces the lifetime of data stored in the
76397 + pages, making it less likely that sensitive information such as
76398 +	  passwords, cryptographic secrets, etc, stays in memory for too long.
76399 +
76400 +	  This is especially useful for programs whose runtime is short; long
76401 +	  lived processes and the kernel itself also benefit from this as long
76402 +	  as they operate on whole memory pages and ensure timely freeing of
76403 +	  pages that may hold sensitive information.
76404 +
76405 +	  The tradeoff is a performance impact: on a single CPU system, kernel
76406 +	  compilation sees a 3% slowdown; other systems and workloads may vary,
76407 +	  and you are advised to test this feature on your expected workload
76408 +	  before deploying it.
76409 +
76410 + Note that this feature does not protect data stored in live pages,
76411 + e.g., process memory swapped to disk may stay there for a long time.
76412 +
76413 +config PAX_MEMORY_STACKLEAK
76414 + bool "Sanitize kernel stack"
76415 + depends on X86
76416 + help
76417 + By saying Y here the kernel will erase the kernel stack before it
76418 + returns from a system call. This in turn reduces the information
76419 + that a kernel stack leak bug can reveal.
76420 +
76421 + Note that such a bug can still leak information that was put on
76422 + the stack by the current system call (the one eventually triggering
76423 + the bug) but traces of earlier system calls on the kernel stack
76424 + cannot leak anymore.
76425 +
76426 +	  The tradeoff is a performance impact: on a single CPU system, kernel
76427 +	  compilation sees a 1% slowdown; other systems and workloads may vary,
76428 +	  and you are advised to test this feature on your expected workload
76429 +	  before deploying it.
76430 +
76431 +	  Note: full support for this feature requires gcc with plugin support,
76432 +	  so make sure your compiler is at least gcc 4.5.0 (cross compilation
76433 +	  is not supported). With older gcc versions, functions with large
76434 +	  enough stack frames may leave uninitialized memory behind that may
76435 +	  be exposed to a later syscall, leaking the stack.
76436 +
76437 +config PAX_MEMORY_UDEREF
76438 + bool "Prevent invalid userland pointer dereference"
76439 + depends on X86 && !UML_X86 && !XEN
76440 + select PAX_PER_CPU_PGD if X86_64
76441 + help
76442 + By saying Y here the kernel will be prevented from dereferencing
76443 + userland pointers in contexts where the kernel expects only kernel
76444 + pointers. This is both a useful runtime debugging feature and a
76445 + security measure that prevents exploiting a class of kernel bugs.
76446 +
76447 +	  The tradeoff is that some virtualization solutions may experience
76448 +	  a huge slowdown; therefore you should not enable this feature
76449 +	  for kernels meant to run in such environments. Whether a given VM
76450 +	  solution is affected or not is best determined by simply trying it
76451 +	  out: the performance impact will be obvious right from boot, as this
76452 +	  mechanism engages very early on. A good rule of thumb is that
76453 +	  VMs running on CPUs without hardware virtualization support (i.e.,
76454 +	  the majority of IA-32 CPUs) will likely experience the slowdown.
76455 +
76456 +config PAX_REFCOUNT
76457 + bool "Prevent various kernel object reference counter overflows"
76458 + depends on GRKERNSEC && (X86 || SPARC64)
76459 + help
76460 + By saying Y here the kernel will detect and prevent overflowing
76461 + various (but not all) kinds of object reference counters. Such
76462 +	  overflows normally occur only due to bugs and are often, if not
76463 +	  always, exploitable (a toy wrap-around illustration follows below).
76464 +
76465 + The tradeoff is that data structures protected by an overflowed
76466 + refcount will never be freed and therefore will leak memory. Note
76467 +	  that this leak happens even without this protection, but in
76468 + that case the overflow can eventually trigger the freeing of the
76469 + data structure while it is still being used elsewhere, resulting
76470 + in the exploitable situation that this feature prevents.
76471 +
76472 + Since this has a negligible performance impact, you should enable
76473 + this feature.
76474 +
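The class of bug targeted here is a plain wrap-around of a reference counter. The userland toy below (not part of the patch itself, and not how the kernel implements refcounts) only shows the arithmetic: once a 32-bit counter is pushed past its maximum it wraps to zero, at which point the object it protects would be freed even though every legitimate user still holds a reference.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* toy counter, deliberately allowed to wrap the way an
         * unchecked refcount would */
        uint32_t refcount = UINT32_MAX;

        refcount++;     /* one "get" too many: wraps around to 0 */
        printf("after the extra get: %u\n", refcount);

        /* at 0 the protected object would be freed while still in use,
         * i.e. the use-after-free situation PAX_REFCOUNT prevents */
        return 0;
    }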
76475 +config PAX_USERCOPY
76476 + bool "Harden heap object copies between kernel and userland"
76477 + depends on X86 || PPC || SPARC || ARM
76478 + depends on GRKERNSEC && (SLAB || SLUB || SLOB)
76479 + help
76480 + By saying Y here the kernel will enforce the size of heap objects
76481 + when they are copied in either direction between the kernel and
76482 + userland, even if only a part of the heap object is copied.
76483 +
76484 + Specifically, this checking prevents information leaking from the
76485 + kernel heap during kernel to userland copies (if the kernel heap
76486 + object is otherwise fully initialized) and prevents kernel heap
76487 + overflows during userland to kernel copies.
76488 +
76489 + Note that the current implementation provides the strictest bounds
76490 + checks for the SLUB allocator.
76491 +
76492 + Enabling this option also enables per-slab cache protection against
76493 +	  data in a given cache being copied into or out of it via userland
76494 + accessors. Though the whitelist of regions will be reduced over
76495 + time, it notably protects important data structures like task structs.
76496 +
76497 + If frame pointers are enabled on x86, this option will also restrict
76498 + copies into and out of the kernel stack to local variables within a
76499 + single frame.
76500 +
76501 + Since this has a negligible performance impact, you should enable
76502 + this feature.
76503 +
76504 +endmenu
76505 +
76506 +endmenu
76507 +
76508 config KEYS
76509 bool "Enable access key retention support"
76510 help
76511 @@ -167,7 +719,7 @@ config INTEL_TXT
76512 config LSM_MMAP_MIN_ADDR
76513 int "Low address space for LSM to protect from user allocation"
76514 depends on SECURITY && SECURITY_SELINUX
76515 - default 32768 if ARM
76516 + default 32768 if ALPHA || ARM || PARISC || SPARC32
76517 default 65536
76518 help
76519 This is the portion of low virtual memory which should be protected
76520 diff -urNp linux-3.0.8/security/keys/compat.c linux-3.0.8/security/keys/compat.c
76521 --- linux-3.0.8/security/keys/compat.c 2011-07-21 22:17:23.000000000 -0400
76522 +++ linux-3.0.8/security/keys/compat.c 2011-10-06 04:17:55.000000000 -0400
76523 @@ -44,7 +44,7 @@ long compat_keyctl_instantiate_key_iov(
76524 if (ret == 0)
76525 goto no_payload_free;
76526
76527 - ret = keyctl_instantiate_key_common(id, iov, ioc, ret, ringid);
76528 + ret = keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, ioc, ret, ringid);
76529
76530 if (iov != iovstack)
76531 kfree(iov);
76532 diff -urNp linux-3.0.8/security/keys/keyctl.c linux-3.0.8/security/keys/keyctl.c
76533 --- linux-3.0.8/security/keys/keyctl.c 2011-07-21 22:17:23.000000000 -0400
76534 +++ linux-3.0.8/security/keys/keyctl.c 2011-10-06 04:17:55.000000000 -0400
76535 @@ -921,7 +921,7 @@ static int keyctl_change_reqkey_auth(str
76536 /*
76537 * Copy the iovec data from userspace
76538 */
76539 -static long copy_from_user_iovec(void *buffer, const struct iovec *iov,
76540 +static long copy_from_user_iovec(void *buffer, const struct iovec __user *iov,
76541 unsigned ioc)
76542 {
76543 for (; ioc > 0; ioc--) {
76544 @@ -943,7 +943,7 @@ static long copy_from_user_iovec(void *b
76545 * If successful, 0 will be returned.
76546 */
76547 long keyctl_instantiate_key_common(key_serial_t id,
76548 - const struct iovec *payload_iov,
76549 + const struct iovec __user *payload_iov,
76550 unsigned ioc,
76551 size_t plen,
76552 key_serial_t ringid)
76553 @@ -1038,7 +1038,7 @@ long keyctl_instantiate_key(key_serial_t
76554 [0].iov_len = plen
76555 };
76556
76557 - return keyctl_instantiate_key_common(id, iov, 1, plen, ringid);
76558 + return keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, 1, plen, ringid);
76559 }
76560
76561 return keyctl_instantiate_key_common(id, NULL, 0, 0, ringid);
76562 @@ -1071,7 +1071,7 @@ long keyctl_instantiate_key_iov(key_seri
76563 if (ret == 0)
76564 goto no_payload_free;
76565
76566 - ret = keyctl_instantiate_key_common(id, iov, ioc, ret, ringid);
76567 + ret = keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, ioc, ret, ringid);
76568
76569 if (iov != iovstack)
76570 kfree(iov);
76571 diff -urNp linux-3.0.8/security/keys/keyring.c linux-3.0.8/security/keys/keyring.c
76572 --- linux-3.0.8/security/keys/keyring.c 2011-07-21 22:17:23.000000000 -0400
76573 +++ linux-3.0.8/security/keys/keyring.c 2011-08-23 21:47:56.000000000 -0400
76574 @@ -215,15 +215,15 @@ static long keyring_read(const struct ke
76575 ret = -EFAULT;
76576
76577 for (loop = 0; loop < klist->nkeys; loop++) {
76578 + key_serial_t serial;
76579 key = klist->keys[loop];
76580 + serial = key->serial;
76581
76582 tmp = sizeof(key_serial_t);
76583 if (tmp > buflen)
76584 tmp = buflen;
76585
76586 - if (copy_to_user(buffer,
76587 - &key->serial,
76588 - tmp) != 0)
76589 + if (copy_to_user(buffer, &serial, tmp))
76590 goto error;
76591
76592 buflen -= tmp;
76593 diff -urNp linux-3.0.8/security/min_addr.c linux-3.0.8/security/min_addr.c
76594 --- linux-3.0.8/security/min_addr.c 2011-07-21 22:17:23.000000000 -0400
76595 +++ linux-3.0.8/security/min_addr.c 2011-08-23 21:48:14.000000000 -0400
76596 @@ -14,6 +14,7 @@ unsigned long dac_mmap_min_addr = CONFIG
76597 */
76598 static void update_mmap_min_addr(void)
76599 {
76600 +#ifndef SPARC
76601 #ifdef CONFIG_LSM_MMAP_MIN_ADDR
76602 if (dac_mmap_min_addr > CONFIG_LSM_MMAP_MIN_ADDR)
76603 mmap_min_addr = dac_mmap_min_addr;
76604 @@ -22,6 +23,7 @@ static void update_mmap_min_addr(void)
76605 #else
76606 mmap_min_addr = dac_mmap_min_addr;
76607 #endif
76608 +#endif
76609 }
76610
76611 /*
76612 diff -urNp linux-3.0.8/security/security.c linux-3.0.8/security/security.c
76613 --- linux-3.0.8/security/security.c 2011-07-21 22:17:23.000000000 -0400
76614 +++ linux-3.0.8/security/security.c 2011-08-23 21:48:14.000000000 -0400
76615 @@ -25,8 +25,8 @@ static __initdata char chosen_lsm[SECURI
76616 /* things that live in capability.c */
76617 extern void __init security_fixup_ops(struct security_operations *ops);
76618
76619 -static struct security_operations *security_ops;
76620 -static struct security_operations default_security_ops = {
76621 +static struct security_operations *security_ops __read_only;
76622 +static struct security_operations default_security_ops __read_only = {
76623 .name = "default",
76624 };
76625
76626 @@ -67,7 +67,9 @@ int __init security_init(void)
76627
76628 void reset_security_ops(void)
76629 {
76630 + pax_open_kernel();
76631 security_ops = &default_security_ops;
76632 + pax_close_kernel();
76633 }
76634
76635 /* Save user chosen LSM */
76636 diff -urNp linux-3.0.8/security/selinux/hooks.c linux-3.0.8/security/selinux/hooks.c
76637 --- linux-3.0.8/security/selinux/hooks.c 2011-07-21 22:17:23.000000000 -0400
76638 +++ linux-3.0.8/security/selinux/hooks.c 2011-08-23 21:48:14.000000000 -0400
76639 @@ -93,7 +93,6 @@
76640 #define NUM_SEL_MNT_OPTS 5
76641
76642 extern int selinux_nlmsg_lookup(u16 sclass, u16 nlmsg_type, u32 *perm);
76643 -extern struct security_operations *security_ops;
76644
76645 /* SECMARK reference count */
76646 atomic_t selinux_secmark_refcount = ATOMIC_INIT(0);
76647 @@ -5454,7 +5453,7 @@ static int selinux_key_getsecurity(struc
76648
76649 #endif
76650
76651 -static struct security_operations selinux_ops = {
76652 +static struct security_operations selinux_ops __read_only = {
76653 .name = "selinux",
76654
76655 .ptrace_access_check = selinux_ptrace_access_check,
76656 diff -urNp linux-3.0.8/security/selinux/include/xfrm.h linux-3.0.8/security/selinux/include/xfrm.h
76657 --- linux-3.0.8/security/selinux/include/xfrm.h 2011-07-21 22:17:23.000000000 -0400
76658 +++ linux-3.0.8/security/selinux/include/xfrm.h 2011-08-23 21:47:56.000000000 -0400
76659 @@ -48,7 +48,7 @@ int selinux_xfrm_decode_session(struct s
76660
76661 static inline void selinux_xfrm_notify_policyload(void)
76662 {
76663 - atomic_inc(&flow_cache_genid);
76664 + atomic_inc_unchecked(&flow_cache_genid);
76665 }
76666 #else
76667 static inline int selinux_xfrm_enabled(void)
76668 diff -urNp linux-3.0.8/security/selinux/ss/services.c linux-3.0.8/security/selinux/ss/services.c
76669 --- linux-3.0.8/security/selinux/ss/services.c 2011-07-21 22:17:23.000000000 -0400
76670 +++ linux-3.0.8/security/selinux/ss/services.c 2011-08-23 21:48:14.000000000 -0400
76671 @@ -1814,6 +1814,8 @@ int security_load_policy(void *data, siz
76672 int rc = 0;
76673 struct policy_file file = { data, len }, *fp = &file;
76674
76675 + pax_track_stack();
76676 +
76677 if (!ss_initialized) {
76678 avtab_cache_init();
76679 rc = policydb_read(&policydb, fp);
76680 diff -urNp linux-3.0.8/security/smack/smack_lsm.c linux-3.0.8/security/smack/smack_lsm.c
76681 --- linux-3.0.8/security/smack/smack_lsm.c 2011-07-21 22:17:23.000000000 -0400
76682 +++ linux-3.0.8/security/smack/smack_lsm.c 2011-08-23 21:47:56.000000000 -0400
76683 @@ -3392,7 +3392,7 @@ static int smack_inode_getsecctx(struct
76684 return 0;
76685 }
76686
76687 -struct security_operations smack_ops = {
76688 +struct security_operations smack_ops __read_only = {
76689 .name = "smack",
76690
76691 .ptrace_access_check = smack_ptrace_access_check,
76692 diff -urNp linux-3.0.8/security/tomoyo/tomoyo.c linux-3.0.8/security/tomoyo/tomoyo.c
76693 --- linux-3.0.8/security/tomoyo/tomoyo.c 2011-07-21 22:17:23.000000000 -0400
76694 +++ linux-3.0.8/security/tomoyo/tomoyo.c 2011-08-23 21:47:56.000000000 -0400
76695 @@ -240,7 +240,7 @@ static int tomoyo_sb_pivotroot(struct pa
76696 * tomoyo_security_ops is a "struct security_operations" which is used for
76697 * registering TOMOYO.
76698 */
76699 -static struct security_operations tomoyo_security_ops = {
76700 +static struct security_operations tomoyo_security_ops __read_only = {
76701 .name = "tomoyo",
76702 .cred_alloc_blank = tomoyo_cred_alloc_blank,
76703 .cred_prepare = tomoyo_cred_prepare,
76704 diff -urNp linux-3.0.8/sound/aoa/codecs/onyx.c linux-3.0.8/sound/aoa/codecs/onyx.c
76705 --- linux-3.0.8/sound/aoa/codecs/onyx.c 2011-07-21 22:17:23.000000000 -0400
76706 +++ linux-3.0.8/sound/aoa/codecs/onyx.c 2011-08-23 21:47:56.000000000 -0400
76707 @@ -54,7 +54,7 @@ struct onyx {
76708 spdif_locked:1,
76709 analog_locked:1,
76710 original_mute:2;
76711 - int open_count;
76712 + local_t open_count;
76713 struct codec_info *codec_info;
76714
76715 /* mutex serializes concurrent access to the device
76716 @@ -753,7 +753,7 @@ static int onyx_open(struct codec_info_i
76717 struct onyx *onyx = cii->codec_data;
76718
76719 mutex_lock(&onyx->mutex);
76720 - onyx->open_count++;
76721 + local_inc(&onyx->open_count);
76722 mutex_unlock(&onyx->mutex);
76723
76724 return 0;
76725 @@ -765,8 +765,7 @@ static int onyx_close(struct codec_info_
76726 struct onyx *onyx = cii->codec_data;
76727
76728 mutex_lock(&onyx->mutex);
76729 - onyx->open_count--;
76730 - if (!onyx->open_count)
76731 + if (local_dec_and_test(&onyx->open_count))
76732 onyx->spdif_locked = onyx->analog_locked = 0;
76733 mutex_unlock(&onyx->mutex);
76734
76735 diff -urNp linux-3.0.8/sound/aoa/codecs/onyx.h linux-3.0.8/sound/aoa/codecs/onyx.h
76736 --- linux-3.0.8/sound/aoa/codecs/onyx.h 2011-07-21 22:17:23.000000000 -0400
76737 +++ linux-3.0.8/sound/aoa/codecs/onyx.h 2011-08-23 21:47:56.000000000 -0400
76738 @@ -11,6 +11,7 @@
76739 #include <linux/i2c.h>
76740 #include <asm/pmac_low_i2c.h>
76741 #include <asm/prom.h>
76742 +#include <asm/local.h>
76743
76744 /* PCM3052 register definitions */
76745
76746 diff -urNp linux-3.0.8/sound/core/oss/pcm_oss.c linux-3.0.8/sound/core/oss/pcm_oss.c
76747 --- linux-3.0.8/sound/core/oss/pcm_oss.c 2011-07-21 22:17:23.000000000 -0400
76748 +++ linux-3.0.8/sound/core/oss/pcm_oss.c 2011-10-06 04:17:55.000000000 -0400
76749 @@ -1189,10 +1189,10 @@ snd_pcm_sframes_t snd_pcm_oss_write3(str
76750 if (in_kernel) {
76751 mm_segment_t fs;
76752 fs = snd_enter_user();
76753 - ret = snd_pcm_lib_write(substream, (void __force __user *)ptr, frames);
76754 + ret = snd_pcm_lib_write(substream, (void __force_user *)ptr, frames);
76755 snd_leave_user(fs);
76756 } else {
76757 - ret = snd_pcm_lib_write(substream, (void __force __user *)ptr, frames);
76758 + ret = snd_pcm_lib_write(substream, (void __force_user *)ptr, frames);
76759 }
76760 if (ret != -EPIPE && ret != -ESTRPIPE)
76761 break;
76762 @@ -1234,10 +1234,10 @@ snd_pcm_sframes_t snd_pcm_oss_read3(stru
76763 if (in_kernel) {
76764 mm_segment_t fs;
76765 fs = snd_enter_user();
76766 - ret = snd_pcm_lib_read(substream, (void __force __user *)ptr, frames);
76767 + ret = snd_pcm_lib_read(substream, (void __force_user *)ptr, frames);
76768 snd_leave_user(fs);
76769 } else {
76770 - ret = snd_pcm_lib_read(substream, (void __force __user *)ptr, frames);
76771 + ret = snd_pcm_lib_read(substream, (void __force_user *)ptr, frames);
76772 }
76773 if (ret == -EPIPE) {
76774 if (runtime->status->state == SNDRV_PCM_STATE_DRAINING) {
76775 @@ -1337,7 +1337,7 @@ static ssize_t snd_pcm_oss_write2(struct
76776 struct snd_pcm_plugin_channel *channels;
76777 size_t oss_frame_bytes = (runtime->oss.plugin_first->src_width * runtime->oss.plugin_first->src_format.channels) / 8;
76778 if (!in_kernel) {
76779 - if (copy_from_user(runtime->oss.buffer, (const char __force __user *)buf, bytes))
76780 + if (copy_from_user(runtime->oss.buffer, (const char __force_user *)buf, bytes))
76781 return -EFAULT;
76782 buf = runtime->oss.buffer;
76783 }
76784 @@ -1407,7 +1407,7 @@ static ssize_t snd_pcm_oss_write1(struct
76785 }
76786 } else {
76787 tmp = snd_pcm_oss_write2(substream,
76788 - (const char __force *)buf,
76789 + (const char __force_kernel *)buf,
76790 runtime->oss.period_bytes, 0);
76791 if (tmp <= 0)
76792 goto err;
76793 @@ -1433,7 +1433,7 @@ static ssize_t snd_pcm_oss_read2(struct
76794 struct snd_pcm_runtime *runtime = substream->runtime;
76795 snd_pcm_sframes_t frames, frames1;
76796 #ifdef CONFIG_SND_PCM_OSS_PLUGINS
76797 - char __user *final_dst = (char __force __user *)buf;
76798 + char __user *final_dst = (char __force_user *)buf;
76799 if (runtime->oss.plugin_first) {
76800 struct snd_pcm_plugin_channel *channels;
76801 size_t oss_frame_bytes = (runtime->oss.plugin_last->dst_width * runtime->oss.plugin_last->dst_format.channels) / 8;
76802 @@ -1495,7 +1495,7 @@ static ssize_t snd_pcm_oss_read1(struct
76803 xfer += tmp;
76804 runtime->oss.buffer_used -= tmp;
76805 } else {
76806 - tmp = snd_pcm_oss_read2(substream, (char __force *)buf,
76807 + tmp = snd_pcm_oss_read2(substream, (char __force_kernel *)buf,
76808 runtime->oss.period_bytes, 0);
76809 if (tmp <= 0)
76810 goto err;
76811 @@ -1663,7 +1663,7 @@ static int snd_pcm_oss_sync(struct snd_p
76812 size1);
76813 size1 /= runtime->channels; /* frames */
76814 fs = snd_enter_user();
76815 - snd_pcm_lib_write(substream, (void __force __user *)runtime->oss.buffer, size1);
76816 + snd_pcm_lib_write(substream, (void __force_user *)runtime->oss.buffer, size1);
76817 snd_leave_user(fs);
76818 }
76819 } else if (runtime->access == SNDRV_PCM_ACCESS_RW_NONINTERLEAVED) {
76820 diff -urNp linux-3.0.8/sound/core/pcm_compat.c linux-3.0.8/sound/core/pcm_compat.c
76821 --- linux-3.0.8/sound/core/pcm_compat.c 2011-10-24 08:05:21.000000000 -0400
76822 +++ linux-3.0.8/sound/core/pcm_compat.c 2011-10-06 04:17:55.000000000 -0400
76823 @@ -31,7 +31,7 @@ static int snd_pcm_ioctl_delay_compat(st
76824 int err;
76825
76826 fs = snd_enter_user();
76827 - err = snd_pcm_delay(substream, &delay);
76828 + err = snd_pcm_delay(substream, (snd_pcm_sframes_t __force_user *)&delay);
76829 snd_leave_user(fs);
76830 if (err < 0)
76831 return err;
76832 diff -urNp linux-3.0.8/sound/core/pcm_native.c linux-3.0.8/sound/core/pcm_native.c
76833 --- linux-3.0.8/sound/core/pcm_native.c 2011-07-21 22:17:23.000000000 -0400
76834 +++ linux-3.0.8/sound/core/pcm_native.c 2011-10-06 04:17:55.000000000 -0400
76835 @@ -2770,11 +2770,11 @@ int snd_pcm_kernel_ioctl(struct snd_pcm_
76836 switch (substream->stream) {
76837 case SNDRV_PCM_STREAM_PLAYBACK:
76838 result = snd_pcm_playback_ioctl1(NULL, substream, cmd,
76839 - (void __user *)arg);
76840 + (void __force_user *)arg);
76841 break;
76842 case SNDRV_PCM_STREAM_CAPTURE:
76843 result = snd_pcm_capture_ioctl1(NULL, substream, cmd,
76844 - (void __user *)arg);
76845 + (void __force_user *)arg);
76846 break;
76847 default:
76848 result = -EINVAL;
76849 diff -urNp linux-3.0.8/sound/core/seq/seq_device.c linux-3.0.8/sound/core/seq/seq_device.c
76850 --- linux-3.0.8/sound/core/seq/seq_device.c 2011-07-21 22:17:23.000000000 -0400
76851 +++ linux-3.0.8/sound/core/seq/seq_device.c 2011-08-23 21:47:56.000000000 -0400
76852 @@ -63,7 +63,7 @@ struct ops_list {
76853 int argsize; /* argument size */
76854
76855 /* operators */
76856 - struct snd_seq_dev_ops ops;
76857 + struct snd_seq_dev_ops *ops;
76858
76859 /* registred devices */
76860 struct list_head dev_list; /* list of devices */
76861 @@ -332,7 +332,7 @@ int snd_seq_device_register_driver(char
76862
76863 mutex_lock(&ops->reg_mutex);
76864 /* copy driver operators */
76865 - ops->ops = *entry;
76866 + ops->ops = entry;
76867 ops->driver |= DRIVER_LOADED;
76868 ops->argsize = argsize;
76869
76870 @@ -462,7 +462,7 @@ static int init_device(struct snd_seq_de
76871 dev->name, ops->id, ops->argsize, dev->argsize);
76872 return -EINVAL;
76873 }
76874 - if (ops->ops.init_device(dev) >= 0) {
76875 + if (ops->ops->init_device(dev) >= 0) {
76876 dev->status = SNDRV_SEQ_DEVICE_REGISTERED;
76877 ops->num_init_devices++;
76878 } else {
76879 @@ -489,7 +489,7 @@ static int free_device(struct snd_seq_de
76880 dev->name, ops->id, ops->argsize, dev->argsize);
76881 return -EINVAL;
76882 }
76883 - if ((result = ops->ops.free_device(dev)) >= 0 || result == -ENXIO) {
76884 + if ((result = ops->ops->free_device(dev)) >= 0 || result == -ENXIO) {
76885 dev->status = SNDRV_SEQ_DEVICE_FREE;
76886 dev->driver_data = NULL;
76887 ops->num_init_devices--;
76888 diff -urNp linux-3.0.8/sound/drivers/mts64.c linux-3.0.8/sound/drivers/mts64.c
76889 --- linux-3.0.8/sound/drivers/mts64.c 2011-07-21 22:17:23.000000000 -0400
76890 +++ linux-3.0.8/sound/drivers/mts64.c 2011-08-23 21:47:56.000000000 -0400
76891 @@ -28,6 +28,7 @@
76892 #include <sound/initval.h>
76893 #include <sound/rawmidi.h>
76894 #include <sound/control.h>
76895 +#include <asm/local.h>
76896
76897 #define CARD_NAME "Miditerminal 4140"
76898 #define DRIVER_NAME "MTS64"
76899 @@ -66,7 +67,7 @@ struct mts64 {
76900 struct pardevice *pardev;
76901 int pardev_claimed;
76902
76903 - int open_count;
76904 + local_t open_count;
76905 int current_midi_output_port;
76906 int current_midi_input_port;
76907 u8 mode[MTS64_NUM_INPUT_PORTS];
76908 @@ -696,7 +697,7 @@ static int snd_mts64_rawmidi_open(struct
76909 {
76910 struct mts64 *mts = substream->rmidi->private_data;
76911
76912 - if (mts->open_count == 0) {
76913 + if (local_read(&mts->open_count) == 0) {
76914 /* We don't need a spinlock here, because this is just called
76915 if the device has not been opened before.
76916 So there aren't any IRQs from the device */
76917 @@ -704,7 +705,7 @@ static int snd_mts64_rawmidi_open(struct
76918
76919 msleep(50);
76920 }
76921 - ++(mts->open_count);
76922 + local_inc(&mts->open_count);
76923
76924 return 0;
76925 }
76926 @@ -714,8 +715,7 @@ static int snd_mts64_rawmidi_close(struc
76927 struct mts64 *mts = substream->rmidi->private_data;
76928 unsigned long flags;
76929
76930 - --(mts->open_count);
76931 - if (mts->open_count == 0) {
76932 + if (local_dec_return(&mts->open_count) == 0) {
76933 /* We need the spinlock_irqsave here because we can still
76934 have IRQs at this point */
76935 spin_lock_irqsave(&mts->lock, flags);
76936 @@ -724,8 +724,8 @@ static int snd_mts64_rawmidi_close(struc
76937
76938 msleep(500);
76939
76940 - } else if (mts->open_count < 0)
76941 - mts->open_count = 0;
76942 + } else if (local_read(&mts->open_count) < 0)
76943 + local_set(&mts->open_count, 0);
76944
76945 return 0;
76946 }
76947 diff -urNp linux-3.0.8/sound/drivers/opl4/opl4_lib.c linux-3.0.8/sound/drivers/opl4/opl4_lib.c
76948 --- linux-3.0.8/sound/drivers/opl4/opl4_lib.c 2011-07-21 22:17:23.000000000 -0400
76949 +++ linux-3.0.8/sound/drivers/opl4/opl4_lib.c 2011-08-23 21:47:56.000000000 -0400
76950 @@ -28,7 +28,7 @@ MODULE_AUTHOR("Clemens Ladisch <clemens@
76951 MODULE_DESCRIPTION("OPL4 driver");
76952 MODULE_LICENSE("GPL");
76953
76954 -static void inline snd_opl4_wait(struct snd_opl4 *opl4)
76955 +static inline void snd_opl4_wait(struct snd_opl4 *opl4)
76956 {
76957 int timeout = 10;
76958 while ((inb(opl4->fm_port) & OPL4_STATUS_BUSY) && --timeout > 0)
76959 diff -urNp linux-3.0.8/sound/drivers/portman2x4.c linux-3.0.8/sound/drivers/portman2x4.c
76960 --- linux-3.0.8/sound/drivers/portman2x4.c 2011-07-21 22:17:23.000000000 -0400
76961 +++ linux-3.0.8/sound/drivers/portman2x4.c 2011-08-23 21:47:56.000000000 -0400
76962 @@ -47,6 +47,7 @@
76963 #include <sound/initval.h>
76964 #include <sound/rawmidi.h>
76965 #include <sound/control.h>
76966 +#include <asm/local.h>
76967
76968 #define CARD_NAME "Portman 2x4"
76969 #define DRIVER_NAME "portman"
76970 @@ -84,7 +85,7 @@ struct portman {
76971 struct pardevice *pardev;
76972 int pardev_claimed;
76973
76974 - int open_count;
76975 + local_t open_count;
76976 int mode[PORTMAN_NUM_INPUT_PORTS];
76977 struct snd_rawmidi_substream *midi_input[PORTMAN_NUM_INPUT_PORTS];
76978 };
76979 diff -urNp linux-3.0.8/sound/firewire/amdtp.c linux-3.0.8/sound/firewire/amdtp.c
76980 --- linux-3.0.8/sound/firewire/amdtp.c 2011-07-21 22:17:23.000000000 -0400
76981 +++ linux-3.0.8/sound/firewire/amdtp.c 2011-08-23 21:47:56.000000000 -0400
76982 @@ -371,7 +371,7 @@ static void queue_out_packet(struct amdt
76983 ptr = s->pcm_buffer_pointer + data_blocks;
76984 if (ptr >= pcm->runtime->buffer_size)
76985 ptr -= pcm->runtime->buffer_size;
76986 - ACCESS_ONCE(s->pcm_buffer_pointer) = ptr;
76987 + ACCESS_ONCE_RW(s->pcm_buffer_pointer) = ptr;
76988
76989 s->pcm_period_pointer += data_blocks;
76990 if (s->pcm_period_pointer >= pcm->runtime->period_size) {
76991 @@ -511,7 +511,7 @@ EXPORT_SYMBOL(amdtp_out_stream_start);
76992 */
76993 void amdtp_out_stream_update(struct amdtp_out_stream *s)
76994 {
76995 - ACCESS_ONCE(s->source_node_id_field) =
76996 + ACCESS_ONCE_RW(s->source_node_id_field) =
76997 (fw_parent_device(s->unit)->card->node_id & 0x3f) << 24;
76998 }
76999 EXPORT_SYMBOL(amdtp_out_stream_update);
77000 diff -urNp linux-3.0.8/sound/firewire/amdtp.h linux-3.0.8/sound/firewire/amdtp.h
77001 --- linux-3.0.8/sound/firewire/amdtp.h 2011-07-21 22:17:23.000000000 -0400
77002 +++ linux-3.0.8/sound/firewire/amdtp.h 2011-08-23 21:47:56.000000000 -0400
77003 @@ -146,7 +146,7 @@ static inline void amdtp_out_stream_pcm_
77004 static inline void amdtp_out_stream_pcm_trigger(struct amdtp_out_stream *s,
77005 struct snd_pcm_substream *pcm)
77006 {
77007 - ACCESS_ONCE(s->pcm) = pcm;
77008 + ACCESS_ONCE_RW(s->pcm) = pcm;
77009 }
77010
77011 /**
77012 diff -urNp linux-3.0.8/sound/firewire/isight.c linux-3.0.8/sound/firewire/isight.c
77013 --- linux-3.0.8/sound/firewire/isight.c 2011-07-21 22:17:23.000000000 -0400
77014 +++ linux-3.0.8/sound/firewire/isight.c 2011-08-23 21:47:56.000000000 -0400
77015 @@ -97,7 +97,7 @@ static void isight_update_pointers(struc
77016 ptr += count;
77017 if (ptr >= runtime->buffer_size)
77018 ptr -= runtime->buffer_size;
77019 - ACCESS_ONCE(isight->buffer_pointer) = ptr;
77020 + ACCESS_ONCE_RW(isight->buffer_pointer) = ptr;
77021
77022 isight->period_counter += count;
77023 if (isight->period_counter >= runtime->period_size) {
77024 @@ -308,7 +308,7 @@ static int isight_hw_params(struct snd_p
77025 if (err < 0)
77026 return err;
77027
77028 - ACCESS_ONCE(isight->pcm_active) = true;
77029 + ACCESS_ONCE_RW(isight->pcm_active) = true;
77030
77031 return 0;
77032 }
77033 @@ -341,7 +341,7 @@ static int isight_hw_free(struct snd_pcm
77034 {
77035 struct isight *isight = substream->private_data;
77036
77037 - ACCESS_ONCE(isight->pcm_active) = false;
77038 + ACCESS_ONCE_RW(isight->pcm_active) = false;
77039
77040 mutex_lock(&isight->mutex);
77041 isight_stop_streaming(isight);
77042 @@ -434,10 +434,10 @@ static int isight_trigger(struct snd_pcm
77043
77044 switch (cmd) {
77045 case SNDRV_PCM_TRIGGER_START:
77046 - ACCESS_ONCE(isight->pcm_running) = true;
77047 + ACCESS_ONCE_RW(isight->pcm_running) = true;
77048 break;
77049 case SNDRV_PCM_TRIGGER_STOP:
77050 - ACCESS_ONCE(isight->pcm_running) = false;
77051 + ACCESS_ONCE_RW(isight->pcm_running) = false;
77052 break;
77053 default:
77054 return -EINVAL;
77055 diff -urNp linux-3.0.8/sound/isa/cmi8330.c linux-3.0.8/sound/isa/cmi8330.c
77056 --- linux-3.0.8/sound/isa/cmi8330.c 2011-07-21 22:17:23.000000000 -0400
77057 +++ linux-3.0.8/sound/isa/cmi8330.c 2011-08-23 21:47:56.000000000 -0400
77058 @@ -172,7 +172,7 @@ struct snd_cmi8330 {
77059
77060 struct snd_pcm *pcm;
77061 struct snd_cmi8330_stream {
77062 - struct snd_pcm_ops ops;
77063 + snd_pcm_ops_no_const ops;
77064 snd_pcm_open_callback_t open;
77065 void *private_data; /* sb or wss */
77066 } streams[2];
77067 diff -urNp linux-3.0.8/sound/oss/sb_audio.c linux-3.0.8/sound/oss/sb_audio.c
77068 --- linux-3.0.8/sound/oss/sb_audio.c 2011-07-21 22:17:23.000000000 -0400
77069 +++ linux-3.0.8/sound/oss/sb_audio.c 2011-08-23 21:47:56.000000000 -0400
77070 @@ -901,7 +901,7 @@ sb16_copy_from_user(int dev,
77071 buf16 = (signed short *)(localbuf + localoffs);
77072 while (c)
77073 {
77074 - locallen = (c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
77075 + locallen = ((unsigned)c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
77076 if (copy_from_user(lbuf8,
77077 userbuf+useroffs + p,
77078 locallen))
77079 diff -urNp linux-3.0.8/sound/oss/swarm_cs4297a.c linux-3.0.8/sound/oss/swarm_cs4297a.c
77080 --- linux-3.0.8/sound/oss/swarm_cs4297a.c 2011-07-21 22:17:23.000000000 -0400
77081 +++ linux-3.0.8/sound/oss/swarm_cs4297a.c 2011-08-23 21:47:56.000000000 -0400
77082 @@ -2606,7 +2606,6 @@ static int __init cs4297a_init(void)
77083 {
77084 struct cs4297a_state *s;
77085 u32 pwr, id;
77086 - mm_segment_t fs;
77087 int rval;
77088 #ifndef CONFIG_BCM_CS4297A_CSWARM
77089 u64 cfg;
77090 @@ -2696,22 +2695,23 @@ static int __init cs4297a_init(void)
77091 if (!rval) {
77092 char *sb1250_duart_present;
77093
77094 +#if 0
77095 + mm_segment_t fs;
77096 fs = get_fs();
77097 set_fs(KERNEL_DS);
77098 -#if 0
77099 val = SOUND_MASK_LINE;
77100 mixer_ioctl(s, SOUND_MIXER_WRITE_RECSRC, (unsigned long) &val);
77101 for (i = 0; i < ARRAY_SIZE(initvol); i++) {
77102 val = initvol[i].vol;
77103 mixer_ioctl(s, initvol[i].mixch, (unsigned long) &val);
77104 }
77105 + set_fs(fs);
77106 // cs4297a_write_ac97(s, 0x18, 0x0808);
77107 #else
77108 // cs4297a_write_ac97(s, 0x5e, 0x180);
77109 cs4297a_write_ac97(s, 0x02, 0x0808);
77110 cs4297a_write_ac97(s, 0x18, 0x0808);
77111 #endif
77112 - set_fs(fs);
77113
77114 list_add(&s->list, &cs4297a_devs);
77115
77116 diff -urNp linux-3.0.8/sound/pci/hda/hda_codec.h linux-3.0.8/sound/pci/hda/hda_codec.h
77117 --- linux-3.0.8/sound/pci/hda/hda_codec.h 2011-07-21 22:17:23.000000000 -0400
77118 +++ linux-3.0.8/sound/pci/hda/hda_codec.h 2011-08-23 21:47:56.000000000 -0400
77119 @@ -615,7 +615,7 @@ struct hda_bus_ops {
77120 /* notify power-up/down from codec to controller */
77121 void (*pm_notify)(struct hda_bus *bus);
77122 #endif
77123 -};
77124 +} __no_const;
77125
77126 /* template to pass to the bus constructor */
77127 struct hda_bus_template {
77128 @@ -713,6 +713,7 @@ struct hda_codec_ops {
77129 #endif
77130 void (*reboot_notify)(struct hda_codec *codec);
77131 };
77132 +typedef struct hda_codec_ops __no_const hda_codec_ops_no_const;
77133
77134 /* record for amp information cache */
77135 struct hda_cache_head {
77136 @@ -743,7 +744,7 @@ struct hda_pcm_ops {
77137 struct snd_pcm_substream *substream);
77138 int (*cleanup)(struct hda_pcm_stream *info, struct hda_codec *codec,
77139 struct snd_pcm_substream *substream);
77140 -};
77141 +} __no_const;
77142
77143 /* PCM information for each substream */
77144 struct hda_pcm_stream {
77145 @@ -801,7 +802,7 @@ struct hda_codec {
77146 const char *modelname; /* model name for preset */
77147
77148 /* set by patch */
77149 - struct hda_codec_ops patch_ops;
77150 + hda_codec_ops_no_const patch_ops;
77151
77152 /* PCM to create, set by patch_ops.build_pcms callback */
77153 unsigned int num_pcms;
77154 diff -urNp linux-3.0.8/sound/pci/ice1712/ice1712.h linux-3.0.8/sound/pci/ice1712/ice1712.h
77155 --- linux-3.0.8/sound/pci/ice1712/ice1712.h 2011-07-21 22:17:23.000000000 -0400
77156 +++ linux-3.0.8/sound/pci/ice1712/ice1712.h 2011-08-23 21:47:56.000000000 -0400
77157 @@ -269,7 +269,7 @@ struct snd_ak4xxx_private {
77158 unsigned int mask_flags; /* total mask bits */
77159 struct snd_akm4xxx_ops {
77160 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
77161 - } ops;
77162 + } __no_const ops;
77163 };
77164
77165 struct snd_ice1712_spdif {
77166 @@ -285,7 +285,7 @@ struct snd_ice1712_spdif {
77167 int (*default_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
77168 void (*stream_get)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
77169 int (*stream_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
77170 - } ops;
77171 + } __no_const ops;
77172 };
77173
77174
77175 diff -urNp linux-3.0.8/sound/pci/ymfpci/ymfpci_main.c linux-3.0.8/sound/pci/ymfpci/ymfpci_main.c
77176 --- linux-3.0.8/sound/pci/ymfpci/ymfpci_main.c 2011-07-21 22:17:23.000000000 -0400
77177 +++ linux-3.0.8/sound/pci/ymfpci/ymfpci_main.c 2011-08-23 21:47:56.000000000 -0400
77178 @@ -202,8 +202,8 @@ static void snd_ymfpci_hw_stop(struct sn
77179 if ((snd_ymfpci_readl(chip, YDSXGR_STATUS) & 2) == 0)
77180 break;
77181 }
77182 - if (atomic_read(&chip->interrupt_sleep_count)) {
77183 - atomic_set(&chip->interrupt_sleep_count, 0);
77184 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
77185 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
77186 wake_up(&chip->interrupt_sleep);
77187 }
77188 __end:
77189 @@ -787,7 +787,7 @@ static void snd_ymfpci_irq_wait(struct s
77190 continue;
77191 init_waitqueue_entry(&wait, current);
77192 add_wait_queue(&chip->interrupt_sleep, &wait);
77193 - atomic_inc(&chip->interrupt_sleep_count);
77194 + atomic_inc_unchecked(&chip->interrupt_sleep_count);
77195 schedule_timeout_uninterruptible(msecs_to_jiffies(50));
77196 remove_wait_queue(&chip->interrupt_sleep, &wait);
77197 }
77198 @@ -825,8 +825,8 @@ static irqreturn_t snd_ymfpci_interrupt(
77199 snd_ymfpci_writel(chip, YDSXGR_MODE, mode);
77200 spin_unlock(&chip->reg_lock);
77201
77202 - if (atomic_read(&chip->interrupt_sleep_count)) {
77203 - atomic_set(&chip->interrupt_sleep_count, 0);
77204 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
77205 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
77206 wake_up(&chip->interrupt_sleep);
77207 }
77208 }
77209 @@ -2363,7 +2363,7 @@ int __devinit snd_ymfpci_create(struct s
77210 spin_lock_init(&chip->reg_lock);
77211 spin_lock_init(&chip->voice_lock);
77212 init_waitqueue_head(&chip->interrupt_sleep);
77213 - atomic_set(&chip->interrupt_sleep_count, 0);
77214 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
77215 chip->card = card;
77216 chip->pci = pci;
77217 chip->irq = -1;
77218 diff -urNp linux-3.0.8/sound/soc/soc-core.c linux-3.0.8/sound/soc/soc-core.c
77219 --- linux-3.0.8/sound/soc/soc-core.c 2011-10-24 08:05:21.000000000 -0400
77220 +++ linux-3.0.8/sound/soc/soc-core.c 2011-08-23 21:47:56.000000000 -0400
77221 @@ -1021,7 +1021,7 @@ static snd_pcm_uframes_t soc_pcm_pointer
77222 }
77223
77224 /* ASoC PCM operations */
77225 -static struct snd_pcm_ops soc_pcm_ops = {
77226 +static snd_pcm_ops_no_const soc_pcm_ops = {
77227 .open = soc_pcm_open,
77228 .close = soc_codec_close,
77229 .hw_params = soc_pcm_hw_params,
77230 @@ -2128,6 +2128,7 @@ static int soc_new_pcm(struct snd_soc_pc
77231 rtd->pcm = pcm;
77232 pcm->private_data = rtd;
77233 if (platform->driver->ops) {
77234 + /* this whole logic is broken... */
77235 soc_pcm_ops.mmap = platform->driver->ops->mmap;
77236 soc_pcm_ops.pointer = platform->driver->ops->pointer;
77237 soc_pcm_ops.ioctl = platform->driver->ops->ioctl;
77238 diff -urNp linux-3.0.8/sound/usb/card.h linux-3.0.8/sound/usb/card.h
77239 --- linux-3.0.8/sound/usb/card.h 2011-07-21 22:17:23.000000000 -0400
77240 +++ linux-3.0.8/sound/usb/card.h 2011-08-23 21:47:56.000000000 -0400
77241 @@ -44,6 +44,7 @@ struct snd_urb_ops {
77242 int (*prepare_sync)(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime, struct urb *u);
77243 int (*retire_sync)(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime, struct urb *u);
77244 };
77245 +typedef struct snd_urb_ops __no_const snd_urb_ops_no_const;
77246
77247 struct snd_usb_substream {
77248 struct snd_usb_stream *stream;
77249 @@ -93,7 +94,7 @@ struct snd_usb_substream {
77250 struct snd_pcm_hw_constraint_list rate_list; /* limited rates */
77251 spinlock_t lock;
77252
77253 - struct snd_urb_ops ops; /* callbacks (must be filled at init) */
77254 + snd_urb_ops_no_const ops; /* callbacks (must be filled at init) */
77255 };
77256
77257 struct snd_usb_stream {
77258 diff -urNp linux-3.0.8/tools/gcc/checker_plugin.c linux-3.0.8/tools/gcc/checker_plugin.c
77259 --- linux-3.0.8/tools/gcc/checker_plugin.c 1969-12-31 19:00:00.000000000 -0500
77260 +++ linux-3.0.8/tools/gcc/checker_plugin.c 2011-10-06 04:17:55.000000000 -0400
77261 @@ -0,0 +1,169 @@
77262 +/*
77263 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
77264 + * Licensed under the GPL v2
77265 + *
77266 + * Note: the choice of the license means that the compilation process is
77267 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
77268 + * but for the kernel it doesn't matter since it doesn't link against
77269 + * any of the gcc libraries
77270 + *
77271 + * gcc plugin to implement various sparse (source code checker) features
77272 + *
77273 + * TODO:
77274 + * - define separate __iomem, __percpu and __rcu address spaces (lots of code to patch)
77275 + *
77276 + * BUGS:
77277 + * - none known
77278 + */
77279 +#include "gcc-plugin.h"
77280 +#include "config.h"
77281 +#include "system.h"
77282 +#include "coretypes.h"
77283 +#include "tree.h"
77284 +#include "tree-pass.h"
77285 +#include "intl.h"
77286 +#include "plugin-version.h"
77287 +#include "tm.h"
77288 +#include "toplev.h"
77289 +#include "basic-block.h"
77290 +#include "gimple.h"
77291 +//#include "expr.h" where are you...
77292 +#include "diagnostic.h"
77293 +#include "rtl.h"
77294 +#include "emit-rtl.h"
77295 +#include "function.h"
77296 +#include "tree-flow.h"
77297 +#include "target.h"
77298 +
77299 +extern void c_register_addr_space (const char *str, addr_space_t as);
77300 +extern enum machine_mode default_addr_space_pointer_mode (addr_space_t);
77301 +extern enum machine_mode default_addr_space_address_mode (addr_space_t);
77302 +extern bool default_addr_space_valid_pointer_mode(enum machine_mode mode, addr_space_t as);
77303 +extern bool default_addr_space_legitimate_address_p(enum machine_mode mode, rtx mem, bool strict, addr_space_t as);
77304 +extern rtx default_addr_space_legitimize_address(rtx x, rtx oldx, enum machine_mode mode, addr_space_t as);
77305 +
77306 +extern void print_gimple_stmt(FILE *, gimple, int, int);
77307 +extern rtx emit_move_insn(rtx x, rtx y);
77308 +
77309 +int plugin_is_GPL_compatible;
77310 +
77311 +static struct plugin_info checker_plugin_info = {
77312 + .version = "201110031940",
77313 +};
77314 +
77315 +#define ADDR_SPACE_KERNEL 0
77316 +#define ADDR_SPACE_FORCE_KERNEL 1
77317 +#define ADDR_SPACE_USER 2
77318 +#define ADDR_SPACE_FORCE_USER 3
77319 +#define ADDR_SPACE_IOMEM 0
77320 +#define ADDR_SPACE_FORCE_IOMEM 0
77321 +#define ADDR_SPACE_PERCPU 0
77322 +#define ADDR_SPACE_FORCE_PERCPU 0
77323 +#define ADDR_SPACE_RCU 0
77324 +#define ADDR_SPACE_FORCE_RCU 0
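+/* note: the kernel space is the generic/default address space (0); only the
+   force_kernel/user/force_user variants get distinct ids, while the
+   __iomem/__percpu/__rcu spaces still alias 0 (see the TODO above) */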
77325 +
77326 +static enum machine_mode checker_addr_space_pointer_mode(addr_space_t addrspace)
77327 +{
77328 + return default_addr_space_pointer_mode(ADDR_SPACE_GENERIC);
77329 +}
77330 +
77331 +static enum machine_mode checker_addr_space_address_mode(addr_space_t addrspace)
77332 +{
77333 + return default_addr_space_address_mode(ADDR_SPACE_GENERIC);
77334 +}
77335 +
77336 +static bool checker_addr_space_valid_pointer_mode(enum machine_mode mode, addr_space_t as)
77337 +{
77338 + return default_addr_space_valid_pointer_mode(mode, as);
77339 +}
77340 +
77341 +static bool checker_addr_space_legitimate_address_p(enum machine_mode mode, rtx mem, bool strict, addr_space_t as)
77342 +{
77343 + return default_addr_space_legitimate_address_p(mode, mem, strict, ADDR_SPACE_GENERIC);
77344 +}
77345 +
77346 +static rtx checker_addr_space_legitimize_address(rtx x, rtx oldx, enum machine_mode mode, addr_space_t as)
77347 +{
77348 + return default_addr_space_legitimize_address(x, oldx, mode, as);
77349 +}
77350 +
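+/* controls which address-space conversions gcc accepts without an explicit
+   cast: a space and its __force_* counterpart are treated as compatible
+   (mirroring sparse's __force semantics), anything else must match exactly */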
77351 +static bool checker_addr_space_subset_p(addr_space_t subset, addr_space_t superset)
77352 +{
77353 + if (subset == ADDR_SPACE_FORCE_KERNEL && superset == ADDR_SPACE_KERNEL)
77354 + return true;
77355 +
77356 + if (subset == ADDR_SPACE_FORCE_USER && superset == ADDR_SPACE_USER)
77357 + return true;
77358 +
77359 + if (subset == ADDR_SPACE_FORCE_IOMEM && superset == ADDR_SPACE_IOMEM)
77360 + return true;
77361 +
77362 + if (subset == ADDR_SPACE_KERNEL && superset == ADDR_SPACE_FORCE_USER)
77363 + return true;
77364 +
77365 + if (subset == ADDR_SPACE_KERNEL && superset == ADDR_SPACE_FORCE_IOMEM)
77366 + return true;
77367 +
77368 + if (subset == ADDR_SPACE_USER && superset == ADDR_SPACE_FORCE_KERNEL)
77369 + return true;
77370 +
77371 + if (subset == ADDR_SPACE_IOMEM && superset == ADDR_SPACE_FORCE_KERNEL)
77372 + return true;
77373 +
77374 + return subset == superset;
77375 +}
77376 +
77377 +static rtx checker_addr_space_convert(rtx op, tree from_type, tree to_type)
77378 +{
77379 +// addr_space_t from_as = TYPE_ADDR_SPACE(TREE_TYPE(from_type));
77380 +// addr_space_t to_as = TYPE_ADDR_SPACE(TREE_TYPE(to_type));
77381 +
77382 + return op;
77383 +}
77384 +
77385 +static void register_checker_address_spaces(void *event_data, void *data)
77386 +{
77387 + c_register_addr_space("__kernel", ADDR_SPACE_KERNEL);
77388 + c_register_addr_space("__force_kernel", ADDR_SPACE_FORCE_KERNEL);
77389 + c_register_addr_space("__user", ADDR_SPACE_USER);
77390 + c_register_addr_space("__force_user", ADDR_SPACE_FORCE_USER);
77391 +// c_register_addr_space("__iomem", ADDR_SPACE_IOMEM);
77392 +// c_register_addr_space("__force_iomem", ADDR_SPACE_FORCE_IOMEM);
77393 +// c_register_addr_space("__percpu", ADDR_SPACE_PERCPU);
77394 +// c_register_addr_space("__force_percpu", ADDR_SPACE_FORCE_PERCPU);
77395 +// c_register_addr_space("__rcu", ADDR_SPACE_RCU);
77396 +// c_register_addr_space("__force_rcu", ADDR_SPACE_FORCE_RCU);
77397 +
77398 + targetm.addr_space.pointer_mode = checker_addr_space_pointer_mode;
77399 + targetm.addr_space.address_mode = checker_addr_space_address_mode;
77400 + targetm.addr_space.valid_pointer_mode = checker_addr_space_valid_pointer_mode;
77401 + targetm.addr_space.legitimate_address_p = checker_addr_space_legitimate_address_p;
77402 +// targetm.addr_space.legitimize_address = checker_addr_space_legitimize_address;
77403 + targetm.addr_space.subset_p = checker_addr_space_subset_p;
77404 + targetm.addr_space.convert = checker_addr_space_convert;
77405 +}
77406 +
77407 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
77408 +{
77409 + const char * const plugin_name = plugin_info->base_name;
77410 + const int argc = plugin_info->argc;
77411 + const struct plugin_argument * const argv = plugin_info->argv;
77412 + int i;
77413 +
77414 + if (!plugin_default_version_check(version, &gcc_version)) {
77415 + error(G_("incompatible gcc/plugin versions"));
77416 + return 1;
77417 + }
77418 +
77419 + register_callback(plugin_name, PLUGIN_INFO, NULL, &checker_plugin_info);
77420 +
77421 + for (i = 0; i < argc; ++i)
77422 +		error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
77423 +
77424 + if (TARGET_64BIT == 0)
77425 + return 0;
77426 +
77427 + register_callback (plugin_name, PLUGIN_PRAGMAS, register_checker_address_spaces, NULL);
77428 +
77429 + return 0;
77430 +}
77431 diff -urNp linux-3.0.8/tools/gcc/constify_plugin.c linux-3.0.8/tools/gcc/constify_plugin.c
77432 --- linux-3.0.8/tools/gcc/constify_plugin.c 1969-12-31 19:00:00.000000000 -0500
77433 +++ linux-3.0.8/tools/gcc/constify_plugin.c 2011-08-30 18:23:52.000000000 -0400
77434 @@ -0,0 +1,293 @@
77435 +/*
77436 + * Copyright 2011 by Emese Revfy <re.emese@gmail.com>
77437 + * Copyright 2011 by PaX Team <pageexec@freemail.hu>
77438 + * Licensed under the GPL v2, or (at your option) v3
77439 + *
77440 + * This gcc plugin constifies all structures which contain only function pointers or are explicitly marked for constification.
77441 + *
77442 + * Homepage:
77443 + * http://www.grsecurity.net/~ephox/const_plugin/
77444 + *
77445 + * Usage:
77446 + * $ gcc -I`gcc -print-file-name=plugin`/include -fPIC -shared -O2 -o constify_plugin.so constify_plugin.c
77447 + * $ gcc -fplugin=constify_plugin.so test.c -O2
77448 + */
77449 +
77450 +#include "gcc-plugin.h"
77451 +#include "config.h"
77452 +#include "system.h"
77453 +#include "coretypes.h"
77454 +#include "tree.h"
77455 +#include "tree-pass.h"
77456 +#include "intl.h"
77457 +#include "plugin-version.h"
77458 +#include "tm.h"
77459 +#include "toplev.h"
77460 +#include "function.h"
77461 +#include "tree-flow.h"
77462 +#include "plugin.h"
77463 +#include "diagnostic.h"
77464 +//#include "c-tree.h"
77465 +
77466 +#define C_TYPE_FIELDS_READONLY(TYPE) TREE_LANG_FLAG_1(TYPE)
77467 +
77468 +int plugin_is_GPL_compatible;
77469 +
77470 +static struct plugin_info const_plugin_info = {
77471 + .version = "20110826",
77472 + .help = "no-constify\tturn off constification\n",
77473 +};
77474 +
77475 +static void constify_type(tree type);
77476 +static bool walk_struct(tree node);
77477 +
77478 +static tree deconstify_type(tree old_type)
77479 +{
77480 + tree new_type, field;
77481 +
77482 + new_type = build_qualified_type(old_type, TYPE_QUALS(old_type) & ~TYPE_QUAL_CONST);
77483 + TYPE_FIELDS(new_type) = copy_list(TYPE_FIELDS(new_type));
77484 + for (field = TYPE_FIELDS(new_type); field; field = TREE_CHAIN(field))
77485 + DECL_FIELD_CONTEXT(field) = new_type;
77486 + TYPE_READONLY(new_type) = 0;
77487 + C_TYPE_FIELDS_READONLY(new_type) = 0;
77488 + return new_type;
77489 +}
77490 +
77491 +static tree handle_no_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
77492 +{
77493 + tree type;
77494 +
77495 + *no_add_attrs = true;
77496 + if (TREE_CODE(*node) == FUNCTION_DECL) {
77497 + error("%qE attribute does not apply to functions", name);
77498 + return NULL_TREE;
77499 + }
77500 +
77501 + if (TREE_CODE(*node) == VAR_DECL) {
77502 + error("%qE attribute does not apply to variables", name);
77503 + return NULL_TREE;
77504 + }
77505 +
77506 + if (TYPE_P(*node)) {
77507 + if (TREE_CODE(*node) == RECORD_TYPE || TREE_CODE(*node) == UNION_TYPE)
77508 + *no_add_attrs = false;
77509 + else
77510 + error("%qE attribute applies to struct and union types only", name);
77511 + return NULL_TREE;
77512 + }
77513 +
77514 + type = TREE_TYPE(*node);
77515 +
77516 + if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE) {
77517 + error("%qE attribute applies to struct and union types only", name);
77518 + return NULL_TREE;
77519 + }
77520 +
77521 + if (lookup_attribute(IDENTIFIER_POINTER(name), TYPE_ATTRIBUTES(type))) {
77522 + error("%qE attribute is already applied to the type", name);
77523 + return NULL_TREE;
77524 + }
77525 +
77526 + if (TREE_CODE(*node) == TYPE_DECL && !TYPE_READONLY(type)) {
77527 + error("%qE attribute used on type that is not constified", name);
77528 + return NULL_TREE;
77529 + }
77530 +
77531 + if (TREE_CODE(*node) == TYPE_DECL) {
77532 + TREE_TYPE(*node) = deconstify_type(type);
77533 + TREE_READONLY(*node) = 0;
77534 + return NULL_TREE;
77535 + }
77536 +
77537 + return NULL_TREE;
77538 +}
77539 +
77540 +static tree handle_do_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
77541 +{
77542 + *no_add_attrs = true;
77543 + if (!TYPE_P(*node)) {
77544 + error("%qE attribute applies to types only", name);
77545 + return NULL_TREE;
77546 + }
77547 +
77548 + if (TREE_CODE(*node) != RECORD_TYPE && TREE_CODE(*node) != UNION_TYPE) {
77549 + error("%qE attribute applies to struct and union types only", name);
77550 + return NULL_TREE;
77551 + }
77552 +
77553 + *no_add_attrs = false;
77554 + constify_type(*node);
77555 + return NULL_TREE;
77556 +}
77557 +
77558 +static struct attribute_spec no_const_attr = {
77559 + .name = "no_const",
77560 + .min_length = 0,
77561 + .max_length = 0,
77562 + .decl_required = false,
77563 + .type_required = false,
77564 + .function_type_required = false,
77565 + .handler = handle_no_const_attribute
77566 +};
77567 +
77568 +static struct attribute_spec do_const_attr = {
77569 + .name = "do_const",
77570 + .min_length = 0,
77571 + .max_length = 0,
77572 + .decl_required = false,
77573 + .type_required = false,
77574 + .function_type_required = false,
77575 + .handler = handle_do_const_attribute
77576 +};
77577 +
77578 +static void register_attributes(void *event_data, void *data)
77579 +{
77580 + register_attribute(&no_const_attr);
77581 + register_attribute(&do_const_attr);
77582 +}
77583 +
77584 +static void constify_type(tree type)
77585 +{
77586 + TYPE_READONLY(type) = 1;
77587 + C_TYPE_FIELDS_READONLY(type) = 1;
77588 +}
77589 +
77590 +static bool is_fptr(tree field)
77591 +{
77592 + tree ptr = TREE_TYPE(field);
77593 +
77594 + if (TREE_CODE(ptr) != POINTER_TYPE)
77595 + return false;
77596 +
77597 + return TREE_CODE(TREE_TYPE(ptr)) == FUNCTION_TYPE;
77598 +}
77599 +
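+/* a struct/union is considered constifiable when it is not marked no_const,
+   has at least one field, and every field is either a function pointer, an
+   already read-only field, or a nested struct/union that itself qualifies */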
77600 +static bool walk_struct(tree node)
77601 +{
77602 + tree field;
77603 +
77604 + if (lookup_attribute("no_const", TYPE_ATTRIBUTES(node)))
77605 + return false;
77606 +
77607 + if (TYPE_FIELDS(node) == NULL_TREE)
77608 + return false;
77609 +
77610 + for (field = TYPE_FIELDS(node); field; field = TREE_CHAIN(field)) {
77611 + tree type = TREE_TYPE(field);
77612 + enum tree_code code = TREE_CODE(type);
77613 + if (code == RECORD_TYPE || code == UNION_TYPE) {
77614 + if (!(walk_struct(type)))
77615 + return false;
77616 + } else if (!is_fptr(field) && !TREE_READONLY(field))
77617 + return false;
77618 + }
77619 + return true;
77620 +}
77621 +
77622 +static void finish_type(void *event_data, void *data)
77623 +{
77624 + tree type = (tree)event_data;
77625 +
77626 + if (type == NULL_TREE)
77627 + return;
77628 +
77629 + if (TYPE_READONLY(type))
77630 + return;
77631 +
77632 + if (walk_struct(type))
77633 + constify_type(type);
77634 +}
77635 +
77636 +static unsigned int check_local_variables(void);
77637 +
77638 +struct gimple_opt_pass pass_local_variable = {
77639 + {
77640 + .type = GIMPLE_PASS,
77641 + .name = "check_local_variables",
77642 + .gate = NULL,
77643 + .execute = check_local_variables,
77644 + .sub = NULL,
77645 + .next = NULL,
77646 + .static_pass_number = 0,
77647 + .tv_id = TV_NONE,
77648 + .properties_required = 0,
77649 + .properties_provided = 0,
77650 + .properties_destroyed = 0,
77651 + .todo_flags_start = 0,
77652 + .todo_flags_finish = 0
77653 + }
77654 +};
77655 +
77656 +static unsigned int check_local_variables(void)
77657 +{
77658 + tree var;
77659 + referenced_var_iterator rvi;
77660 +
77661 +#if __GNUC__ == 4 && __GNUC_MINOR__ == 5
77662 + FOR_EACH_REFERENCED_VAR(var, rvi) {
77663 +#else
77664 + FOR_EACH_REFERENCED_VAR(cfun, var, rvi) {
77665 +#endif
77666 + tree type = TREE_TYPE(var);
77667 +
77668 + if (!DECL_P(var) || TREE_STATIC(var) || DECL_EXTERNAL(var))
77669 + continue;
77670 +
77671 + if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE)
77672 + continue;
77673 +
77674 + if (!TYPE_READONLY(type))
77675 + continue;
77676 +
77677 +// if (lookup_attribute("no_const", DECL_ATTRIBUTES(var)))
77678 +// continue;
77679 +
77680 +// if (lookup_attribute("no_const", TYPE_ATTRIBUTES(type)))
77681 +// continue;
77682 +
77683 + if (walk_struct(type)) {
77684 + error("constified variable %qE cannot be local", var);
77685 + return 1;
77686 + }
77687 + }
77688 + return 0;
77689 +}
77690 +
77691 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
77692 +{
77693 + const char * const plugin_name = plugin_info->base_name;
77694 + const int argc = plugin_info->argc;
77695 + const struct plugin_argument * const argv = plugin_info->argv;
77696 + int i;
77697 + bool constify = true;
77698 +
77699 + struct register_pass_info local_variable_pass_info = {
77700 + .pass = &pass_local_variable.pass,
77701 + .reference_pass_name = "*referenced_vars",
77702 + .ref_pass_instance_number = 0,
77703 + .pos_op = PASS_POS_INSERT_AFTER
77704 + };
77705 +
77706 + if (!plugin_default_version_check(version, &gcc_version)) {
77707 + error(G_("incompatible gcc/plugin versions"));
77708 + return 1;
77709 + }
77710 +
77711 + for (i = 0; i < argc; ++i) {
77712 + if (!(strcmp(argv[i].key, "no-constify"))) {
77713 + constify = false;
77714 + continue;
77715 + }
77716 +		error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
77717 + }
77718 +
77719 + register_callback(plugin_name, PLUGIN_INFO, NULL, &const_plugin_info);
77720 + if (constify) {
77721 + register_callback(plugin_name, PLUGIN_FINISH_TYPE, finish_type, NULL);
77722 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &local_variable_pass_info);
77723 + }
77724 + register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
77725 +
77726 + return 0;
77727 +}
77728 diff -urNp linux-3.0.8/tools/gcc/kallocstat_plugin.c linux-3.0.8/tools/gcc/kallocstat_plugin.c
77729 --- linux-3.0.8/tools/gcc/kallocstat_plugin.c 1969-12-31 19:00:00.000000000 -0500
77730 +++ linux-3.0.8/tools/gcc/kallocstat_plugin.c 2011-10-06 04:17:55.000000000 -0400
77731 @@ -0,0 +1,165 @@
77732 +/*
77733 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
77734 + * Licensed under the GPL v2
77735 + *
77736 + * Note: the choice of the license means that the compilation process is
77737 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
77738 + * but for the kernel it doesn't matter since it doesn't link against
77739 + * any of the gcc libraries
77740 + *
77741 + * gcc plugin to find the distribution of k*alloc sizes
77742 + *
77743 + * TODO:
77744 + *
77745 + * BUGS:
77746 + * - none known
77747 + */
77748 +#include "gcc-plugin.h"
77749 +#include "config.h"
77750 +#include "system.h"
77751 +#include "coretypes.h"
77752 +#include "tree.h"
77753 +#include "tree-pass.h"
77754 +#include "intl.h"
77755 +#include "plugin-version.h"
77756 +#include "tm.h"
77757 +#include "toplev.h"
77758 +#include "basic-block.h"
77759 +#include "gimple.h"
77760 +//#include "expr.h" where are you...
77761 +#include "diagnostic.h"
77762 +#include "rtl.h"
77763 +#include "emit-rtl.h"
77764 +#include "function.h"
77765 +
77766 +extern void print_gimple_stmt(FILE *, gimple, int, int);
77767 +
77768 +int plugin_is_GPL_compatible;
77769 +
77770 +static const char * const kalloc_functions[] = {
77771 + "__kmalloc",
77772 + "kmalloc",
77773 + "kmalloc_large",
77774 + "kmalloc_node",
77775 + "kmalloc_order",
77776 + "kmalloc_order_trace",
77777 + "kmalloc_slab",
77778 + "kzalloc",
77779 + "kzalloc_node",
77780 +};
77781 +
77782 +static struct plugin_info kallocstat_plugin_info = {
77783 + .version = "201109121100",
77784 +};
77785 +
77786 +static unsigned int execute_kallocstat(void);
77787 +
77788 +static struct gimple_opt_pass kallocstat_pass = {
77789 + .pass = {
77790 + .type = GIMPLE_PASS,
77791 + .name = "kallocstat",
77792 + .gate = NULL,
77793 + .execute = execute_kallocstat,
77794 + .sub = NULL,
77795 + .next = NULL,
77796 + .static_pass_number = 0,
77797 + .tv_id = TV_NONE,
77798 + .properties_required = 0,
77799 + .properties_provided = 0,
77800 + .properties_destroyed = 0,
77801 + .todo_flags_start = 0,
77802 + .todo_flags_finish = 0
77803 + }
77804 +};
77805 +
77806 +static bool is_kalloc(const char *fnname)
77807 +{
77808 + size_t i;
77809 +
77810 + for (i = 0; i < ARRAY_SIZE(kalloc_functions); i++)
77811 + if (!strcmp(fnname, kalloc_functions[i]))
77812 + return true;
77813 + return false;
77814 +}
77815 +
77816 +static unsigned int execute_kallocstat(void)
77817 +{
77818 + basic_block bb;
77819 +
77820 + // 1. loop through BBs and GIMPLE statements
77821 + FOR_EACH_BB(bb) {
77822 + gimple_stmt_iterator gsi;
77823 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
77824 + // gimple match:
77825 + tree fndecl, size;
77826 + gimple call_stmt;
77827 + const char *fnname;
77828 +
77829 + // is it a call
77830 + call_stmt = gsi_stmt(gsi);
77831 + if (!is_gimple_call(call_stmt))
77832 + continue;
77833 + fndecl = gimple_call_fndecl(call_stmt);
77834 + if (fndecl == NULL_TREE)
77835 + continue;
77836 + if (TREE_CODE(fndecl) != FUNCTION_DECL)
77837 + continue;
77838 +
77839 + // is it a call to k*alloc
77840 + fnname = IDENTIFIER_POINTER(DECL_NAME(fndecl));
77841 + if (!is_kalloc(fnname))
77842 + continue;
77843 +
77844 + // is the size arg the result of a simple const assignment
77845 + size = gimple_call_arg(call_stmt, 0);
77846 + while (true) {
77847 + gimple def_stmt;
77848 + expanded_location xloc;
77849 + size_t size_val;
77850 +
77851 + if (TREE_CODE(size) != SSA_NAME)
77852 + break;
77853 + def_stmt = SSA_NAME_DEF_STMT(size);
77854 + if (!def_stmt || !is_gimple_assign(def_stmt))
77855 + break;
77856 + if (gimple_num_ops(def_stmt) != 2)
77857 + break;
77858 + size = gimple_assign_rhs1(def_stmt);
77859 + if (!TREE_CONSTANT(size))
77860 + continue;
77861 + xloc = expand_location(gimple_location(def_stmt));
77862 + if (!xloc.file)
77863 + xloc = expand_location(DECL_SOURCE_LOCATION(current_function_decl));
77864 + size_val = TREE_INT_CST_LOW(size);
77865 + fprintf(stderr, "kallocsize: %8zu %8zx %s %s:%u\n", size_val, size_val, fnname, xloc.file, xloc.line);
77866 + break;
77867 + }
77868 +//print_gimple_stmt(stderr, call_stmt, 0, TDF_LINENO);
77869 +//debug_tree(gimple_call_fn(call_stmt));
77870 +//print_node(stderr, "pax", fndecl, 4);
77871 + }
77872 + }
77873 +
77874 + return 0;
77875 +}
77876 +
77877 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
77878 +{
77879 + const char * const plugin_name = plugin_info->base_name;
77880 + struct register_pass_info kallocstat_pass_info = {
77881 + .pass = &kallocstat_pass.pass,
77882 + .reference_pass_name = "ssa",
77883 + .ref_pass_instance_number = 0,
77884 + .pos_op = PASS_POS_INSERT_AFTER
77885 + };
77886 +
77887 + if (!plugin_default_version_check(version, &gcc_version)) {
77888 + error(G_("incompatible gcc/plugin versions"));
77889 + return 1;
77890 + }
77891 +
77892 + register_callback(plugin_name, PLUGIN_INFO, NULL, &kallocstat_plugin_info);
77893 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kallocstat_pass_info);
77894 +
77895 + return 0;
77896 +}
77897 diff -urNp linux-3.0.8/tools/gcc/kernexec_plugin.c linux-3.0.8/tools/gcc/kernexec_plugin.c
77898 --- linux-3.0.8/tools/gcc/kernexec_plugin.c 1969-12-31 19:00:00.000000000 -0500
77899 +++ linux-3.0.8/tools/gcc/kernexec_plugin.c 2011-10-06 04:17:55.000000000 -0400
77900 @@ -0,0 +1,273 @@
77901 +/*
77902 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
77903 + * Licensed under the GPL v2
77904 + *
77905 + * Note: the choice of the license means that the compilation process is
77906 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
77907 + * but for the kernel it doesn't matter since it doesn't link against
77908 + * any of the gcc libraries
77909 + *
77910 + * gcc plugin to make KERNEXEC/amd64 almost as good as it is on i386
77911 + *
77912 + * TODO:
77913 + *
77914 + * BUGS:
77915 + * - none known
77916 + */
77917 +#include "gcc-plugin.h"
77918 +#include "config.h"
77919 +#include "system.h"
77920 +#include "coretypes.h"
77921 +#include "tree.h"
77922 +#include "tree-pass.h"
77923 +#include "intl.h"
77924 +#include "plugin-version.h"
77925 +#include "tm.h"
77926 +#include "toplev.h"
77927 +#include "basic-block.h"
77928 +#include "gimple.h"
77929 +//#include "expr.h" where are you...
77930 +#include "diagnostic.h"
77931 +#include "rtl.h"
77932 +#include "emit-rtl.h"
77933 +#include "function.h"
77934 +#include "tree-flow.h"
77935 +
77936 +extern void print_gimple_stmt(FILE *, gimple, int, int);
77937 +extern rtx emit_move_insn(rtx x, rtx y);
77938 +
77939 +int plugin_is_GPL_compatible;
77940 +
77941 +static struct plugin_info kernexec_plugin_info = {
77942 + .version = "201110032145",
77943 +};
77944 +
77945 +static unsigned int execute_kernexec_fptr(void);
77946 +static unsigned int execute_kernexec_retaddr(void);
77947 +static bool kernexec_cmodel_check(void);
77948 +
77949 +static struct gimple_opt_pass kernexec_fptr_pass = {
77950 + .pass = {
77951 + .type = GIMPLE_PASS,
77952 + .name = "kernexec_fptr",
77953 + .gate = kernexec_cmodel_check,
77954 + .execute = execute_kernexec_fptr,
77955 + .sub = NULL,
77956 + .next = NULL,
77957 + .static_pass_number = 0,
77958 + .tv_id = TV_NONE,
77959 + .properties_required = 0,
77960 + .properties_provided = 0,
77961 + .properties_destroyed = 0,
77962 + .todo_flags_start = 0,
77963 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi
77964 + }
77965 +};
77966 +
77967 +static struct rtl_opt_pass kernexec_retaddr_pass = {
77968 + .pass = {
77969 + .type = RTL_PASS,
77970 + .name = "kernexec_retaddr",
77971 + .gate = kernexec_cmodel_check,
77972 + .execute = execute_kernexec_retaddr,
77973 + .sub = NULL,
77974 + .next = NULL,
77975 + .static_pass_number = 0,
77976 + .tv_id = TV_NONE,
77977 + .properties_required = 0,
77978 + .properties_provided = 0,
77979 + .properties_destroyed = 0,
77980 + .todo_flags_start = 0,
77981 + .todo_flags_finish = TODO_dump_func | TODO_ggc_collect
77982 + }
77983 +};
77984 +
77985 +static bool kernexec_cmodel_check(void)
77986 +{
77987 + tree section;
77988 +
77989 + if (ix86_cmodel != CM_KERNEL)
77990 + return false;
77991 +
77992 + section = lookup_attribute("__section__", DECL_ATTRIBUTES(current_function_decl));
77993 + if (!section || !TREE_VALUE(section))
77994 + return true;
77995 +
77996 + section = TREE_VALUE(TREE_VALUE(section));
77997 + if (strncmp(TREE_STRING_POINTER(section), ".vsyscall_", 10))
77998 + return true;
77999 +
78000 + return false;
78001 +}
78002 +
78003 +/*
78004 + * add special KERNEXEC instrumentation: force MSB of fptr to 1, which will produce
78005 + * a non-canonical address from a userland ptr and will just trigger a GPF on dereference
78006 + */
78007 +static void kernexec_instrument_fptr(gimple_stmt_iterator gsi)
78008 +{
78009 + gimple assign_intptr, assign_new_fptr, call_stmt;
78010 + tree intptr, old_fptr, new_fptr, kernexec_mask;
78011 +
78012 + call_stmt = gsi_stmt(gsi);
78013 + old_fptr = gimple_call_fn(call_stmt);
78014 +
78015 + // create temporary unsigned long variable used for bitops and cast fptr to it
78016 + intptr = create_tmp_var(long_unsigned_type_node, NULL);
78017 + add_referenced_var(intptr);
78018 + mark_sym_for_renaming(intptr);
78019 + assign_intptr = gimple_build_assign(intptr, fold_convert(long_unsigned_type_node, old_fptr));
78020 + update_stmt(assign_intptr);
78021 + gsi_insert_before(&gsi, assign_intptr, GSI_SAME_STMT);
78022 +
78023 + // apply logical or to temporary unsigned long and bitmask
78024 + kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0x8000000000000000LL);
78025 +// kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0xffffffff80000000LL);
78026 + assign_intptr = gimple_build_assign(intptr, fold_build2(BIT_IOR_EXPR, long_long_unsigned_type_node, intptr, kernexec_mask));
78027 + update_stmt(assign_intptr);
78028 + gsi_insert_before(&gsi, assign_intptr, GSI_SAME_STMT);
78029 +
78030 + // cast temporary unsigned long back to a temporary fptr variable
78031 + new_fptr = create_tmp_var(TREE_TYPE(old_fptr), NULL);
78032 + add_referenced_var(new_fptr);
78033 + mark_sym_for_renaming(new_fptr);
78034 + assign_new_fptr = gimple_build_assign(new_fptr, fold_convert(TREE_TYPE(old_fptr), intptr));
78035 + update_stmt(assign_new_fptr);
78036 + gsi_insert_before(&gsi, assign_new_fptr, GSI_SAME_STMT);
78037 +
78038 + // replace call stmt fn with the new fptr
78039 + gimple_call_set_fn(call_stmt, new_fptr);
78040 + update_stmt(call_stmt);
78041 +}
78042 +
78043 +/*
78044 + * find all C level function pointer dereferences and forcibly set the highest bit of the pointer
78045 + */
78046 +static unsigned int execute_kernexec_fptr(void)
78047 +{
78048 + basic_block bb;
78049 + gimple_stmt_iterator gsi;
78050 +
78051 + // 1. loop through BBs and GIMPLE statements
78052 + FOR_EACH_BB(bb) {
78053 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
78054 + // gimple match: h_1 = get_fptr (); D.2709_3 = h_1 (x_2(D));
78055 + tree fn;
78056 + gimple call_stmt;
78057 +
78058 + // is it a call ...
78059 + call_stmt = gsi_stmt(gsi);
78060 + if (!is_gimple_call(call_stmt))
78061 + continue;
78062 + fn = gimple_call_fn(call_stmt);
78063 + if (TREE_CODE(fn) == ADDR_EXPR)
78064 + continue;
78065 + if (TREE_CODE(fn) != SSA_NAME)
78066 + gcc_unreachable();
78067 +
78068 + // ... through a function pointer
78069 + fn = SSA_NAME_VAR(fn);
78070 + if (TREE_CODE(fn) != VAR_DECL && TREE_CODE(fn) != PARM_DECL)
78071 + continue;
78072 + fn = TREE_TYPE(fn);
78073 + if (TREE_CODE(fn) != POINTER_TYPE)
78074 + continue;
78075 + fn = TREE_TYPE(fn);
78076 + if (TREE_CODE(fn) != FUNCTION_TYPE)
78077 + continue;
78078 +
78079 + kernexec_instrument_fptr(gsi);
78080 +
78081 +//debug_tree(gimple_call_fn(call_stmt));
78082 +//print_gimple_stmt(stderr, call_stmt, 0, TDF_LINENO);
78083 + }
78084 + }
78085 +
78086 + return 0;
78087 +}
78088 +
78089 +// add special KERNEXEC instrumentation: btsq $63,(%rsp) just before retn
78090 +static void kernexec_instrument_retaddr(rtx insn)
78091 +{
78092 + rtx btsq;
78093 + rtvec argvec, constraintvec, labelvec;
78094 + int line;
78095 +
78096 + // create asm volatile("btsq $63,(%%rsp)":::)
78097 + argvec = rtvec_alloc(0);
78098 + constraintvec = rtvec_alloc(0);
78099 + labelvec = rtvec_alloc(0);
78100 + line = expand_location(RTL_LOCATION(insn)).line;
78101 + btsq = gen_rtx_ASM_OPERANDS(VOIDmode, "btsq $63,(%%rsp)", empty_string, 0, argvec, constraintvec, labelvec, line);
78102 + MEM_VOLATILE_P(btsq) = 1;
78103 + RTX_FRAME_RELATED_P(btsq) = 1;
78104 + emit_insn_before(btsq, insn);
78105 +}
78106 +
78107 +/*
78108 + * find all asm level function returns and forcibly set the highest bit of the return address
78109 + */
78110 +static unsigned int execute_kernexec_retaddr(void)
78111 +{
78112 + rtx insn;
78113 +
78114 + // 1. find function returns
78115 + for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
78116 + // rtl match: (jump_insn 41 40 42 2 (return) fptr.c:42 634 {return_internal} (nil))
78117 + // (jump_insn 12 9 11 2 (parallel [ (return) (unspec [ (0) ] UNSPEC_REP) ]) fptr.c:46 635 {return_internal_long} (nil))
78118 + rtx body;
78119 +
78120 + // is it a retn
78121 + if (!JUMP_P(insn))
78122 + continue;
78123 + body = PATTERN(insn);
78124 + if (GET_CODE(body) == PARALLEL)
78125 + body = XVECEXP(body, 0, 0);
78126 + if (GET_CODE(body) != RETURN)
78127 + continue;
78128 + kernexec_instrument_retaddr(insn);
78129 + }
78130 +
78131 +// print_simple_rtl(stderr, get_insns());
78132 +// print_rtl(stderr, get_insns());
78133 +
78134 + return 0;
78135 +}
78136 +
78137 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
78138 +{
78139 + const char * const plugin_name = plugin_info->base_name;
78140 + const int argc = plugin_info->argc;
78141 + const struct plugin_argument * const argv = plugin_info->argv;
78142 + int i;
78143 + struct register_pass_info kernexec_fptr_pass_info = {
78144 + .pass = &kernexec_fptr_pass.pass,
78145 + .reference_pass_name = "ssa",
78146 + .ref_pass_instance_number = 0,
78147 + .pos_op = PASS_POS_INSERT_AFTER
78148 + };
78149 + struct register_pass_info kernexec_retaddr_pass_info = {
78150 + .pass = &kernexec_retaddr_pass.pass,
78151 + .reference_pass_name = "pro_and_epilogue",
78152 + .ref_pass_instance_number = 0,
78153 + .pos_op = PASS_POS_INSERT_AFTER
78154 + };
78155 +
78156 + if (!plugin_default_version_check(version, &gcc_version)) {
78157 + error(G_("incompatible gcc/plugin versions"));
78158 + return 1;
78159 + }
78160 +
78161 + register_callback(plugin_name, PLUGIN_INFO, NULL, &kernexec_plugin_info);
78162 +
78163 + for (i = 0; i < argc; ++i)
78164 +		error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
78165 +
78166 + if (TARGET_64BIT == 0)
78167 + return 0;
78168 +
78169 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_fptr_pass_info);
78170 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_retaddr_pass_info);
78171 +
78172 + return 0;
78173 +}
78174 diff -urNp linux-3.0.8/tools/gcc/Makefile linux-3.0.8/tools/gcc/Makefile
78175 --- linux-3.0.8/tools/gcc/Makefile 1969-12-31 19:00:00.000000000 -0500
78176 +++ linux-3.0.8/tools/gcc/Makefile 2011-10-06 04:17:55.000000000 -0400
78177 @@ -0,0 +1,21 @@
78178 +#CC := gcc
78179 +#PLUGIN_SOURCE_FILES := pax_plugin.c
78180 +#PLUGIN_OBJECT_FILES := $(patsubst %.c,%.o,$(PLUGIN_SOURCE_FILES))
78181 +GCCPLUGINS_DIR := $(shell $(HOSTCC) -print-file-name=plugin)
78182 +#CFLAGS += -I$(GCCPLUGINS_DIR)/include -fPIC -O2 -Wall -W
78183 +
78184 +HOST_EXTRACFLAGS += -I$(GCCPLUGINS_DIR)/include
78185 +
78186 +hostlibs-y := constify_plugin.so
78187 +hostlibs-$(CONFIG_PAX_MEMORY_STACKLEAK) += stackleak_plugin.so
78188 +hostlibs-$(CONFIG_KALLOCSTAT_PLUGIN) += kallocstat_plugin.so
78189 +hostlibs-$(CONFIG_PAX_KERNEXEC_PLUGIN) += kernexec_plugin.so
78190 +hostlibs-$(CONFIG_CHECKER_PLUGIN) += checker_plugin.so
78191 +
78192 +always := $(hostlibs-y)
78193 +
78194 +stackleak_plugin-objs := stackleak_plugin.o
78195 +constify_plugin-objs := constify_plugin.o
78196 +kallocstat_plugin-objs := kallocstat_plugin.o
78197 +kernexec_plugin-objs := kernexec_plugin.o
78198 +checker_plugin-objs := checker_plugin.o
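+# note: each plugin is built as a host shared object and loaded into gcc via
+# -fplugin=<plugin>.so (see e.g. the usage example in constify_plugin.c above)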
78199 diff -urNp linux-3.0.8/tools/gcc/stackleak_plugin.c linux-3.0.8/tools/gcc/stackleak_plugin.c
78200 --- linux-3.0.8/tools/gcc/stackleak_plugin.c 1969-12-31 19:00:00.000000000 -0500
78201 +++ linux-3.0.8/tools/gcc/stackleak_plugin.c 2011-09-17 00:53:44.000000000 -0400
78202 @@ -0,0 +1,251 @@
78203 +/*
78204 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
78205 + * Licensed under the GPL v2
78206 + *
78207 + * Note: the choice of the license means that the compilation process is
78208 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
78209 + * but for the kernel it doesn't matter since it doesn't link against
78210 + * any of the gcc libraries
78211 + *
78212 + * gcc plugin to help implement various PaX features
78213 + *
78214 + * - track lowest stack pointer
78215 + *
78216 + * TODO:
78217 + * - initialize all local variables
78218 + *
78219 + * BUGS:
78220 + * - none known
78221 + */
78222 +#include "gcc-plugin.h"
78223 +#include "config.h"
78224 +#include "system.h"
78225 +#include "coretypes.h"
78226 +#include "tree.h"
78227 +#include "tree-pass.h"
78228 +#include "intl.h"
78229 +#include "plugin-version.h"
78230 +#include "tm.h"
78231 +#include "toplev.h"
78232 +#include "basic-block.h"
78233 +#include "gimple.h"
78234 +//#include "expr.h" where are you...
78235 +#include "diagnostic.h"
78236 +#include "rtl.h"
78237 +#include "emit-rtl.h"
78238 +#include "function.h"
78239 +
78240 +int plugin_is_GPL_compatible;
78241 +
78242 +static int track_frame_size = -1;
78243 +static const char track_function[] = "pax_track_stack";
78244 +static bool init_locals;
78245 +
78246 +static struct plugin_info stackleak_plugin_info = {
78247 + .version = "201109112100",
78248 + .help = "track-lowest-sp=nn\ttrack sp in functions whose frame size is at least nn bytes\n"
78249 +// "initialize-locals\t\tforcibly initialize all stack frames\n"
78250 +};
78251 +
78252 +static bool gate_stackleak_track_stack(void);
78253 +static unsigned int execute_stackleak_tree_instrument(void);
78254 +static unsigned int execute_stackleak_final(void);
78255 +
78256 +static struct gimple_opt_pass stackleak_tree_instrument_pass = {
78257 + .pass = {
78258 + .type = GIMPLE_PASS,
78259 + .name = "stackleak_tree_instrument",
78260 + .gate = gate_stackleak_track_stack,
78261 + .execute = execute_stackleak_tree_instrument,
78262 + .sub = NULL,
78263 + .next = NULL,
78264 + .static_pass_number = 0,
78265 + .tv_id = TV_NONE,
78266 + .properties_required = PROP_gimple_leh | PROP_cfg,
78267 + .properties_provided = 0,
78268 + .properties_destroyed = 0,
78269 + .todo_flags_start = 0, //TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts,
78270 + .todo_flags_finish = TODO_verify_stmts | TODO_dump_func
78271 + }
78272 +};
78273 +
78274 +static struct rtl_opt_pass stackleak_final_rtl_opt_pass = {
78275 + .pass = {
78276 + .type = RTL_PASS,
78277 + .name = "stackleak_final",
78278 + .gate = gate_stackleak_track_stack,
78279 + .execute = execute_stackleak_final,
78280 + .sub = NULL,
78281 + .next = NULL,
78282 + .static_pass_number = 0,
78283 + .tv_id = TV_NONE,
78284 + .properties_required = 0,
78285 + .properties_provided = 0,
78286 + .properties_destroyed = 0,
78287 + .todo_flags_start = 0,
78288 + .todo_flags_finish = TODO_dump_func
78289 + }
78290 +};
78291 +
78292 +static bool gate_stackleak_track_stack(void)
78293 +{
78294 + return track_frame_size >= 0;
78295 +}
78296 +
78297 +static void stackleak_add_instrumentation(gimple_stmt_iterator *gsi, bool before)
78298 +{
78299 + gimple call;
78300 + tree fndecl, type;
78301 +
78302 + // insert call to void pax_track_stack(void)
78303 + type = build_function_type_list(void_type_node, NULL_TREE);
78304 + fndecl = build_fn_decl(track_function, type);
78305 + DECL_ASSEMBLER_NAME(fndecl); // for LTO
78306 + call = gimple_build_call(fndecl, 0);
78307 + if (before)
78308 + gsi_insert_before(gsi, call, GSI_CONTINUE_LINKING);
78309 + else
78310 + gsi_insert_after(gsi, call, GSI_CONTINUE_LINKING);
78311 +}
78312 +
78313 +static unsigned int execute_stackleak_tree_instrument(void)
78314 +{
78315 + basic_block bb, entry_bb;
78316 + gimple_stmt_iterator gsi;
78317 + bool prologue_instrumented = false;
78318 +
78319 + entry_bb = ENTRY_BLOCK_PTR_FOR_FUNCTION(cfun)->next_bb;
78320 +
78321 + // 1. loop through BBs and GIMPLE statements
78322 + FOR_EACH_BB(bb) {
78323 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
78324 + // gimple match: align 8 built-in BUILT_IN_NORMAL:BUILT_IN_ALLOCA attributes <tree_list 0xb7576450>
78325 + tree fndecl;
78326 + gimple stmt = gsi_stmt(gsi);
78327 +
78328 + if (!is_gimple_call(stmt))
78329 + continue;
78330 + fndecl = gimple_call_fndecl(stmt);
78331 + if (!fndecl)
78332 + continue;
78333 + if (TREE_CODE(fndecl) != FUNCTION_DECL)
78334 + continue;
78335 + if (!DECL_BUILT_IN(fndecl))
78336 + continue;
78337 + if (DECL_BUILT_IN_CLASS(fndecl) != BUILT_IN_NORMAL)
78338 + continue;
78339 + if (DECL_FUNCTION_CODE(fndecl) != BUILT_IN_ALLOCA)
78340 + continue;
78341 +
78342 + // 2. insert track call after each __builtin_alloca call
78343 + stackleak_add_instrumentation(&gsi, false);
78344 + if (bb == entry_bb)
78345 + prologue_instrumented = true;
78346 +// print_node(stderr, "pax", fndecl, 4);
78347 + }
78348 + }
78349 +
78350 + // 3. insert track call at the beginning
78351 + if (!prologue_instrumented) {
78352 + gsi = gsi_start_bb(entry_bb);
78353 + stackleak_add_instrumentation(&gsi, true);
78354 + }
78355 +
78356 + return 0;
78357 +}
78358 +
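+/* the RTL pass undoes the instrumentation added above: when the final frame
+   size turns out to be below the track-lowest-sp threshold and the function
+   does not use alloca, the pax_track_stack calls are deleted again */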
78359 +static unsigned int execute_stackleak_final(void)
78360 +{
78361 + rtx insn;
78362 +
78363 + if (cfun->calls_alloca)
78364 + return 0;
78365 +
78366 + // keep calls only if function frame is big enough
78367 + if (get_frame_size() >= track_frame_size)
78368 + return 0;
78369 +
78370 + // 1. find pax_track_stack calls
78371 + for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
78372 + // rtl match: (call_insn 8 7 9 3 (call (mem (symbol_ref ("pax_track_stack") [flags 0x41] <function_decl 0xb7470e80 pax_track_stack>) [0 S1 A8]) (4)) -1 (nil) (nil))
78373 + rtx body;
78374 +
78375 + if (!CALL_P(insn))
78376 + continue;
78377 + body = PATTERN(insn);
78378 + if (GET_CODE(body) != CALL)
78379 + continue;
78380 + body = XEXP(body, 0);
78381 + if (GET_CODE(body) != MEM)
78382 + continue;
78383 + body = XEXP(body, 0);
78384 + if (GET_CODE(body) != SYMBOL_REF)
78385 + continue;
78386 + if (strcmp(XSTR(body, 0), track_function))
78387 + continue;
78388 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
78389 + // 2. delete call
78390 + delete_insn_and_edges(insn);
78391 + }
78392 +
78393 +// print_simple_rtl(stderr, get_insns());
78394 +// print_rtl(stderr, get_insns());
78395 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
78396 +
78397 + return 0;
78398 +}
78399 +
78400 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
78401 +{
78402 + const char * const plugin_name = plugin_info->base_name;
78403 + const int argc = plugin_info->argc;
78404 + const struct plugin_argument * const argv = plugin_info->argv;
78405 + int i;
78406 + struct register_pass_info stackleak_tree_instrument_pass_info = {
78407 + .pass = &stackleak_tree_instrument_pass.pass,
78408 +// .reference_pass_name = "tree_profile",
78409 + .reference_pass_name = "optimized",
78410 + .ref_pass_instance_number = 0,
78411 + .pos_op = PASS_POS_INSERT_AFTER
78412 + };
78413 + struct register_pass_info stackleak_final_pass_info = {
78414 + .pass = &stackleak_final_rtl_opt_pass.pass,
78415 + .reference_pass_name = "final",
78416 + .ref_pass_instance_number = 0,
78417 + .pos_op = PASS_POS_INSERT_BEFORE
78418 + };
78419 +
78420 + if (!plugin_default_version_check(version, &gcc_version)) {
78421 + error(G_("incompatible gcc/plugin versions"));
78422 + return 1;
78423 + }
78424 +
78425 + register_callback(plugin_name, PLUGIN_INFO, NULL, &stackleak_plugin_info);
78426 +
78427 + for (i = 0; i < argc; ++i) {
78428 + if (!strcmp(argv[i].key, "track-lowest-sp")) {
78429 + if (!argv[i].value) {
78430 + error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
78431 + continue;
78432 + }
78433 + track_frame_size = atoi(argv[i].value);
78434 + if (argv[i].value[0] < '0' || argv[i].value[0] > '9' || track_frame_size < 0)
78435 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
78436 + continue;
78437 + }
78438 + if (!strcmp(argv[i].key, "initialize-locals")) {
78439 + if (argv[i].value) {
78440 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
78441 + continue;
78442 + }
78443 + init_locals = true;
78444 + continue;
78445 + }
78446 +		error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
78447 + }
78448 +
78449 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_tree_instrument_pass_info);
78450 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_final_pass_info);
78451 +
78452 + return 0;
78453 +}
78454 diff -urNp linux-3.0.8/tools/perf/util/include/asm/alternative-asm.h linux-3.0.8/tools/perf/util/include/asm/alternative-asm.h
78455 --- linux-3.0.8/tools/perf/util/include/asm/alternative-asm.h 2011-07-21 22:17:23.000000000 -0400
78456 +++ linux-3.0.8/tools/perf/util/include/asm/alternative-asm.h 2011-10-20 04:46:01.000000000 -0400
78457 @@ -5,4 +5,7 @@
78458
78459 #define altinstruction_entry #
78460
78461 + .macro pax_force_retaddr rip=0
78462 + .endm
78463 +
78464 #endif
78465 diff -urNp linux-3.0.8/usr/gen_init_cpio.c linux-3.0.8/usr/gen_init_cpio.c
78466 --- linux-3.0.8/usr/gen_init_cpio.c 2011-07-21 22:17:23.000000000 -0400
78467 +++ linux-3.0.8/usr/gen_init_cpio.c 2011-08-23 21:47:56.000000000 -0400
78468 @@ -303,7 +303,7 @@ static int cpio_mkfile(const char *name,
78469 int retval;
78470 int rc = -1;
78471 int namesize;
78472 - int i;
78473 + unsigned int i;
78474
78475 mode |= S_IFREG;
78476
78477 @@ -392,9 +392,10 @@ static char *cpio_replace_env(char *new_
78478 *env_var = *expanded = '\0';
78479 strncat(env_var, start + 2, end - start - 2);
78480 strncat(expanded, new_location, start - new_location);
78481 - strncat(expanded, getenv(env_var), PATH_MAX);
78482 - strncat(expanded, end + 1, PATH_MAX);
78483 + strncat(expanded, getenv(env_var), PATH_MAX - strlen(expanded));
78484 + strncat(expanded, end + 1, PATH_MAX - strlen(expanded));
78485 strncpy(new_location, expanded, PATH_MAX);
78486 + new_location[PATH_MAX] = 0;
78487 } else
78488 break;
78489 }
78490 diff -urNp linux-3.0.8/virt/kvm/kvm_main.c linux-3.0.8/virt/kvm/kvm_main.c
78491 --- linux-3.0.8/virt/kvm/kvm_main.c 2011-07-21 22:17:23.000000000 -0400
78492 +++ linux-3.0.8/virt/kvm/kvm_main.c 2011-08-23 21:47:56.000000000 -0400
78493 @@ -73,7 +73,7 @@ LIST_HEAD(vm_list);
78494
78495 static cpumask_var_t cpus_hardware_enabled;
78496 static int kvm_usage_count = 0;
78497 -static atomic_t hardware_enable_failed;
78498 +static atomic_unchecked_t hardware_enable_failed;
78499
78500 struct kmem_cache *kvm_vcpu_cache;
78501 EXPORT_SYMBOL_GPL(kvm_vcpu_cache);
78502 @@ -2176,7 +2176,7 @@ static void hardware_enable_nolock(void
78503
78504 if (r) {
78505 cpumask_clear_cpu(cpu, cpus_hardware_enabled);
78506 - atomic_inc(&hardware_enable_failed);
78507 + atomic_inc_unchecked(&hardware_enable_failed);
78508 printk(KERN_INFO "kvm: enabling virtualization on "
78509 "CPU%d failed\n", cpu);
78510 }
78511 @@ -2230,10 +2230,10 @@ static int hardware_enable_all(void)
78512
78513 kvm_usage_count++;
78514 if (kvm_usage_count == 1) {
78515 - atomic_set(&hardware_enable_failed, 0);
78516 + atomic_set_unchecked(&hardware_enable_failed, 0);
78517 on_each_cpu(hardware_enable_nolock, NULL, 1);
78518
78519 - if (atomic_read(&hardware_enable_failed)) {
78520 + if (atomic_read_unchecked(&hardware_enable_failed)) {
78521 hardware_disable_all_nolock();
78522 r = -EBUSY;
78523 }
78524 @@ -2498,7 +2498,7 @@ static void kvm_sched_out(struct preempt
78525 kvm_arch_vcpu_put(vcpu);
78526 }
78527
78528 -int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
78529 +int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
78530 struct module *module)
78531 {
78532 int r;
78533 @@ -2561,7 +2561,7 @@ int kvm_init(void *opaque, unsigned vcpu
78534 if (!vcpu_align)
78535 vcpu_align = __alignof__(struct kvm_vcpu);
78536 kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size, vcpu_align,
78537 - 0, NULL);
78538 + SLAB_USERCOPY, NULL);
78539 if (!kvm_vcpu_cache) {
78540 r = -ENOMEM;
78541 goto out_free_3;
78542 @@ -2571,9 +2571,11 @@ int kvm_init(void *opaque, unsigned vcpu
78543 if (r)
78544 goto out_free;
78545
78546 - kvm_chardev_ops.owner = module;
78547 - kvm_vm_fops.owner = module;
78548 - kvm_vcpu_fops.owner = module;
78549 + pax_open_kernel();
78550 + *(void **)&kvm_chardev_ops.owner = module;
78551 + *(void **)&kvm_vm_fops.owner = module;
78552 + *(void **)&kvm_vcpu_fops.owner = module;
78553 + pax_close_kernel();
78554
78555 r = misc_register(&kvm_dev);
78556 if (r) {