]> git.ipfire.org Git - thirdparty/grsecurity-scrape.git/blob - test/grsecurity-2.2.2-3.0.4-201109252223.patch
Auto commit, 1 new patch{es}.
[thirdparty/grsecurity-scrape.git] / test / grsecurity-2.2.2-3.0.4-201109252223.patch
1 diff -urNp linux-3.0.4/arch/alpha/include/asm/elf.h linux-3.0.4/arch/alpha/include/asm/elf.h
2 --- linux-3.0.4/arch/alpha/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
3 +++ linux-3.0.4/arch/alpha/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
4 @@ -90,6 +90,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
5
6 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
7
8 +#ifdef CONFIG_PAX_ASLR
9 +#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
10 +
11 +#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
12 +#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
13 +#endif
14 +
15 /* $0 is set by ld.so to a pointer to a function which might be
16 registered using atexit. This provides a mean for the dynamic
17 linker to call DT_FINI functions for shared libraries that have
18 diff -urNp linux-3.0.4/arch/alpha/include/asm/pgtable.h linux-3.0.4/arch/alpha/include/asm/pgtable.h
19 --- linux-3.0.4/arch/alpha/include/asm/pgtable.h 2011-07-21 22:17:23.000000000 -0400
20 +++ linux-3.0.4/arch/alpha/include/asm/pgtable.h 2011-08-23 21:47:55.000000000 -0400
21 @@ -101,6 +101,17 @@ struct vm_area_struct;
22 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
23 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
24 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
25 +
26 +#ifdef CONFIG_PAX_PAGEEXEC
27 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
28 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
29 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
30 +#else
31 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
32 +# define PAGE_COPY_NOEXEC PAGE_COPY
33 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
34 +#endif
35 +
36 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
37
38 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
39 diff -urNp linux-3.0.4/arch/alpha/kernel/module.c linux-3.0.4/arch/alpha/kernel/module.c
40 --- linux-3.0.4/arch/alpha/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
41 +++ linux-3.0.4/arch/alpha/kernel/module.c 2011-08-23 21:47:55.000000000 -0400
42 @@ -182,7 +182,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs,
43
44 /* The small sections were sorted to the end of the segment.
45 The following should definitely cover them. */
46 - gp = (u64)me->module_core + me->core_size - 0x8000;
47 + gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
48 got = sechdrs[me->arch.gotsecindex].sh_addr;
49
50 for (i = 0; i < n; i++) {
51 diff -urNp linux-3.0.4/arch/alpha/kernel/osf_sys.c linux-3.0.4/arch/alpha/kernel/osf_sys.c
52 --- linux-3.0.4/arch/alpha/kernel/osf_sys.c 2011-07-21 22:17:23.000000000 -0400
53 +++ linux-3.0.4/arch/alpha/kernel/osf_sys.c 2011-08-23 21:47:55.000000000 -0400
54 @@ -1145,7 +1145,7 @@ arch_get_unmapped_area_1(unsigned long a
55 /* At this point: (!vma || addr < vma->vm_end). */
56 if (limit - len < addr)
57 return -ENOMEM;
58 - if (!vma || addr + len <= vma->vm_start)
59 + if (check_heap_stack_gap(vma, addr, len))
60 return addr;
61 addr = vma->vm_end;
62 vma = vma->vm_next;
63 @@ -1181,6 +1181,10 @@ arch_get_unmapped_area(struct file *filp
64 merely specific addresses, but regions of memory -- perhaps
65 this feature should be incorporated into all ports? */
66
67 +#ifdef CONFIG_PAX_RANDMMAP
68 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
69 +#endif
70 +
71 if (addr) {
72 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
73 if (addr != (unsigned long) -ENOMEM)
74 @@ -1188,8 +1192,8 @@ arch_get_unmapped_area(struct file *filp
75 }
76
77 /* Next, try allocating at TASK_UNMAPPED_BASE. */
78 - addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
79 - len, limit);
80 + addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
81 +
82 if (addr != (unsigned long) -ENOMEM)
83 return addr;
84
85 diff -urNp linux-3.0.4/arch/alpha/mm/fault.c linux-3.0.4/arch/alpha/mm/fault.c
86 --- linux-3.0.4/arch/alpha/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
87 +++ linux-3.0.4/arch/alpha/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
88 @@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *
89 __reload_thread(pcb);
90 }
91
92 +#ifdef CONFIG_PAX_PAGEEXEC
93 +/*
94 + * PaX: decide what to do with offenders (regs->pc = fault address)
95 + *
96 + * returns 1 when task should be killed
97 + * 2 when patched PLT trampoline was detected
98 + * 3 when unpatched PLT trampoline was detected
99 + */
100 +static int pax_handle_fetch_fault(struct pt_regs *regs)
101 +{
102 +
103 +#ifdef CONFIG_PAX_EMUPLT
104 + int err;
105 +
106 + do { /* PaX: patched PLT emulation #1 */
107 + unsigned int ldah, ldq, jmp;
108 +
109 + err = get_user(ldah, (unsigned int *)regs->pc);
110 + err |= get_user(ldq, (unsigned int *)(regs->pc+4));
111 + err |= get_user(jmp, (unsigned int *)(regs->pc+8));
112 +
113 + if (err)
114 + break;
115 +
116 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
117 + (ldq & 0xFFFF0000U) == 0xA77B0000U &&
118 + jmp == 0x6BFB0000U)
119 + {
120 + unsigned long r27, addr;
121 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
122 + unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
123 +
124 + addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
125 + err = get_user(r27, (unsigned long *)addr);
126 + if (err)
127 + break;
128 +
129 + regs->r27 = r27;
130 + regs->pc = r27;
131 + return 2;
132 + }
133 + } while (0);
134 +
135 + do { /* PaX: patched PLT emulation #2 */
136 + unsigned int ldah, lda, br;
137 +
138 + err = get_user(ldah, (unsigned int *)regs->pc);
139 + err |= get_user(lda, (unsigned int *)(regs->pc+4));
140 + err |= get_user(br, (unsigned int *)(regs->pc+8));
141 +
142 + if (err)
143 + break;
144 +
145 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
146 + (lda & 0xFFFF0000U) == 0xA77B0000U &&
147 + (br & 0xFFE00000U) == 0xC3E00000U)
148 + {
149 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
150 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
151 + unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
152 +
153 + regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
154 + regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
155 + return 2;
156 + }
157 + } while (0);
158 +
159 + do { /* PaX: unpatched PLT emulation */
160 + unsigned int br;
161 +
162 + err = get_user(br, (unsigned int *)regs->pc);
163 +
164 + if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
165 + unsigned int br2, ldq, nop, jmp;
166 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
167 +
168 + addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
169 + err = get_user(br2, (unsigned int *)addr);
170 + err |= get_user(ldq, (unsigned int *)(addr+4));
171 + err |= get_user(nop, (unsigned int *)(addr+8));
172 + err |= get_user(jmp, (unsigned int *)(addr+12));
173 + err |= get_user(resolver, (unsigned long *)(addr+16));
174 +
175 + if (err)
176 + break;
177 +
178 + if (br2 == 0xC3600000U &&
179 + ldq == 0xA77B000CU &&
180 + nop == 0x47FF041FU &&
181 + jmp == 0x6B7B0000U)
182 + {
183 + regs->r28 = regs->pc+4;
184 + regs->r27 = addr+16;
185 + regs->pc = resolver;
186 + return 3;
187 + }
188 + }
189 + } while (0);
190 +#endif
191 +
192 + return 1;
193 +}
194 +
195 +void pax_report_insns(void *pc, void *sp)
196 +{
197 + unsigned long i;
198 +
199 + printk(KERN_ERR "PAX: bytes at PC: ");
200 + for (i = 0; i < 5; i++) {
201 + unsigned int c;
202 + if (get_user(c, (unsigned int *)pc+i))
203 + printk(KERN_CONT "???????? ");
204 + else
205 + printk(KERN_CONT "%08x ", c);
206 + }
207 + printk("\n");
208 +}
209 +#endif
210
211 /*
212 * This routine handles page faults. It determines the address,
213 @@ -131,8 +249,29 @@ do_page_fault(unsigned long address, uns
214 good_area:
215 si_code = SEGV_ACCERR;
216 if (cause < 0) {
217 - if (!(vma->vm_flags & VM_EXEC))
218 + if (!(vma->vm_flags & VM_EXEC)) {
219 +
220 +#ifdef CONFIG_PAX_PAGEEXEC
221 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
222 + goto bad_area;
223 +
224 + up_read(&mm->mmap_sem);
225 + switch (pax_handle_fetch_fault(regs)) {
226 +
227 +#ifdef CONFIG_PAX_EMUPLT
228 + case 2:
229 + case 3:
230 + return;
231 +#endif
232 +
233 + }
234 + pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
235 + do_group_exit(SIGKILL);
236 +#else
237 goto bad_area;
238 +#endif
239 +
240 + }
241 } else if (!cause) {
242 /* Allow reads even for write-only mappings */
243 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
244 diff -urNp linux-3.0.4/arch/arm/include/asm/elf.h linux-3.0.4/arch/arm/include/asm/elf.h
245 --- linux-3.0.4/arch/arm/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
246 +++ linux-3.0.4/arch/arm/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
247 @@ -116,7 +116,14 @@ int dump_task_regs(struct task_struct *t
248 the loader. We need to make sure that it is out of the way of the program
249 that it will "exec", and that there is sufficient room for the brk. */
250
251 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
252 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
253 +
254 +#ifdef CONFIG_PAX_ASLR
255 +#define PAX_ELF_ET_DYN_BASE 0x00008000UL
256 +
257 +#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
258 +#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
259 +#endif
260
261 /* When the program starts, a1 contains a pointer to a function to be
262 registered with atexit, as per the SVR4 ABI. A value of 0 means we
263 @@ -126,10 +133,6 @@ int dump_task_regs(struct task_struct *t
264 extern void elf_set_personality(const struct elf32_hdr *);
265 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
266
267 -struct mm_struct;
268 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
269 -#define arch_randomize_brk arch_randomize_brk
270 -
271 extern int vectors_user_mapping(void);
272 #define arch_setup_additional_pages(bprm, uses_interp) vectors_user_mapping()
273 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES
274 diff -urNp linux-3.0.4/arch/arm/include/asm/kmap_types.h linux-3.0.4/arch/arm/include/asm/kmap_types.h
275 --- linux-3.0.4/arch/arm/include/asm/kmap_types.h 2011-07-21 22:17:23.000000000 -0400
276 +++ linux-3.0.4/arch/arm/include/asm/kmap_types.h 2011-08-23 21:47:55.000000000 -0400
277 @@ -21,6 +21,7 @@ enum km_type {
278 KM_L1_CACHE,
279 KM_L2_CACHE,
280 KM_KDB,
281 + KM_CLEARPAGE,
282 KM_TYPE_NR
283 };
284
285 diff -urNp linux-3.0.4/arch/arm/include/asm/uaccess.h linux-3.0.4/arch/arm/include/asm/uaccess.h
286 --- linux-3.0.4/arch/arm/include/asm/uaccess.h 2011-07-21 22:17:23.000000000 -0400
287 +++ linux-3.0.4/arch/arm/include/asm/uaccess.h 2011-08-23 21:47:55.000000000 -0400
288 @@ -22,6 +22,8 @@
289 #define VERIFY_READ 0
290 #define VERIFY_WRITE 1
291
292 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
293 +
294 /*
295 * The exception table consists of pairs of addresses: the first is the
296 * address of an instruction that is allowed to fault, and the second is
297 @@ -387,8 +389,23 @@ do { \
298
299
300 #ifdef CONFIG_MMU
301 -extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
302 -extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
303 +extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
304 +extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
305 +
306 +static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
307 +{
308 + if (!__builtin_constant_p(n))
309 + check_object_size(to, n, false);
310 + return ___copy_from_user(to, from, n);
311 +}
312 +
313 +static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
314 +{
315 + if (!__builtin_constant_p(n))
316 + check_object_size(from, n, true);
317 + return ___copy_to_user(to, from, n);
318 +}
319 +
320 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
321 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
322 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
323 @@ -403,6 +420,9 @@ extern unsigned long __must_check __strn
324
325 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
326 {
327 + if ((long)n < 0)
328 + return n;
329 +
330 if (access_ok(VERIFY_READ, from, n))
331 n = __copy_from_user(to, from, n);
332 else /* security hole - plug it */
333 @@ -412,6 +432,9 @@ static inline unsigned long __must_check
334
335 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
336 {
337 + if ((long)n < 0)
338 + return n;
339 +
340 if (access_ok(VERIFY_WRITE, to, n))
341 n = __copy_to_user(to, from, n);
342 return n;
343 diff -urNp linux-3.0.4/arch/arm/kernel/armksyms.c linux-3.0.4/arch/arm/kernel/armksyms.c
344 --- linux-3.0.4/arch/arm/kernel/armksyms.c 2011-07-21 22:17:23.000000000 -0400
345 +++ linux-3.0.4/arch/arm/kernel/armksyms.c 2011-08-23 21:47:55.000000000 -0400
346 @@ -98,8 +98,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
347 #ifdef CONFIG_MMU
348 EXPORT_SYMBOL(copy_page);
349
350 -EXPORT_SYMBOL(__copy_from_user);
351 -EXPORT_SYMBOL(__copy_to_user);
352 +EXPORT_SYMBOL(___copy_from_user);
353 +EXPORT_SYMBOL(___copy_to_user);
354 EXPORT_SYMBOL(__clear_user);
355
356 EXPORT_SYMBOL(__get_user_1);
357 diff -urNp linux-3.0.4/arch/arm/kernel/process.c linux-3.0.4/arch/arm/kernel/process.c
358 --- linux-3.0.4/arch/arm/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
359 +++ linux-3.0.4/arch/arm/kernel/process.c 2011-08-23 21:47:55.000000000 -0400
360 @@ -28,7 +28,6 @@
361 #include <linux/tick.h>
362 #include <linux/utsname.h>
363 #include <linux/uaccess.h>
364 -#include <linux/random.h>
365 #include <linux/hw_breakpoint.h>
366
367 #include <asm/cacheflush.h>
368 @@ -479,12 +478,6 @@ unsigned long get_wchan(struct task_stru
369 return 0;
370 }
371
372 -unsigned long arch_randomize_brk(struct mm_struct *mm)
373 -{
374 - unsigned long range_end = mm->brk + 0x02000000;
375 - return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
376 -}
377 -
378 #ifdef CONFIG_MMU
379 /*
380 * The vectors page is always readable from user space for the
381 diff -urNp linux-3.0.4/arch/arm/kernel/traps.c linux-3.0.4/arch/arm/kernel/traps.c
382 --- linux-3.0.4/arch/arm/kernel/traps.c 2011-07-21 22:17:23.000000000 -0400
383 +++ linux-3.0.4/arch/arm/kernel/traps.c 2011-08-23 21:48:14.000000000 -0400
384 @@ -257,6 +257,8 @@ static int __die(const char *str, int er
385
386 static DEFINE_SPINLOCK(die_lock);
387
388 +extern void gr_handle_kernel_exploit(void);
389 +
390 /*
391 * This function is protected against re-entrancy.
392 */
393 @@ -284,6 +286,9 @@ void die(const char *str, struct pt_regs
394 panic("Fatal exception in interrupt");
395 if (panic_on_oops)
396 panic("Fatal exception");
397 +
398 + gr_handle_kernel_exploit();
399 +
400 if (ret != NOTIFY_STOP)
401 do_exit(SIGSEGV);
402 }
403 diff -urNp linux-3.0.4/arch/arm/lib/copy_from_user.S linux-3.0.4/arch/arm/lib/copy_from_user.S
404 --- linux-3.0.4/arch/arm/lib/copy_from_user.S 2011-07-21 22:17:23.000000000 -0400
405 +++ linux-3.0.4/arch/arm/lib/copy_from_user.S 2011-08-23 21:47:55.000000000 -0400
406 @@ -16,7 +16,7 @@
407 /*
408 * Prototype:
409 *
410 - * size_t __copy_from_user(void *to, const void *from, size_t n)
411 + * size_t ___copy_from_user(void *to, const void *from, size_t n)
412 *
413 * Purpose:
414 *
415 @@ -84,11 +84,11 @@
416
417 .text
418
419 -ENTRY(__copy_from_user)
420 +ENTRY(___copy_from_user)
421
422 #include "copy_template.S"
423
424 -ENDPROC(__copy_from_user)
425 +ENDPROC(___copy_from_user)
426
427 .pushsection .fixup,"ax"
428 .align 0
429 diff -urNp linux-3.0.4/arch/arm/lib/copy_to_user.S linux-3.0.4/arch/arm/lib/copy_to_user.S
430 --- linux-3.0.4/arch/arm/lib/copy_to_user.S 2011-07-21 22:17:23.000000000 -0400
431 +++ linux-3.0.4/arch/arm/lib/copy_to_user.S 2011-08-23 21:47:55.000000000 -0400
432 @@ -16,7 +16,7 @@
433 /*
434 * Prototype:
435 *
436 - * size_t __copy_to_user(void *to, const void *from, size_t n)
437 + * size_t ___copy_to_user(void *to, const void *from, size_t n)
438 *
439 * Purpose:
440 *
441 @@ -88,11 +88,11 @@
442 .text
443
444 ENTRY(__copy_to_user_std)
445 -WEAK(__copy_to_user)
446 +WEAK(___copy_to_user)
447
448 #include "copy_template.S"
449
450 -ENDPROC(__copy_to_user)
451 +ENDPROC(___copy_to_user)
452 ENDPROC(__copy_to_user_std)
453
454 .pushsection .fixup,"ax"
455 diff -urNp linux-3.0.4/arch/arm/lib/uaccess.S linux-3.0.4/arch/arm/lib/uaccess.S
456 --- linux-3.0.4/arch/arm/lib/uaccess.S 2011-07-21 22:17:23.000000000 -0400
457 +++ linux-3.0.4/arch/arm/lib/uaccess.S 2011-08-23 21:47:55.000000000 -0400
458 @@ -20,7 +20,7 @@
459
460 #define PAGE_SHIFT 12
461
462 -/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
463 +/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
464 * Purpose : copy a block to user memory from kernel memory
465 * Params : to - user memory
466 * : from - kernel memory
467 @@ -40,7 +40,7 @@ USER( T(strgtb) r3, [r0], #1) @ May f
468 sub r2, r2, ip
469 b .Lc2u_dest_aligned
470
471 -ENTRY(__copy_to_user)
472 +ENTRY(___copy_to_user)
473 stmfd sp!, {r2, r4 - r7, lr}
474 cmp r2, #4
475 blt .Lc2u_not_enough
476 @@ -278,14 +278,14 @@ USER( T(strgeb) r3, [r0], #1) @ May f
477 ldrgtb r3, [r1], #0
478 USER( T(strgtb) r3, [r0], #1) @ May fault
479 b .Lc2u_finished
480 -ENDPROC(__copy_to_user)
481 +ENDPROC(___copy_to_user)
482
483 .pushsection .fixup,"ax"
484 .align 0
485 9001: ldmfd sp!, {r0, r4 - r7, pc}
486 .popsection
487
488 -/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
489 +/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
490 * Purpose : copy a block from user memory to kernel memory
491 * Params : to - kernel memory
492 * : from - user memory
493 @@ -304,7 +304,7 @@ USER( T(ldrgtb) r3, [r1], #1) @ May f
494 sub r2, r2, ip
495 b .Lcfu_dest_aligned
496
497 -ENTRY(__copy_from_user)
498 +ENTRY(___copy_from_user)
499 stmfd sp!, {r0, r2, r4 - r7, lr}
500 cmp r2, #4
501 blt .Lcfu_not_enough
502 @@ -544,7 +544,7 @@ USER( T(ldrgeb) r3, [r1], #1) @ May f
503 USER( T(ldrgtb) r3, [r1], #1) @ May fault
504 strgtb r3, [r0], #1
505 b .Lcfu_finished
506 -ENDPROC(__copy_from_user)
507 +ENDPROC(___copy_from_user)
508
509 .pushsection .fixup,"ax"
510 .align 0
511 diff -urNp linux-3.0.4/arch/arm/lib/uaccess_with_memcpy.c linux-3.0.4/arch/arm/lib/uaccess_with_memcpy.c
512 --- linux-3.0.4/arch/arm/lib/uaccess_with_memcpy.c 2011-07-21 22:17:23.000000000 -0400
513 +++ linux-3.0.4/arch/arm/lib/uaccess_with_memcpy.c 2011-08-23 21:47:55.000000000 -0400
514 @@ -103,7 +103,7 @@ out:
515 }
516
517 unsigned long
518 -__copy_to_user(void __user *to, const void *from, unsigned long n)
519 +___copy_to_user(void __user *to, const void *from, unsigned long n)
520 {
521 /*
522 * This test is stubbed out of the main function above to keep
523 diff -urNp linux-3.0.4/arch/arm/mach-ux500/mbox-db5500.c linux-3.0.4/arch/arm/mach-ux500/mbox-db5500.c
524 --- linux-3.0.4/arch/arm/mach-ux500/mbox-db5500.c 2011-07-21 22:17:23.000000000 -0400
525 +++ linux-3.0.4/arch/arm/mach-ux500/mbox-db5500.c 2011-08-23 21:48:14.000000000 -0400
526 @@ -168,7 +168,7 @@ static ssize_t mbox_read_fifo(struct dev
527 return sprintf(buf, "0x%X\n", mbox_value);
528 }
529
530 -static DEVICE_ATTR(fifo, S_IWUGO | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
531 +static DEVICE_ATTR(fifo, S_IWUSR | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
532
533 static int mbox_show(struct seq_file *s, void *data)
534 {
535 diff -urNp linux-3.0.4/arch/arm/mm/fault.c linux-3.0.4/arch/arm/mm/fault.c
536 --- linux-3.0.4/arch/arm/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
537 +++ linux-3.0.4/arch/arm/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
538 @@ -182,6 +182,13 @@ __do_user_fault(struct task_struct *tsk,
539 }
540 #endif
541
542 +#ifdef CONFIG_PAX_PAGEEXEC
543 + if (fsr & FSR_LNX_PF) {
544 + pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
545 + do_group_exit(SIGKILL);
546 + }
547 +#endif
548 +
549 tsk->thread.address = addr;
550 tsk->thread.error_code = fsr;
551 tsk->thread.trap_no = 14;
552 @@ -379,6 +386,33 @@ do_page_fault(unsigned long addr, unsign
553 }
554 #endif /* CONFIG_MMU */
555
556 +#ifdef CONFIG_PAX_PAGEEXEC
557 +void pax_report_insns(void *pc, void *sp)
558 +{
559 + long i;
560 +
561 + printk(KERN_ERR "PAX: bytes at PC: ");
562 + for (i = 0; i < 20; i++) {
563 + unsigned char c;
564 + if (get_user(c, (__force unsigned char __user *)pc+i))
565 + printk(KERN_CONT "?? ");
566 + else
567 + printk(KERN_CONT "%02x ", c);
568 + }
569 + printk("\n");
570 +
571 + printk(KERN_ERR "PAX: bytes at SP-4: ");
572 + for (i = -1; i < 20; i++) {
573 + unsigned long c;
574 + if (get_user(c, (__force unsigned long __user *)sp+i))
575 + printk(KERN_CONT "???????? ");
576 + else
577 + printk(KERN_CONT "%08lx ", c);
578 + }
579 + printk("\n");
580 +}
581 +#endif
582 +
583 /*
584 * First Level Translation Fault Handler
585 *
586 diff -urNp linux-3.0.4/arch/arm/mm/mmap.c linux-3.0.4/arch/arm/mm/mmap.c
587 --- linux-3.0.4/arch/arm/mm/mmap.c 2011-07-21 22:17:23.000000000 -0400
588 +++ linux-3.0.4/arch/arm/mm/mmap.c 2011-08-23 21:47:55.000000000 -0400
589 @@ -65,6 +65,10 @@ arch_get_unmapped_area(struct file *filp
590 if (len > TASK_SIZE)
591 return -ENOMEM;
592
593 +#ifdef CONFIG_PAX_RANDMMAP
594 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
595 +#endif
596 +
597 if (addr) {
598 if (do_align)
599 addr = COLOUR_ALIGN(addr, pgoff);
600 @@ -72,15 +76,14 @@ arch_get_unmapped_area(struct file *filp
601 addr = PAGE_ALIGN(addr);
602
603 vma = find_vma(mm, addr);
604 - if (TASK_SIZE - len >= addr &&
605 - (!vma || addr + len <= vma->vm_start))
606 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
607 return addr;
608 }
609 if (len > mm->cached_hole_size) {
610 - start_addr = addr = mm->free_area_cache;
611 + start_addr = addr = mm->free_area_cache;
612 } else {
613 - start_addr = addr = TASK_UNMAPPED_BASE;
614 - mm->cached_hole_size = 0;
615 + start_addr = addr = mm->mmap_base;
616 + mm->cached_hole_size = 0;
617 }
618 /* 8 bits of randomness in 20 address space bits */
619 if ((current->flags & PF_RANDOMIZE) &&
620 @@ -100,14 +103,14 @@ full_search:
621 * Start a new search - just in case we missed
622 * some holes.
623 */
624 - if (start_addr != TASK_UNMAPPED_BASE) {
625 - start_addr = addr = TASK_UNMAPPED_BASE;
626 + if (start_addr != mm->mmap_base) {
627 + start_addr = addr = mm->mmap_base;
628 mm->cached_hole_size = 0;
629 goto full_search;
630 }
631 return -ENOMEM;
632 }
633 - if (!vma || addr + len <= vma->vm_start) {
634 + if (check_heap_stack_gap(vma, addr, len)) {
635 /*
636 * Remember the place where we stopped the search:
637 */
638 diff -urNp linux-3.0.4/arch/avr32/include/asm/elf.h linux-3.0.4/arch/avr32/include/asm/elf.h
639 --- linux-3.0.4/arch/avr32/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
640 +++ linux-3.0.4/arch/avr32/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
641 @@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpreg
642 the loader. We need to make sure that it is out of the way of the program
643 that it will "exec", and that there is sufficient room for the brk. */
644
645 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
646 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
647
648 +#ifdef CONFIG_PAX_ASLR
649 +#define PAX_ELF_ET_DYN_BASE 0x00001000UL
650 +
651 +#define PAX_DELTA_MMAP_LEN 15
652 +#define PAX_DELTA_STACK_LEN 15
653 +#endif
654
655 /* This yields a mask that user programs can use to figure out what
656 instruction set this CPU supports. This could be done in user space,
657 diff -urNp linux-3.0.4/arch/avr32/include/asm/kmap_types.h linux-3.0.4/arch/avr32/include/asm/kmap_types.h
658 --- linux-3.0.4/arch/avr32/include/asm/kmap_types.h 2011-07-21 22:17:23.000000000 -0400
659 +++ linux-3.0.4/arch/avr32/include/asm/kmap_types.h 2011-08-23 21:47:55.000000000 -0400
660 @@ -22,7 +22,8 @@ D(10) KM_IRQ0,
661 D(11) KM_IRQ1,
662 D(12) KM_SOFTIRQ0,
663 D(13) KM_SOFTIRQ1,
664 -D(14) KM_TYPE_NR
665 +D(14) KM_CLEARPAGE,
666 +D(15) KM_TYPE_NR
667 };
668
669 #undef D
670 diff -urNp linux-3.0.4/arch/avr32/mm/fault.c linux-3.0.4/arch/avr32/mm/fault.c
671 --- linux-3.0.4/arch/avr32/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
672 +++ linux-3.0.4/arch/avr32/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
673 @@ -41,6 +41,23 @@ static inline int notify_page_fault(stru
674
675 int exception_trace = 1;
676
677 +#ifdef CONFIG_PAX_PAGEEXEC
678 +void pax_report_insns(void *pc, void *sp)
679 +{
680 + unsigned long i;
681 +
682 + printk(KERN_ERR "PAX: bytes at PC: ");
683 + for (i = 0; i < 20; i++) {
684 + unsigned char c;
685 + if (get_user(c, (unsigned char *)pc+i))
686 + printk(KERN_CONT "???????? ");
687 + else
688 + printk(KERN_CONT "%02x ", c);
689 + }
690 + printk("\n");
691 +}
692 +#endif
693 +
694 /*
695 * This routine handles page faults. It determines the address and the
696 * problem, and then passes it off to one of the appropriate routines.
697 @@ -156,6 +173,16 @@ bad_area:
698 up_read(&mm->mmap_sem);
699
700 if (user_mode(regs)) {
701 +
702 +#ifdef CONFIG_PAX_PAGEEXEC
703 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
704 + if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
705 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
706 + do_group_exit(SIGKILL);
707 + }
708 + }
709 +#endif
710 +
711 if (exception_trace && printk_ratelimit())
712 printk("%s%s[%d]: segfault at %08lx pc %08lx "
713 "sp %08lx ecr %lu\n",
714 diff -urNp linux-3.0.4/arch/frv/include/asm/kmap_types.h linux-3.0.4/arch/frv/include/asm/kmap_types.h
715 --- linux-3.0.4/arch/frv/include/asm/kmap_types.h 2011-07-21 22:17:23.000000000 -0400
716 +++ linux-3.0.4/arch/frv/include/asm/kmap_types.h 2011-08-23 21:47:55.000000000 -0400
717 @@ -23,6 +23,7 @@ enum km_type {
718 KM_IRQ1,
719 KM_SOFTIRQ0,
720 KM_SOFTIRQ1,
721 + KM_CLEARPAGE,
722 KM_TYPE_NR
723 };
724
725 diff -urNp linux-3.0.4/arch/frv/mm/elf-fdpic.c linux-3.0.4/arch/frv/mm/elf-fdpic.c
726 --- linux-3.0.4/arch/frv/mm/elf-fdpic.c 2011-07-21 22:17:23.000000000 -0400
727 +++ linux-3.0.4/arch/frv/mm/elf-fdpic.c 2011-08-23 21:47:55.000000000 -0400
728 @@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(str
729 if (addr) {
730 addr = PAGE_ALIGN(addr);
731 vma = find_vma(current->mm, addr);
732 - if (TASK_SIZE - len >= addr &&
733 - (!vma || addr + len <= vma->vm_start))
734 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
735 goto success;
736 }
737
738 @@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(str
739 for (; vma; vma = vma->vm_next) {
740 if (addr > limit)
741 break;
742 - if (addr + len <= vma->vm_start)
743 + if (check_heap_stack_gap(vma, addr, len))
744 goto success;
745 addr = vma->vm_end;
746 }
747 @@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(str
748 for (; vma; vma = vma->vm_next) {
749 if (addr > limit)
750 break;
751 - if (addr + len <= vma->vm_start)
752 + if (check_heap_stack_gap(vma, addr, len))
753 goto success;
754 addr = vma->vm_end;
755 }
756 diff -urNp linux-3.0.4/arch/ia64/include/asm/elf.h linux-3.0.4/arch/ia64/include/asm/elf.h
757 --- linux-3.0.4/arch/ia64/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
758 +++ linux-3.0.4/arch/ia64/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
759 @@ -42,6 +42,13 @@
760 */
761 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
762
763 +#ifdef CONFIG_PAX_ASLR
764 +#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
765 +
766 +#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
767 +#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
768 +#endif
769 +
770 #define PT_IA_64_UNWIND 0x70000001
771
772 /* IA-64 relocations: */
773 diff -urNp linux-3.0.4/arch/ia64/include/asm/pgtable.h linux-3.0.4/arch/ia64/include/asm/pgtable.h
774 --- linux-3.0.4/arch/ia64/include/asm/pgtable.h 2011-07-21 22:17:23.000000000 -0400
775 +++ linux-3.0.4/arch/ia64/include/asm/pgtable.h 2011-08-23 21:47:55.000000000 -0400
776 @@ -12,7 +12,7 @@
777 * David Mosberger-Tang <davidm@hpl.hp.com>
778 */
779
780 -
781 +#include <linux/const.h>
782 #include <asm/mman.h>
783 #include <asm/page.h>
784 #include <asm/processor.h>
785 @@ -143,6 +143,17 @@
786 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
787 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
788 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
789 +
790 +#ifdef CONFIG_PAX_PAGEEXEC
791 +# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
792 +# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
793 +# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
794 +#else
795 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
796 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
797 +# define PAGE_COPY_NOEXEC PAGE_COPY
798 +#endif
799 +
800 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
801 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
802 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
803 diff -urNp linux-3.0.4/arch/ia64/include/asm/spinlock.h linux-3.0.4/arch/ia64/include/asm/spinlock.h
804 --- linux-3.0.4/arch/ia64/include/asm/spinlock.h 2011-07-21 22:17:23.000000000 -0400
805 +++ linux-3.0.4/arch/ia64/include/asm/spinlock.h 2011-08-23 21:47:55.000000000 -0400
806 @@ -72,7 +72,7 @@ static __always_inline void __ticket_spi
807 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
808
809 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
810 - ACCESS_ONCE(*p) = (tmp + 2) & ~1;
811 + ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
812 }
813
814 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
815 diff -urNp linux-3.0.4/arch/ia64/include/asm/uaccess.h linux-3.0.4/arch/ia64/include/asm/uaccess.h
816 --- linux-3.0.4/arch/ia64/include/asm/uaccess.h 2011-07-21 22:17:23.000000000 -0400
817 +++ linux-3.0.4/arch/ia64/include/asm/uaccess.h 2011-08-23 21:47:55.000000000 -0400
818 @@ -257,7 +257,7 @@ __copy_from_user (void *to, const void _
819 const void *__cu_from = (from); \
820 long __cu_len = (n); \
821 \
822 - if (__access_ok(__cu_to, __cu_len, get_fs())) \
823 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
824 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
825 __cu_len; \
826 })
827 @@ -269,7 +269,7 @@ __copy_from_user (void *to, const void _
828 long __cu_len = (n); \
829 \
830 __chk_user_ptr(__cu_from); \
831 - if (__access_ok(__cu_from, __cu_len, get_fs())) \
832 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
833 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
834 __cu_len; \
835 })
836 diff -urNp linux-3.0.4/arch/ia64/kernel/module.c linux-3.0.4/arch/ia64/kernel/module.c
837 --- linux-3.0.4/arch/ia64/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
838 +++ linux-3.0.4/arch/ia64/kernel/module.c 2011-08-23 21:47:55.000000000 -0400
839 @@ -315,8 +315,7 @@ module_alloc (unsigned long size)
840 void
841 module_free (struct module *mod, void *module_region)
842 {
843 - if (mod && mod->arch.init_unw_table &&
844 - module_region == mod->module_init) {
845 + if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
846 unw_remove_unwind_table(mod->arch.init_unw_table);
847 mod->arch.init_unw_table = NULL;
848 }
849 @@ -502,15 +501,39 @@ module_frob_arch_sections (Elf_Ehdr *ehd
850 }
851
852 static inline int
853 +in_init_rx (const struct module *mod, uint64_t addr)
854 +{
855 + return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
856 +}
857 +
858 +static inline int
859 +in_init_rw (const struct module *mod, uint64_t addr)
860 +{
861 + return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
862 +}
863 +
864 +static inline int
865 in_init (const struct module *mod, uint64_t addr)
866 {
867 - return addr - (uint64_t) mod->module_init < mod->init_size;
868 + return in_init_rx(mod, addr) || in_init_rw(mod, addr);
869 +}
870 +
871 +static inline int
872 +in_core_rx (const struct module *mod, uint64_t addr)
873 +{
874 + return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
875 +}
876 +
877 +static inline int
878 +in_core_rw (const struct module *mod, uint64_t addr)
879 +{
880 + return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
881 }
882
883 static inline int
884 in_core (const struct module *mod, uint64_t addr)
885 {
886 - return addr - (uint64_t) mod->module_core < mod->core_size;
887 + return in_core_rx(mod, addr) || in_core_rw(mod, addr);
888 }
889
890 static inline int
891 @@ -693,7 +716,14 @@ do_reloc (struct module *mod, uint8_t r_
892 break;
893
894 case RV_BDREL:
895 - val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
896 + if (in_init_rx(mod, val))
897 + val -= (uint64_t) mod->module_init_rx;
898 + else if (in_init_rw(mod, val))
899 + val -= (uint64_t) mod->module_init_rw;
900 + else if (in_core_rx(mod, val))
901 + val -= (uint64_t) mod->module_core_rx;
902 + else if (in_core_rw(mod, val))
903 + val -= (uint64_t) mod->module_core_rw;
904 break;
905
906 case RV_LTV:
907 @@ -828,15 +858,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs,
908 * addresses have been selected...
909 */
910 uint64_t gp;
911 - if (mod->core_size > MAX_LTOFF)
912 + if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
913 /*
914 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
915 * at the end of the module.
916 */
917 - gp = mod->core_size - MAX_LTOFF / 2;
918 + gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
919 else
920 - gp = mod->core_size / 2;
921 - gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
922 + gp = (mod->core_size_rx + mod->core_size_rw) / 2;
923 + gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
924 mod->arch.gp = gp;
925 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
926 }
927 diff -urNp linux-3.0.4/arch/ia64/kernel/sys_ia64.c linux-3.0.4/arch/ia64/kernel/sys_ia64.c
928 --- linux-3.0.4/arch/ia64/kernel/sys_ia64.c 2011-07-21 22:17:23.000000000 -0400
929 +++ linux-3.0.4/arch/ia64/kernel/sys_ia64.c 2011-08-23 21:47:55.000000000 -0400
930 @@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *fil
931 if (REGION_NUMBER(addr) == RGN_HPAGE)
932 addr = 0;
933 #endif
934 +
935 +#ifdef CONFIG_PAX_RANDMMAP
936 + if (mm->pax_flags & MF_PAX_RANDMMAP)
937 + addr = mm->free_area_cache;
938 + else
939 +#endif
940 +
941 if (!addr)
942 addr = mm->free_area_cache;
943
944 @@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *fil
945 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
946 /* At this point: (!vma || addr < vma->vm_end). */
947 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
948 - if (start_addr != TASK_UNMAPPED_BASE) {
949 + if (start_addr != mm->mmap_base) {
950 /* Start a new search --- just in case we missed some holes. */
951 - addr = TASK_UNMAPPED_BASE;
952 + addr = mm->mmap_base;
953 goto full_search;
954 }
955 return -ENOMEM;
956 }
957 - if (!vma || addr + len <= vma->vm_start) {
958 + if (check_heap_stack_gap(vma, addr, len)) {
959 /* Remember the address where we stopped this search: */
960 mm->free_area_cache = addr + len;
961 return addr;
962 diff -urNp linux-3.0.4/arch/ia64/kernel/vmlinux.lds.S linux-3.0.4/arch/ia64/kernel/vmlinux.lds.S
963 --- linux-3.0.4/arch/ia64/kernel/vmlinux.lds.S 2011-07-21 22:17:23.000000000 -0400
964 +++ linux-3.0.4/arch/ia64/kernel/vmlinux.lds.S 2011-08-23 21:47:55.000000000 -0400
965 @@ -199,7 +199,7 @@ SECTIONS {
966 /* Per-cpu data: */
967 . = ALIGN(PERCPU_PAGE_SIZE);
968 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
969 - __phys_per_cpu_start = __per_cpu_load;
970 + __phys_per_cpu_start = per_cpu_load;
971 /*
972 * ensure percpu data fits
973 * into percpu page size
974 diff -urNp linux-3.0.4/arch/ia64/mm/fault.c linux-3.0.4/arch/ia64/mm/fault.c
975 --- linux-3.0.4/arch/ia64/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
976 +++ linux-3.0.4/arch/ia64/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
977 @@ -73,6 +73,23 @@ mapped_kernel_page_is_present (unsigned
978 return pte_present(pte);
979 }
980
981 +#ifdef CONFIG_PAX_PAGEEXEC
982 +void pax_report_insns(void *pc, void *sp)
983 +{
984 + unsigned long i;
985 +
986 + printk(KERN_ERR "PAX: bytes at PC: ");
987 + for (i = 0; i < 8; i++) {
988 + unsigned int c;
989 + if (get_user(c, (unsigned int *)pc+i))
990 + printk(KERN_CONT "???????? ");
991 + else
992 + printk(KERN_CONT "%08x ", c);
993 + }
994 + printk("\n");
995 +}
996 +#endif
997 +
998 void __kprobes
999 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
1000 {
1001 @@ -146,9 +163,23 @@ ia64_do_page_fault (unsigned long addres
1002 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
1003 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
1004
1005 - if ((vma->vm_flags & mask) != mask)
1006 + if ((vma->vm_flags & mask) != mask) {
1007 +
1008 +#ifdef CONFIG_PAX_PAGEEXEC
1009 + if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
1010 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
1011 + goto bad_area;
1012 +
1013 + up_read(&mm->mmap_sem);
1014 + pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
1015 + do_group_exit(SIGKILL);
1016 + }
1017 +#endif
1018 +
1019 goto bad_area;
1020
1021 + }
1022 +
1023 /*
1024 * If for any reason at all we couldn't handle the fault, make
1025 * sure we exit gracefully rather than endlessly redo the
1026 diff -urNp linux-3.0.4/arch/ia64/mm/hugetlbpage.c linux-3.0.4/arch/ia64/mm/hugetlbpage.c
1027 --- linux-3.0.4/arch/ia64/mm/hugetlbpage.c 2011-07-21 22:17:23.000000000 -0400
1028 +++ linux-3.0.4/arch/ia64/mm/hugetlbpage.c 2011-08-23 21:47:55.000000000 -0400
1029 @@ -171,7 +171,7 @@ unsigned long hugetlb_get_unmapped_area(
1030 /* At this point: (!vmm || addr < vmm->vm_end). */
1031 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
1032 return -ENOMEM;
1033 - if (!vmm || (addr + len) <= vmm->vm_start)
1034 + if (check_heap_stack_gap(vmm, addr, len))
1035 return addr;
1036 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
1037 }
1038 diff -urNp linux-3.0.4/arch/ia64/mm/init.c linux-3.0.4/arch/ia64/mm/init.c
1039 --- linux-3.0.4/arch/ia64/mm/init.c 2011-07-21 22:17:23.000000000 -0400
1040 +++ linux-3.0.4/arch/ia64/mm/init.c 2011-08-23 21:47:55.000000000 -0400
1041 @@ -120,6 +120,19 @@ ia64_init_addr_space (void)
1042 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
1043 vma->vm_end = vma->vm_start + PAGE_SIZE;
1044 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
1045 +
1046 +#ifdef CONFIG_PAX_PAGEEXEC
1047 + if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
1048 + vma->vm_flags &= ~VM_EXEC;
1049 +
1050 +#ifdef CONFIG_PAX_MPROTECT
1051 + if (current->mm->pax_flags & MF_PAX_MPROTECT)
1052 + vma->vm_flags &= ~VM_MAYEXEC;
1053 +#endif
1054 +
1055 + }
1056 +#endif
1057 +
1058 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
1059 down_write(&current->mm->mmap_sem);
1060 if (insert_vm_struct(current->mm, vma)) {
1061 diff -urNp linux-3.0.4/arch/m32r/lib/usercopy.c linux-3.0.4/arch/m32r/lib/usercopy.c
1062 --- linux-3.0.4/arch/m32r/lib/usercopy.c 2011-07-21 22:17:23.000000000 -0400
1063 +++ linux-3.0.4/arch/m32r/lib/usercopy.c 2011-08-23 21:47:55.000000000 -0400
1064 @@ -14,6 +14,9 @@
1065 unsigned long
1066 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1067 {
1068 + if ((long)n < 0)
1069 + return n;
1070 +
1071 prefetch(from);
1072 if (access_ok(VERIFY_WRITE, to, n))
1073 __copy_user(to,from,n);
1074 @@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to,
1075 unsigned long
1076 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
1077 {
1078 + if ((long)n < 0)
1079 + return n;
1080 +
1081 prefetchw(to);
1082 if (access_ok(VERIFY_READ, from, n))
1083 __copy_user_zeroing(to,from,n);
1084 diff -urNp linux-3.0.4/arch/mips/include/asm/elf.h linux-3.0.4/arch/mips/include/asm/elf.h
1085 --- linux-3.0.4/arch/mips/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
1086 +++ linux-3.0.4/arch/mips/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
1087 @@ -372,13 +372,16 @@ extern const char *__elf_platform;
1088 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1089 #endif
1090
1091 +#ifdef CONFIG_PAX_ASLR
1092 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1093 +
1094 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1095 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1096 +#endif
1097 +
1098 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
1099 struct linux_binprm;
1100 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
1101 int uses_interp);
1102
1103 -struct mm_struct;
1104 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1105 -#define arch_randomize_brk arch_randomize_brk
1106 -
1107 #endif /* _ASM_ELF_H */
1108 diff -urNp linux-3.0.4/arch/mips/include/asm/page.h linux-3.0.4/arch/mips/include/asm/page.h
1109 --- linux-3.0.4/arch/mips/include/asm/page.h 2011-07-21 22:17:23.000000000 -0400
1110 +++ linux-3.0.4/arch/mips/include/asm/page.h 2011-08-23 21:47:55.000000000 -0400
1111 @@ -93,7 +93,7 @@ extern void copy_user_highpage(struct pa
1112 #ifdef CONFIG_CPU_MIPS32
1113 typedef struct { unsigned long pte_low, pte_high; } pte_t;
1114 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
1115 - #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
1116 + #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
1117 #else
1118 typedef struct { unsigned long long pte; } pte_t;
1119 #define pte_val(x) ((x).pte)
1120 diff -urNp linux-3.0.4/arch/mips/include/asm/system.h linux-3.0.4/arch/mips/include/asm/system.h
1121 --- linux-3.0.4/arch/mips/include/asm/system.h 2011-07-21 22:17:23.000000000 -0400
1122 +++ linux-3.0.4/arch/mips/include/asm/system.h 2011-08-23 21:47:55.000000000 -0400
1123 @@ -230,6 +230,6 @@ extern void per_cpu_trap_init(void);
1124 */
1125 #define __ARCH_WANT_UNLOCKED_CTXSW
1126
1127 -extern unsigned long arch_align_stack(unsigned long sp);
1128 +#define arch_align_stack(x) ((x) & ~0xfUL)
1129
1130 #endif /* _ASM_SYSTEM_H */
1131 diff -urNp linux-3.0.4/arch/mips/kernel/binfmt_elfn32.c linux-3.0.4/arch/mips/kernel/binfmt_elfn32.c
1132 --- linux-3.0.4/arch/mips/kernel/binfmt_elfn32.c 2011-07-21 22:17:23.000000000 -0400
1133 +++ linux-3.0.4/arch/mips/kernel/binfmt_elfn32.c 2011-08-23 21:47:55.000000000 -0400
1134 @@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1135 #undef ELF_ET_DYN_BASE
1136 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1137
1138 +#ifdef CONFIG_PAX_ASLR
1139 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1140 +
1141 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1142 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1143 +#endif
1144 +
1145 #include <asm/processor.h>
1146 #include <linux/module.h>
1147 #include <linux/elfcore.h>
1148 diff -urNp linux-3.0.4/arch/mips/kernel/binfmt_elfo32.c linux-3.0.4/arch/mips/kernel/binfmt_elfo32.c
1149 --- linux-3.0.4/arch/mips/kernel/binfmt_elfo32.c 2011-07-21 22:17:23.000000000 -0400
1150 +++ linux-3.0.4/arch/mips/kernel/binfmt_elfo32.c 2011-08-23 21:47:55.000000000 -0400
1151 @@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1152 #undef ELF_ET_DYN_BASE
1153 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1154
1155 +#ifdef CONFIG_PAX_ASLR
1156 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1157 +
1158 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1159 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1160 +#endif
1161 +
1162 #include <asm/processor.h>
1163
1164 /*
1165 diff -urNp linux-3.0.4/arch/mips/kernel/process.c linux-3.0.4/arch/mips/kernel/process.c
1166 --- linux-3.0.4/arch/mips/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
1167 +++ linux-3.0.4/arch/mips/kernel/process.c 2011-08-23 21:47:55.000000000 -0400
1168 @@ -473,15 +473,3 @@ unsigned long get_wchan(struct task_stru
1169 out:
1170 return pc;
1171 }
1172 -
1173 -/*
1174 - * Don't forget that the stack pointer must be aligned on a 8 bytes
1175 - * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
1176 - */
1177 -unsigned long arch_align_stack(unsigned long sp)
1178 -{
1179 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
1180 - sp -= get_random_int() & ~PAGE_MASK;
1181 -
1182 - return sp & ALMASK;
1183 -}
1184 diff -urNp linux-3.0.4/arch/mips/mm/fault.c linux-3.0.4/arch/mips/mm/fault.c
1185 --- linux-3.0.4/arch/mips/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
1186 +++ linux-3.0.4/arch/mips/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
1187 @@ -28,6 +28,23 @@
1188 #include <asm/highmem.h> /* For VMALLOC_END */
1189 #include <linux/kdebug.h>
1190
1191 +#ifdef CONFIG_PAX_PAGEEXEC
1192 +void pax_report_insns(void *pc, void *sp)
1193 +{
1194 + unsigned long i;
1195 +
1196 + printk(KERN_ERR "PAX: bytes at PC: ");
1197 + for (i = 0; i < 5; i++) {
1198 + unsigned int c;
1199 + if (get_user(c, (unsigned int *)pc+i))
1200 + printk(KERN_CONT "???????? ");
1201 + else
1202 + printk(KERN_CONT "%08x ", c);
1203 + }
1204 + printk("\n");
1205 +}
1206 +#endif
1207 +
1208 /*
1209 * This routine handles page faults. It determines the address,
1210 * and the problem, and then passes it off to one of the appropriate
1211 diff -urNp linux-3.0.4/arch/mips/mm/mmap.c linux-3.0.4/arch/mips/mm/mmap.c
1212 --- linux-3.0.4/arch/mips/mm/mmap.c 2011-07-21 22:17:23.000000000 -0400
1213 +++ linux-3.0.4/arch/mips/mm/mmap.c 2011-08-23 21:47:55.000000000 -0400
1214 @@ -48,14 +48,18 @@ unsigned long arch_get_unmapped_area(str
1215 do_color_align = 0;
1216 if (filp || (flags & MAP_SHARED))
1217 do_color_align = 1;
1218 +
1219 +#ifdef CONFIG_PAX_RANDMMAP
1220 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
1221 +#endif
1222 +
1223 if (addr) {
1224 if (do_color_align)
1225 addr = COLOUR_ALIGN(addr, pgoff);
1226 else
1227 addr = PAGE_ALIGN(addr);
1228 vmm = find_vma(current->mm, addr);
1229 - if (TASK_SIZE - len >= addr &&
1230 - (!vmm || addr + len <= vmm->vm_start))
1231 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vmm, addr, len))
1232 return addr;
1233 }
1234 addr = current->mm->mmap_base;
1235 @@ -68,7 +72,7 @@ unsigned long arch_get_unmapped_area(str
1236 /* At this point: (!vmm || addr < vmm->vm_end). */
1237 if (TASK_SIZE - len < addr)
1238 return -ENOMEM;
1239 - if (!vmm || addr + len <= vmm->vm_start)
1240 + if (check_heap_stack_gap(vmm, addr, len))
1241 return addr;
1242 addr = vmm->vm_end;
1243 if (do_color_align)
1244 @@ -93,30 +97,3 @@ void arch_pick_mmap_layout(struct mm_str
1245 mm->get_unmapped_area = arch_get_unmapped_area;
1246 mm->unmap_area = arch_unmap_area;
1247 }
1248 -
1249 -static inline unsigned long brk_rnd(void)
1250 -{
1251 - unsigned long rnd = get_random_int();
1252 -
1253 - rnd = rnd << PAGE_SHIFT;
1254 - /* 8MB for 32bit, 256MB for 64bit */
1255 - if (TASK_IS_32BIT_ADDR)
1256 - rnd = rnd & 0x7ffffful;
1257 - else
1258 - rnd = rnd & 0xffffffful;
1259 -
1260 - return rnd;
1261 -}
1262 -
1263 -unsigned long arch_randomize_brk(struct mm_struct *mm)
1264 -{
1265 - unsigned long base = mm->brk;
1266 - unsigned long ret;
1267 -
1268 - ret = PAGE_ALIGN(base + brk_rnd());
1269 -
1270 - if (ret < mm->brk)
1271 - return mm->brk;
1272 -
1273 - return ret;
1274 -}
1275 diff -urNp linux-3.0.4/arch/parisc/include/asm/elf.h linux-3.0.4/arch/parisc/include/asm/elf.h
1276 --- linux-3.0.4/arch/parisc/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
1277 +++ linux-3.0.4/arch/parisc/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
1278 @@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration..
1279
1280 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
1281
1282 +#ifdef CONFIG_PAX_ASLR
1283 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
1284 +
1285 +#define PAX_DELTA_MMAP_LEN 16
1286 +#define PAX_DELTA_STACK_LEN 16
1287 +#endif
1288 +
1289 /* This yields a mask that user programs can use to figure out what
1290 instruction set this CPU supports. This could be done in user space,
1291 but it's not easy, and we've already done it here. */
1292 diff -urNp linux-3.0.4/arch/parisc/include/asm/pgtable.h linux-3.0.4/arch/parisc/include/asm/pgtable.h
1293 --- linux-3.0.4/arch/parisc/include/asm/pgtable.h 2011-07-21 22:17:23.000000000 -0400
1294 +++ linux-3.0.4/arch/parisc/include/asm/pgtable.h 2011-08-23 21:47:55.000000000 -0400
1295 @@ -210,6 +210,17 @@ struct vm_area_struct;
1296 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
1297 #define PAGE_COPY PAGE_EXECREAD
1298 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
1299 +
1300 +#ifdef CONFIG_PAX_PAGEEXEC
1301 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
1302 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1303 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1304 +#else
1305 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
1306 +# define PAGE_COPY_NOEXEC PAGE_COPY
1307 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
1308 +#endif
1309 +
1310 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
1311 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
1312 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
1313 diff -urNp linux-3.0.4/arch/parisc/kernel/module.c linux-3.0.4/arch/parisc/kernel/module.c
1314 --- linux-3.0.4/arch/parisc/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
1315 +++ linux-3.0.4/arch/parisc/kernel/module.c 2011-08-23 21:47:55.000000000 -0400
1316 @@ -98,16 +98,38 @@
1317
1318 /* three functions to determine where in the module core
1319 * or init pieces the location is */
1320 +static inline int in_init_rx(struct module *me, void *loc)
1321 +{
1322 + return (loc >= me->module_init_rx &&
1323 + loc < (me->module_init_rx + me->init_size_rx));
1324 +}
1325 +
1326 +static inline int in_init_rw(struct module *me, void *loc)
1327 +{
1328 + return (loc >= me->module_init_rw &&
1329 + loc < (me->module_init_rw + me->init_size_rw));
1330 +}
1331 +
1332 static inline int in_init(struct module *me, void *loc)
1333 {
1334 - return (loc >= me->module_init &&
1335 - loc <= (me->module_init + me->init_size));
1336 + return in_init_rx(me, loc) || in_init_rw(me, loc);
1337 +}
1338 +
1339 +static inline int in_core_rx(struct module *me, void *loc)
1340 +{
1341 + return (loc >= me->module_core_rx &&
1342 + loc < (me->module_core_rx + me->core_size_rx));
1343 +}
1344 +
1345 +static inline int in_core_rw(struct module *me, void *loc)
1346 +{
1347 + return (loc >= me->module_core_rw &&
1348 + loc < (me->module_core_rw + me->core_size_rw));
1349 }
1350
1351 static inline int in_core(struct module *me, void *loc)
1352 {
1353 - return (loc >= me->module_core &&
1354 - loc <= (me->module_core + me->core_size));
1355 + return in_core_rx(me, loc) || in_core_rw(me, loc);
1356 }
1357
1358 static inline int in_local(struct module *me, void *loc)
1359 @@ -373,13 +395,13 @@ int module_frob_arch_sections(CONST Elf_
1360 }
1361
1362 /* align things a bit */
1363 - me->core_size = ALIGN(me->core_size, 16);
1364 - me->arch.got_offset = me->core_size;
1365 - me->core_size += gots * sizeof(struct got_entry);
1366 -
1367 - me->core_size = ALIGN(me->core_size, 16);
1368 - me->arch.fdesc_offset = me->core_size;
1369 - me->core_size += fdescs * sizeof(Elf_Fdesc);
1370 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
1371 + me->arch.got_offset = me->core_size_rw;
1372 + me->core_size_rw += gots * sizeof(struct got_entry);
1373 +
1374 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
1375 + me->arch.fdesc_offset = me->core_size_rw;
1376 + me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
1377
1378 me->arch.got_max = gots;
1379 me->arch.fdesc_max = fdescs;
1380 @@ -397,7 +419,7 @@ static Elf64_Word get_got(struct module
1381
1382 BUG_ON(value == 0);
1383
1384 - got = me->module_core + me->arch.got_offset;
1385 + got = me->module_core_rw + me->arch.got_offset;
1386 for (i = 0; got[i].addr; i++)
1387 if (got[i].addr == value)
1388 goto out;
1389 @@ -415,7 +437,7 @@ static Elf64_Word get_got(struct module
1390 #ifdef CONFIG_64BIT
1391 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
1392 {
1393 - Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
1394 + Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
1395
1396 if (!value) {
1397 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
1398 @@ -433,7 +455,7 @@ static Elf_Addr get_fdesc(struct module
1399
1400 /* Create new one */
1401 fdesc->addr = value;
1402 - fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1403 + fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1404 return (Elf_Addr)fdesc;
1405 }
1406 #endif /* CONFIG_64BIT */
1407 @@ -857,7 +879,7 @@ register_unwind_table(struct module *me,
1408
1409 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
1410 end = table + sechdrs[me->arch.unwind_section].sh_size;
1411 - gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1412 + gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1413
1414 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
1415 me->arch.unwind_section, table, end, gp);
1416 diff -urNp linux-3.0.4/arch/parisc/kernel/sys_parisc.c linux-3.0.4/arch/parisc/kernel/sys_parisc.c
1417 --- linux-3.0.4/arch/parisc/kernel/sys_parisc.c 2011-07-21 22:17:23.000000000 -0400
1418 +++ linux-3.0.4/arch/parisc/kernel/sys_parisc.c 2011-08-23 21:47:55.000000000 -0400
1419 @@ -43,7 +43,7 @@ static unsigned long get_unshared_area(u
1420 /* At this point: (!vma || addr < vma->vm_end). */
1421 if (TASK_SIZE - len < addr)
1422 return -ENOMEM;
1423 - if (!vma || addr + len <= vma->vm_start)
1424 + if (check_heap_stack_gap(vma, addr, len))
1425 return addr;
1426 addr = vma->vm_end;
1427 }
1428 @@ -79,7 +79,7 @@ static unsigned long get_shared_area(str
1429 /* At this point: (!vma || addr < vma->vm_end). */
1430 if (TASK_SIZE - len < addr)
1431 return -ENOMEM;
1432 - if (!vma || addr + len <= vma->vm_start)
1433 + if (check_heap_stack_gap(vma, addr, len))
1434 return addr;
1435 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
1436 if (addr < vma->vm_end) /* handle wraparound */
1437 @@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(str
1438 if (flags & MAP_FIXED)
1439 return addr;
1440 if (!addr)
1441 - addr = TASK_UNMAPPED_BASE;
1442 + addr = current->mm->mmap_base;
1443
1444 if (filp) {
1445 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
1446 diff -urNp linux-3.0.4/arch/parisc/kernel/traps.c linux-3.0.4/arch/parisc/kernel/traps.c
1447 --- linux-3.0.4/arch/parisc/kernel/traps.c 2011-07-21 22:17:23.000000000 -0400
1448 +++ linux-3.0.4/arch/parisc/kernel/traps.c 2011-08-23 21:47:55.000000000 -0400
1449 @@ -733,9 +733,7 @@ void notrace handle_interruption(int cod
1450
1451 down_read(&current->mm->mmap_sem);
1452 vma = find_vma(current->mm,regs->iaoq[0]);
1453 - if (vma && (regs->iaoq[0] >= vma->vm_start)
1454 - && (vma->vm_flags & VM_EXEC)) {
1455 -
1456 + if (vma && (regs->iaoq[0] >= vma->vm_start)) {
1457 fault_address = regs->iaoq[0];
1458 fault_space = regs->iasq[0];
1459
1460 diff -urNp linux-3.0.4/arch/parisc/mm/fault.c linux-3.0.4/arch/parisc/mm/fault.c
1461 --- linux-3.0.4/arch/parisc/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
1462 +++ linux-3.0.4/arch/parisc/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
1463 @@ -15,6 +15,7 @@
1464 #include <linux/sched.h>
1465 #include <linux/interrupt.h>
1466 #include <linux/module.h>
1467 +#include <linux/unistd.h>
1468
1469 #include <asm/uaccess.h>
1470 #include <asm/traps.h>
1471 @@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, ex
1472 static unsigned long
1473 parisc_acctyp(unsigned long code, unsigned int inst)
1474 {
1475 - if (code == 6 || code == 16)
1476 + if (code == 6 || code == 7 || code == 16)
1477 return VM_EXEC;
1478
1479 switch (inst & 0xf0000000) {
1480 @@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsign
1481 }
1482 #endif
1483
1484 +#ifdef CONFIG_PAX_PAGEEXEC
1485 +/*
1486 + * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
1487 + *
1488 + * returns 1 when task should be killed
1489 + * 2 when rt_sigreturn trampoline was detected
1490 + * 3 when unpatched PLT trampoline was detected
1491 + */
1492 +static int pax_handle_fetch_fault(struct pt_regs *regs)
1493 +{
1494 +
1495 +#ifdef CONFIG_PAX_EMUPLT
1496 + int err;
1497 +
1498 + do { /* PaX: unpatched PLT emulation */
1499 + unsigned int bl, depwi;
1500 +
1501 + err = get_user(bl, (unsigned int *)instruction_pointer(regs));
1502 + err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
1503 +
1504 + if (err)
1505 + break;
1506 +
1507 + if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
1508 + unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
1509 +
1510 + err = get_user(ldw, (unsigned int *)addr);
1511 + err |= get_user(bv, (unsigned int *)(addr+4));
1512 + err |= get_user(ldw2, (unsigned int *)(addr+8));
1513 +
1514 + if (err)
1515 + break;
1516 +
1517 + if (ldw == 0x0E801096U &&
1518 + bv == 0xEAC0C000U &&
1519 + ldw2 == 0x0E881095U)
1520 + {
1521 + unsigned int resolver, map;
1522 +
1523 + err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
1524 + err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
1525 + if (err)
1526 + break;
1527 +
1528 + regs->gr[20] = instruction_pointer(regs)+8;
1529 + regs->gr[21] = map;
1530 + regs->gr[22] = resolver;
1531 + regs->iaoq[0] = resolver | 3UL;
1532 + regs->iaoq[1] = regs->iaoq[0] + 4;
1533 + return 3;
1534 + }
1535 + }
1536 + } while (0);
1537 +#endif
1538 +
1539 +#ifdef CONFIG_PAX_EMUTRAMP
1540 +
1541 +#ifndef CONFIG_PAX_EMUSIGRT
1542 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
1543 + return 1;
1544 +#endif
1545 +
1546 + do { /* PaX: rt_sigreturn emulation */
1547 + unsigned int ldi1, ldi2, bel, nop;
1548 +
1549 + err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
1550 + err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
1551 + err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
1552 + err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
1553 +
1554 + if (err)
1555 + break;
1556 +
1557 + if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
1558 + ldi2 == 0x3414015AU &&
1559 + bel == 0xE4008200U &&
1560 + nop == 0x08000240U)
1561 + {
1562 + regs->gr[25] = (ldi1 & 2) >> 1;
1563 + regs->gr[20] = __NR_rt_sigreturn;
1564 + regs->gr[31] = regs->iaoq[1] + 16;
1565 + regs->sr[0] = regs->iasq[1];
1566 + regs->iaoq[0] = 0x100UL;
1567 + regs->iaoq[1] = regs->iaoq[0] + 4;
1568 + regs->iasq[0] = regs->sr[2];
1569 + regs->iasq[1] = regs->sr[2];
1570 + return 2;
1571 + }
1572 + } while (0);
1573 +#endif
1574 +
1575 + return 1;
1576 +}
1577 +
1578 +void pax_report_insns(void *pc, void *sp)
1579 +{
1580 + unsigned long i;
1581 +
1582 + printk(KERN_ERR "PAX: bytes at PC: ");
1583 + for (i = 0; i < 5; i++) {
1584 + unsigned int c;
1585 + if (get_user(c, (unsigned int *)pc+i))
1586 + printk(KERN_CONT "???????? ");
1587 + else
1588 + printk(KERN_CONT "%08x ", c);
1589 + }
1590 + printk("\n");
1591 +}
1592 +#endif
1593 +
1594 int fixup_exception(struct pt_regs *regs)
1595 {
1596 const struct exception_table_entry *fix;
1597 @@ -192,8 +303,33 @@ good_area:
1598
1599 acc_type = parisc_acctyp(code,regs->iir);
1600
1601 - if ((vma->vm_flags & acc_type) != acc_type)
1602 + if ((vma->vm_flags & acc_type) != acc_type) {
1603 +
1604 +#ifdef CONFIG_PAX_PAGEEXEC
1605 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
1606 + (address & ~3UL) == instruction_pointer(regs))
1607 + {
1608 + up_read(&mm->mmap_sem);
1609 + switch (pax_handle_fetch_fault(regs)) {
1610 +
1611 +#ifdef CONFIG_PAX_EMUPLT
1612 + case 3:
1613 + return;
1614 +#endif
1615 +
1616 +#ifdef CONFIG_PAX_EMUTRAMP
1617 + case 2:
1618 + return;
1619 +#endif
1620 +
1621 + }
1622 + pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
1623 + do_group_exit(SIGKILL);
1624 + }
1625 +#endif
1626 +
1627 goto bad_area;
1628 + }
1629
1630 /*
1631 * If for any reason at all we couldn't handle the fault, make
1632 diff -urNp linux-3.0.4/arch/powerpc/include/asm/elf.h linux-3.0.4/arch/powerpc/include/asm/elf.h
1633 --- linux-3.0.4/arch/powerpc/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
1634 +++ linux-3.0.4/arch/powerpc/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
1635 @@ -178,8 +178,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[E
1636 the loader. We need to make sure that it is out of the way of the program
1637 that it will "exec", and that there is sufficient room for the brk. */
1638
1639 -extern unsigned long randomize_et_dyn(unsigned long base);
1640 -#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
1641 +#define ELF_ET_DYN_BASE (0x20000000)
1642 +
1643 +#ifdef CONFIG_PAX_ASLR
1644 +#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
1645 +
1646 +#ifdef __powerpc64__
1647 +#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
1648 +#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
1649 +#else
1650 +#define PAX_DELTA_MMAP_LEN 15
1651 +#define PAX_DELTA_STACK_LEN 15
1652 +#endif
1653 +#endif
1654
1655 /*
1656 * Our registers are always unsigned longs, whether we're a 32 bit
1657 @@ -274,9 +285,6 @@ extern int arch_setup_additional_pages(s
1658 (0x7ff >> (PAGE_SHIFT - 12)) : \
1659 (0x3ffff >> (PAGE_SHIFT - 12)))
1660
1661 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1662 -#define arch_randomize_brk arch_randomize_brk
1663 -
1664 #endif /* __KERNEL__ */
1665
1666 /*
1667 diff -urNp linux-3.0.4/arch/powerpc/include/asm/kmap_types.h linux-3.0.4/arch/powerpc/include/asm/kmap_types.h
1668 --- linux-3.0.4/arch/powerpc/include/asm/kmap_types.h 2011-07-21 22:17:23.000000000 -0400
1669 +++ linux-3.0.4/arch/powerpc/include/asm/kmap_types.h 2011-08-23 21:47:55.000000000 -0400
1670 @@ -27,6 +27,7 @@ enum km_type {
1671 KM_PPC_SYNC_PAGE,
1672 KM_PPC_SYNC_ICACHE,
1673 KM_KDB,
1674 + KM_CLEARPAGE,
1675 KM_TYPE_NR
1676 };
1677
1678 diff -urNp linux-3.0.4/arch/powerpc/include/asm/mman.h linux-3.0.4/arch/powerpc/include/asm/mman.h
1679 --- linux-3.0.4/arch/powerpc/include/asm/mman.h 2011-07-21 22:17:23.000000000 -0400
1680 +++ linux-3.0.4/arch/powerpc/include/asm/mman.h 2011-08-23 21:47:55.000000000 -0400
1681 @@ -44,7 +44,7 @@ static inline unsigned long arch_calc_vm
1682 }
1683 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
1684
1685 -static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
1686 +static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
1687 {
1688 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
1689 }
1690 diff -urNp linux-3.0.4/arch/powerpc/include/asm/page_64.h linux-3.0.4/arch/powerpc/include/asm/page_64.h
1691 --- linux-3.0.4/arch/powerpc/include/asm/page_64.h 2011-07-21 22:17:23.000000000 -0400
1692 +++ linux-3.0.4/arch/powerpc/include/asm/page_64.h 2011-08-23 21:47:55.000000000 -0400
1693 @@ -155,15 +155,18 @@ do { \
1694 * stack by default, so in the absence of a PT_GNU_STACK program header
1695 * we turn execute permission off.
1696 */
1697 -#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
1698 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1699 +#define VM_STACK_DEFAULT_FLAGS32 \
1700 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
1701 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1702
1703 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
1704 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1705
1706 +#ifndef CONFIG_PAX_PAGEEXEC
1707 #define VM_STACK_DEFAULT_FLAGS \
1708 (is_32bit_task() ? \
1709 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
1710 +#endif
1711
1712 #include <asm-generic/getorder.h>
1713
1714 diff -urNp linux-3.0.4/arch/powerpc/include/asm/page.h linux-3.0.4/arch/powerpc/include/asm/page.h
1715 --- linux-3.0.4/arch/powerpc/include/asm/page.h 2011-07-21 22:17:23.000000000 -0400
1716 +++ linux-3.0.4/arch/powerpc/include/asm/page.h 2011-08-23 21:47:55.000000000 -0400
1717 @@ -129,8 +129,9 @@ extern phys_addr_t kernstart_addr;
1718 * and needs to be executable. This means the whole heap ends
1719 * up being executable.
1720 */
1721 -#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
1722 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1723 +#define VM_DATA_DEFAULT_FLAGS32 \
1724 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
1725 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1726
1727 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
1728 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1729 @@ -158,6 +159,9 @@ extern phys_addr_t kernstart_addr;
1730 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
1731 #endif
1732
1733 +#define ktla_ktva(addr) (addr)
1734 +#define ktva_ktla(addr) (addr)
1735 +
1736 #ifndef __ASSEMBLY__
1737
1738 #undef STRICT_MM_TYPECHECKS
1739 diff -urNp linux-3.0.4/arch/powerpc/include/asm/pgtable.h linux-3.0.4/arch/powerpc/include/asm/pgtable.h
1740 --- linux-3.0.4/arch/powerpc/include/asm/pgtable.h 2011-07-21 22:17:23.000000000 -0400
1741 +++ linux-3.0.4/arch/powerpc/include/asm/pgtable.h 2011-08-23 21:47:55.000000000 -0400
1742 @@ -2,6 +2,7 @@
1743 #define _ASM_POWERPC_PGTABLE_H
1744 #ifdef __KERNEL__
1745
1746 +#include <linux/const.h>
1747 #ifndef __ASSEMBLY__
1748 #include <asm/processor.h> /* For TASK_SIZE */
1749 #include <asm/mmu.h>
1750 diff -urNp linux-3.0.4/arch/powerpc/include/asm/pte-hash32.h linux-3.0.4/arch/powerpc/include/asm/pte-hash32.h
1751 --- linux-3.0.4/arch/powerpc/include/asm/pte-hash32.h 2011-07-21 22:17:23.000000000 -0400
1752 +++ linux-3.0.4/arch/powerpc/include/asm/pte-hash32.h 2011-08-23 21:47:55.000000000 -0400
1753 @@ -21,6 +21,7 @@
1754 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
1755 #define _PAGE_USER 0x004 /* usermode access allowed */
1756 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
1757 +#define _PAGE_EXEC _PAGE_GUARDED
1758 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
1759 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
1760 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
1761 diff -urNp linux-3.0.4/arch/powerpc/include/asm/reg.h linux-3.0.4/arch/powerpc/include/asm/reg.h
1762 --- linux-3.0.4/arch/powerpc/include/asm/reg.h 2011-07-21 22:17:23.000000000 -0400
1763 +++ linux-3.0.4/arch/powerpc/include/asm/reg.h 2011-08-23 21:47:55.000000000 -0400
1764 @@ -209,6 +209,7 @@
1765 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
1766 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
1767 #define DSISR_NOHPTE 0x40000000 /* no translation found */
1768 +#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
1769 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
1770 #define DSISR_ISSTORE 0x02000000 /* access was a store */
1771 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
1772 diff -urNp linux-3.0.4/arch/powerpc/include/asm/system.h linux-3.0.4/arch/powerpc/include/asm/system.h
1773 --- linux-3.0.4/arch/powerpc/include/asm/system.h 2011-07-21 22:17:23.000000000 -0400
1774 +++ linux-3.0.4/arch/powerpc/include/asm/system.h 2011-08-23 21:47:55.000000000 -0400
1775 @@ -531,7 +531,7 @@ __cmpxchg_local(volatile void *ptr, unsi
1776 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
1777 #endif
1778
1779 -extern unsigned long arch_align_stack(unsigned long sp);
1780 +#define arch_align_stack(x) ((x) & ~0xfUL)
1781
1782 /* Used in very early kernel initialization. */
1783 extern unsigned long reloc_offset(void);
1784 diff -urNp linux-3.0.4/arch/powerpc/include/asm/uaccess.h linux-3.0.4/arch/powerpc/include/asm/uaccess.h
1785 --- linux-3.0.4/arch/powerpc/include/asm/uaccess.h 2011-07-21 22:17:23.000000000 -0400
1786 +++ linux-3.0.4/arch/powerpc/include/asm/uaccess.h 2011-08-23 21:47:55.000000000 -0400
1787 @@ -13,6 +13,8 @@
1788 #define VERIFY_READ 0
1789 #define VERIFY_WRITE 1
1790
1791 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
1792 +
1793 /*
1794 * The fs value determines whether argument validity checking should be
1795 * performed or not. If get_fs() == USER_DS, checking is performed, with
1796 @@ -327,52 +329,6 @@ do { \
1797 extern unsigned long __copy_tofrom_user(void __user *to,
1798 const void __user *from, unsigned long size);
1799
1800 -#ifndef __powerpc64__
1801 -
1802 -static inline unsigned long copy_from_user(void *to,
1803 - const void __user *from, unsigned long n)
1804 -{
1805 - unsigned long over;
1806 -
1807 - if (access_ok(VERIFY_READ, from, n))
1808 - return __copy_tofrom_user((__force void __user *)to, from, n);
1809 - if ((unsigned long)from < TASK_SIZE) {
1810 - over = (unsigned long)from + n - TASK_SIZE;
1811 - return __copy_tofrom_user((__force void __user *)to, from,
1812 - n - over) + over;
1813 - }
1814 - return n;
1815 -}
1816 -
1817 -static inline unsigned long copy_to_user(void __user *to,
1818 - const void *from, unsigned long n)
1819 -{
1820 - unsigned long over;
1821 -
1822 - if (access_ok(VERIFY_WRITE, to, n))
1823 - return __copy_tofrom_user(to, (__force void __user *)from, n);
1824 - if ((unsigned long)to < TASK_SIZE) {
1825 - over = (unsigned long)to + n - TASK_SIZE;
1826 - return __copy_tofrom_user(to, (__force void __user *)from,
1827 - n - over) + over;
1828 - }
1829 - return n;
1830 -}
1831 -
1832 -#else /* __powerpc64__ */
1833 -
1834 -#define __copy_in_user(to, from, size) \
1835 - __copy_tofrom_user((to), (from), (size))
1836 -
1837 -extern unsigned long copy_from_user(void *to, const void __user *from,
1838 - unsigned long n);
1839 -extern unsigned long copy_to_user(void __user *to, const void *from,
1840 - unsigned long n);
1841 -extern unsigned long copy_in_user(void __user *to, const void __user *from,
1842 - unsigned long n);
1843 -
1844 -#endif /* __powerpc64__ */
1845 -
1846 static inline unsigned long __copy_from_user_inatomic(void *to,
1847 const void __user *from, unsigned long n)
1848 {
1849 @@ -396,6 +352,10 @@ static inline unsigned long __copy_from_
1850 if (ret == 0)
1851 return 0;
1852 }
1853 +
1854 + if (!__builtin_constant_p(n))
1855 + check_object_size(to, n, false);
1856 +
1857 return __copy_tofrom_user((__force void __user *)to, from, n);
1858 }
1859
1860 @@ -422,6 +382,10 @@ static inline unsigned long __copy_to_us
1861 if (ret == 0)
1862 return 0;
1863 }
1864 +
1865 + if (!__builtin_constant_p(n))
1866 + check_object_size(from, n, true);
1867 +
1868 return __copy_tofrom_user(to, (__force const void __user *)from, n);
1869 }
1870
1871 @@ -439,6 +403,92 @@ static inline unsigned long __copy_to_us
1872 return __copy_to_user_inatomic(to, from, size);
1873 }
1874
1875 +#ifndef __powerpc64__
1876 +
1877 +static inline unsigned long __must_check copy_from_user(void *to,
1878 + const void __user *from, unsigned long n)
1879 +{
1880 + unsigned long over;
1881 +
1882 + if ((long)n < 0)
1883 + return n;
1884 +
1885 + if (access_ok(VERIFY_READ, from, n)) {
1886 + if (!__builtin_constant_p(n))
1887 + check_object_size(to, n, false);
1888 + return __copy_tofrom_user((__force void __user *)to, from, n);
1889 + }
1890 + if ((unsigned long)from < TASK_SIZE) {
1891 + over = (unsigned long)from + n - TASK_SIZE;
1892 + if (!__builtin_constant_p(n - over))
1893 + check_object_size(to, n - over, false);
1894 + return __copy_tofrom_user((__force void __user *)to, from,
1895 + n - over) + over;
1896 + }
1897 + return n;
1898 +}
1899 +
1900 +static inline unsigned long __must_check copy_to_user(void __user *to,
1901 + const void *from, unsigned long n)
1902 +{
1903 + unsigned long over;
1904 +
1905 + if ((long)n < 0)
1906 + return n;
1907 +
1908 + if (access_ok(VERIFY_WRITE, to, n)) {
1909 + if (!__builtin_constant_p(n))
1910 + check_object_size(from, n, true);
1911 + return __copy_tofrom_user(to, (__force void __user *)from, n);
1912 + }
1913 + if ((unsigned long)to < TASK_SIZE) {
1914 + over = (unsigned long)to + n - TASK_SIZE;
1915 + if (!__builtin_constant_p(n))
1916 + check_object_size(from, n - over, true);
1917 + return __copy_tofrom_user(to, (__force void __user *)from,
1918 + n - over) + over;
1919 + }
1920 + return n;
1921 +}
1922 +
1923 +#else /* __powerpc64__ */
1924 +
1925 +#define __copy_in_user(to, from, size) \
1926 + __copy_tofrom_user((to), (from), (size))
1927 +
1928 +static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
1929 +{
1930 + if ((long)n < 0 || n > INT_MAX)
1931 + return n;
1932 +
1933 + if (!__builtin_constant_p(n))
1934 + check_object_size(to, n, false);
1935 +
1936 + if (likely(access_ok(VERIFY_READ, from, n)))
1937 + n = __copy_from_user(to, from, n);
1938 + else
1939 + memset(to, 0, n);
1940 + return n;
1941 +}
1942 +
1943 +static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
1944 +{
1945 + if ((long)n < 0 || n > INT_MAX)
1946 + return n;
1947 +
1948 + if (likely(access_ok(VERIFY_WRITE, to, n))) {
1949 + if (!__builtin_constant_p(n))
1950 + check_object_size(from, n, true);
1951 + n = __copy_to_user(to, from, n);
1952 + }
1953 + return n;
1954 +}
1955 +
1956 +extern unsigned long copy_in_user(void __user *to, const void __user *from,
1957 + unsigned long n);
1958 +
1959 +#endif /* __powerpc64__ */
1960 +
1961 extern unsigned long __clear_user(void __user *addr, unsigned long size);
1962
1963 static inline unsigned long clear_user(void __user *addr, unsigned long size)
1964 diff -urNp linux-3.0.4/arch/powerpc/kernel/exceptions-64e.S linux-3.0.4/arch/powerpc/kernel/exceptions-64e.S
1965 --- linux-3.0.4/arch/powerpc/kernel/exceptions-64e.S 2011-07-21 22:17:23.000000000 -0400
1966 +++ linux-3.0.4/arch/powerpc/kernel/exceptions-64e.S 2011-08-23 21:47:55.000000000 -0400
1967 @@ -567,6 +567,7 @@ storage_fault_common:
1968 std r14,_DAR(r1)
1969 std r15,_DSISR(r1)
1970 addi r3,r1,STACK_FRAME_OVERHEAD
1971 + bl .save_nvgprs
1972 mr r4,r14
1973 mr r5,r15
1974 ld r14,PACA_EXGEN+EX_R14(r13)
1975 @@ -576,8 +577,7 @@ storage_fault_common:
1976 cmpdi r3,0
1977 bne- 1f
1978 b .ret_from_except_lite
1979 -1: bl .save_nvgprs
1980 - mr r5,r3
1981 +1: mr r5,r3
1982 addi r3,r1,STACK_FRAME_OVERHEAD
1983 ld r4,_DAR(r1)
1984 bl .bad_page_fault
1985 diff -urNp linux-3.0.4/arch/powerpc/kernel/exceptions-64s.S linux-3.0.4/arch/powerpc/kernel/exceptions-64s.S
1986 --- linux-3.0.4/arch/powerpc/kernel/exceptions-64s.S 2011-07-21 22:17:23.000000000 -0400
1987 +++ linux-3.0.4/arch/powerpc/kernel/exceptions-64s.S 2011-08-23 21:47:55.000000000 -0400
1988 @@ -956,10 +956,10 @@ handle_page_fault:
1989 11: ld r4,_DAR(r1)
1990 ld r5,_DSISR(r1)
1991 addi r3,r1,STACK_FRAME_OVERHEAD
1992 + bl .save_nvgprs
1993 bl .do_page_fault
1994 cmpdi r3,0
1995 beq+ 13f
1996 - bl .save_nvgprs
1997 mr r5,r3
1998 addi r3,r1,STACK_FRAME_OVERHEAD
1999 lwz r4,_DAR(r1)
2000 diff -urNp linux-3.0.4/arch/powerpc/kernel/module_32.c linux-3.0.4/arch/powerpc/kernel/module_32.c
2001 --- linux-3.0.4/arch/powerpc/kernel/module_32.c 2011-07-21 22:17:23.000000000 -0400
2002 +++ linux-3.0.4/arch/powerpc/kernel/module_32.c 2011-08-23 21:47:55.000000000 -0400
2003 @@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr
2004 me->arch.core_plt_section = i;
2005 }
2006 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
2007 - printk("Module doesn't contain .plt or .init.plt sections.\n");
2008 + printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
2009 return -ENOEXEC;
2010 }
2011
2012 @@ -203,11 +203,16 @@ static uint32_t do_plt_call(void *locati
2013
2014 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
2015 /* Init, or core PLT? */
2016 - if (location >= mod->module_core
2017 - && location < mod->module_core + mod->core_size)
2018 + if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
2019 + (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
2020 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
2021 - else
2022 + else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
2023 + (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
2024 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
2025 + else {
2026 + printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
2027 + return ~0UL;
2028 + }
2029
2030 /* Find this entry, or if that fails, the next avail. entry */
2031 while (entry->jump[0]) {
2032 diff -urNp linux-3.0.4/arch/powerpc/kernel/module.c linux-3.0.4/arch/powerpc/kernel/module.c
2033 --- linux-3.0.4/arch/powerpc/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
2034 +++ linux-3.0.4/arch/powerpc/kernel/module.c 2011-08-23 21:47:55.000000000 -0400
2035 @@ -31,11 +31,24 @@
2036
2037 LIST_HEAD(module_bug_list);
2038
2039 +#ifdef CONFIG_PAX_KERNEXEC
2040 void *module_alloc(unsigned long size)
2041 {
2042 if (size == 0)
2043 return NULL;
2044
2045 + return vmalloc(size);
2046 +}
2047 +
2048 +void *module_alloc_exec(unsigned long size)
2049 +#else
2050 +void *module_alloc(unsigned long size)
2051 +#endif
2052 +
2053 +{
2054 + if (size == 0)
2055 + return NULL;
2056 +
2057 return vmalloc_exec(size);
2058 }
2059
2060 @@ -45,6 +58,13 @@ void module_free(struct module *mod, voi
2061 vfree(module_region);
2062 }
2063
2064 +#ifdef CONFIG_PAX_KERNEXEC
2065 +void module_free_exec(struct module *mod, void *module_region)
2066 +{
2067 + module_free(mod, module_region);
2068 +}
2069 +#endif
2070 +
2071 static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
2072 const Elf_Shdr *sechdrs,
2073 const char *name)
2074 diff -urNp linux-3.0.4/arch/powerpc/kernel/process.c linux-3.0.4/arch/powerpc/kernel/process.c
2075 --- linux-3.0.4/arch/powerpc/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
2076 +++ linux-3.0.4/arch/powerpc/kernel/process.c 2011-08-23 21:48:14.000000000 -0400
2077 @@ -676,8 +676,8 @@ void show_regs(struct pt_regs * regs)
2078 * Lookup NIP late so we have the best change of getting the
2079 * above info out without failing
2080 */
2081 - printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
2082 - printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
2083 + printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
2084 + printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
2085 #endif
2086 show_stack(current, (unsigned long *) regs->gpr[1]);
2087 if (!user_mode(regs))
2088 @@ -1183,10 +1183,10 @@ void show_stack(struct task_struct *tsk,
2089 newsp = stack[0];
2090 ip = stack[STACK_FRAME_LR_SAVE];
2091 if (!firstframe || ip != lr) {
2092 - printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
2093 + printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
2094 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2095 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
2096 - printk(" (%pS)",
2097 + printk(" (%pA)",
2098 (void *)current->ret_stack[curr_frame].ret);
2099 curr_frame--;
2100 }
2101 @@ -1206,7 +1206,7 @@ void show_stack(struct task_struct *tsk,
2102 struct pt_regs *regs = (struct pt_regs *)
2103 (sp + STACK_FRAME_OVERHEAD);
2104 lr = regs->link;
2105 - printk("--- Exception: %lx at %pS\n LR = %pS\n",
2106 + printk("--- Exception: %lx at %pA\n LR = %pA\n",
2107 regs->trap, (void *)regs->nip, (void *)lr);
2108 firstframe = 1;
2109 }
2110 @@ -1281,58 +1281,3 @@ void thread_info_cache_init(void)
2111 }
2112
2113 #endif /* THREAD_SHIFT < PAGE_SHIFT */
2114 -
2115 -unsigned long arch_align_stack(unsigned long sp)
2116 -{
2117 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2118 - sp -= get_random_int() & ~PAGE_MASK;
2119 - return sp & ~0xf;
2120 -}
2121 -
2122 -static inline unsigned long brk_rnd(void)
2123 -{
2124 - unsigned long rnd = 0;
2125 -
2126 - /* 8MB for 32bit, 1GB for 64bit */
2127 - if (is_32bit_task())
2128 - rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
2129 - else
2130 - rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
2131 -
2132 - return rnd << PAGE_SHIFT;
2133 -}
2134 -
2135 -unsigned long arch_randomize_brk(struct mm_struct *mm)
2136 -{
2137 - unsigned long base = mm->brk;
2138 - unsigned long ret;
2139 -
2140 -#ifdef CONFIG_PPC_STD_MMU_64
2141 - /*
2142 - * If we are using 1TB segments and we are allowed to randomise
2143 - * the heap, we can put it above 1TB so it is backed by a 1TB
2144 - * segment. Otherwise the heap will be in the bottom 1TB
2145 - * which always uses 256MB segments and this may result in a
2146 - * performance penalty.
2147 - */
2148 - if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
2149 - base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
2150 -#endif
2151 -
2152 - ret = PAGE_ALIGN(base + brk_rnd());
2153 -
2154 - if (ret < mm->brk)
2155 - return mm->brk;
2156 -
2157 - return ret;
2158 -}
2159 -
2160 -unsigned long randomize_et_dyn(unsigned long base)
2161 -{
2162 - unsigned long ret = PAGE_ALIGN(base + brk_rnd());
2163 -
2164 - if (ret < base)
2165 - return base;
2166 -
2167 - return ret;
2168 -}
2169 diff -urNp linux-3.0.4/arch/powerpc/kernel/signal_32.c linux-3.0.4/arch/powerpc/kernel/signal_32.c
2170 --- linux-3.0.4/arch/powerpc/kernel/signal_32.c 2011-07-21 22:17:23.000000000 -0400
2171 +++ linux-3.0.4/arch/powerpc/kernel/signal_32.c 2011-08-23 21:47:55.000000000 -0400
2172 @@ -859,7 +859,7 @@ int handle_rt_signal32(unsigned long sig
2173 /* Save user registers on the stack */
2174 frame = &rt_sf->uc.uc_mcontext;
2175 addr = frame;
2176 - if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
2177 + if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2178 if (save_user_regs(regs, frame, 0, 1))
2179 goto badframe;
2180 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
2181 diff -urNp linux-3.0.4/arch/powerpc/kernel/signal_64.c linux-3.0.4/arch/powerpc/kernel/signal_64.c
2182 --- linux-3.0.4/arch/powerpc/kernel/signal_64.c 2011-07-21 22:17:23.000000000 -0400
2183 +++ linux-3.0.4/arch/powerpc/kernel/signal_64.c 2011-08-23 21:47:55.000000000 -0400
2184 @@ -430,7 +430,7 @@ int handle_rt_signal64(int signr, struct
2185 current->thread.fpscr.val = 0;
2186
2187 /* Set up to return from userspace. */
2188 - if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
2189 + if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2190 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
2191 } else {
2192 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
2193 diff -urNp linux-3.0.4/arch/powerpc/kernel/traps.c linux-3.0.4/arch/powerpc/kernel/traps.c
2194 --- linux-3.0.4/arch/powerpc/kernel/traps.c 2011-07-21 22:17:23.000000000 -0400
2195 +++ linux-3.0.4/arch/powerpc/kernel/traps.c 2011-08-23 21:48:14.000000000 -0400
2196 @@ -98,6 +98,8 @@ static void pmac_backlight_unblank(void)
2197 static inline void pmac_backlight_unblank(void) { }
2198 #endif
2199
2200 +extern void gr_handle_kernel_exploit(void);
2201 +
2202 int die(const char *str, struct pt_regs *regs, long err)
2203 {
2204 static struct {
2205 @@ -171,6 +173,8 @@ int die(const char *str, struct pt_regs
2206 if (panic_on_oops)
2207 panic("Fatal exception");
2208
2209 + gr_handle_kernel_exploit();
2210 +
2211 oops_exit();
2212 do_exit(err);
2213
2214 diff -urNp linux-3.0.4/arch/powerpc/kernel/vdso.c linux-3.0.4/arch/powerpc/kernel/vdso.c
2215 --- linux-3.0.4/arch/powerpc/kernel/vdso.c 2011-07-21 22:17:23.000000000 -0400
2216 +++ linux-3.0.4/arch/powerpc/kernel/vdso.c 2011-08-23 21:47:55.000000000 -0400
2217 @@ -36,6 +36,7 @@
2218 #include <asm/firmware.h>
2219 #include <asm/vdso.h>
2220 #include <asm/vdso_datapage.h>
2221 +#include <asm/mman.h>
2222
2223 #include "setup.h"
2224
2225 @@ -220,7 +221,7 @@ int arch_setup_additional_pages(struct l
2226 vdso_base = VDSO32_MBASE;
2227 #endif
2228
2229 - current->mm->context.vdso_base = 0;
2230 + current->mm->context.vdso_base = ~0UL;
2231
2232 /* vDSO has a problem and was disabled, just don't "enable" it for the
2233 * process
2234 @@ -240,7 +241,7 @@ int arch_setup_additional_pages(struct l
2235 vdso_base = get_unmapped_area(NULL, vdso_base,
2236 (vdso_pages << PAGE_SHIFT) +
2237 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
2238 - 0, 0);
2239 + 0, MAP_PRIVATE | MAP_EXECUTABLE);
2240 if (IS_ERR_VALUE(vdso_base)) {
2241 rc = vdso_base;
2242 goto fail_mmapsem;
2243 diff -urNp linux-3.0.4/arch/powerpc/lib/usercopy_64.c linux-3.0.4/arch/powerpc/lib/usercopy_64.c
2244 --- linux-3.0.4/arch/powerpc/lib/usercopy_64.c 2011-07-21 22:17:23.000000000 -0400
2245 +++ linux-3.0.4/arch/powerpc/lib/usercopy_64.c 2011-08-23 21:47:55.000000000 -0400
2246 @@ -9,22 +9,6 @@
2247 #include <linux/module.h>
2248 #include <asm/uaccess.h>
2249
2250 -unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
2251 -{
2252 - if (likely(access_ok(VERIFY_READ, from, n)))
2253 - n = __copy_from_user(to, from, n);
2254 - else
2255 - memset(to, 0, n);
2256 - return n;
2257 -}
2258 -
2259 -unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
2260 -{
2261 - if (likely(access_ok(VERIFY_WRITE, to, n)))
2262 - n = __copy_to_user(to, from, n);
2263 - return n;
2264 -}
2265 -
2266 unsigned long copy_in_user(void __user *to, const void __user *from,
2267 unsigned long n)
2268 {
2269 @@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *
2270 return n;
2271 }
2272
2273 -EXPORT_SYMBOL(copy_from_user);
2274 -EXPORT_SYMBOL(copy_to_user);
2275 EXPORT_SYMBOL(copy_in_user);
2276
2277 diff -urNp linux-3.0.4/arch/powerpc/mm/fault.c linux-3.0.4/arch/powerpc/mm/fault.c
2278 --- linux-3.0.4/arch/powerpc/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
2279 +++ linux-3.0.4/arch/powerpc/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
2280 @@ -32,6 +32,10 @@
2281 #include <linux/perf_event.h>
2282 #include <linux/magic.h>
2283 #include <linux/ratelimit.h>
2284 +#include <linux/slab.h>
2285 +#include <linux/pagemap.h>
2286 +#include <linux/compiler.h>
2287 +#include <linux/unistd.h>
2288
2289 #include <asm/firmware.h>
2290 #include <asm/page.h>
2291 @@ -43,6 +47,7 @@
2292 #include <asm/tlbflush.h>
2293 #include <asm/siginfo.h>
2294 #include <mm/mmu_decl.h>
2295 +#include <asm/ptrace.h>
2296
2297 #ifdef CONFIG_KPROBES
2298 static inline int notify_page_fault(struct pt_regs *regs)
2299 @@ -66,6 +71,33 @@ static inline int notify_page_fault(stru
2300 }
2301 #endif
2302
2303 +#ifdef CONFIG_PAX_PAGEEXEC
2304 +/*
2305 + * PaX: decide what to do with offenders (regs->nip = fault address)
2306 + *
2307 + * returns 1 when task should be killed
2308 + */
2309 +static int pax_handle_fetch_fault(struct pt_regs *regs)
2310 +{
2311 + return 1;
2312 +}
2313 +
2314 +void pax_report_insns(void *pc, void *sp)
2315 +{
2316 + unsigned long i;
2317 +
2318 + printk(KERN_ERR "PAX: bytes at PC: ");
2319 + for (i = 0; i < 5; i++) {
2320 + unsigned int c;
2321 + if (get_user(c, (unsigned int __user *)pc+i))
2322 + printk(KERN_CONT "???????? ");
2323 + else
2324 + printk(KERN_CONT "%08x ", c);
2325 + }
2326 + printk("\n");
2327 +}
2328 +#endif
2329 +
2330 /*
2331 * Check whether the instruction at regs->nip is a store using
2332 * an update addressing form which will update r1.
2333 @@ -136,7 +168,7 @@ int __kprobes do_page_fault(struct pt_re
2334 * indicate errors in DSISR but can validly be set in SRR1.
2335 */
2336 if (trap == 0x400)
2337 - error_code &= 0x48200000;
2338 + error_code &= 0x58200000;
2339 else
2340 is_write = error_code & DSISR_ISSTORE;
2341 #else
2342 @@ -259,7 +291,7 @@ good_area:
2343 * "undefined". Of those that can be set, this is the only
2344 * one which seems bad.
2345 */
2346 - if (error_code & 0x10000000)
2347 + if (error_code & DSISR_GUARDED)
2348 /* Guarded storage error. */
2349 goto bad_area;
2350 #endif /* CONFIG_8xx */
2351 @@ -274,7 +306,7 @@ good_area:
2352 * processors use the same I/D cache coherency mechanism
2353 * as embedded.
2354 */
2355 - if (error_code & DSISR_PROTFAULT)
2356 + if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
2357 goto bad_area;
2358 #endif /* CONFIG_PPC_STD_MMU */
2359
2360 @@ -343,6 +375,23 @@ bad_area:
2361 bad_area_nosemaphore:
2362 /* User mode accesses cause a SIGSEGV */
2363 if (user_mode(regs)) {
2364 +
2365 +#ifdef CONFIG_PAX_PAGEEXEC
2366 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
2367 +#ifdef CONFIG_PPC_STD_MMU
2368 + if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
2369 +#else
2370 + if (is_exec && regs->nip == address) {
2371 +#endif
2372 + switch (pax_handle_fetch_fault(regs)) {
2373 + }
2374 +
2375 + pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
2376 + do_group_exit(SIGKILL);
2377 + }
2378 + }
2379 +#endif
2380 +
2381 _exception(SIGSEGV, regs, code, address);
2382 return 0;
2383 }
2384 diff -urNp linux-3.0.4/arch/powerpc/mm/mmap_64.c linux-3.0.4/arch/powerpc/mm/mmap_64.c
2385 --- linux-3.0.4/arch/powerpc/mm/mmap_64.c 2011-07-21 22:17:23.000000000 -0400
2386 +++ linux-3.0.4/arch/powerpc/mm/mmap_64.c 2011-08-23 21:47:55.000000000 -0400
2387 @@ -99,10 +99,22 @@ void arch_pick_mmap_layout(struct mm_str
2388 */
2389 if (mmap_is_legacy()) {
2390 mm->mmap_base = TASK_UNMAPPED_BASE;
2391 +
2392 +#ifdef CONFIG_PAX_RANDMMAP
2393 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2394 + mm->mmap_base += mm->delta_mmap;
2395 +#endif
2396 +
2397 mm->get_unmapped_area = arch_get_unmapped_area;
2398 mm->unmap_area = arch_unmap_area;
2399 } else {
2400 mm->mmap_base = mmap_base();
2401 +
2402 +#ifdef CONFIG_PAX_RANDMMAP
2403 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2404 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2405 +#endif
2406 +
2407 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
2408 mm->unmap_area = arch_unmap_area_topdown;
2409 }
2410 diff -urNp linux-3.0.4/arch/powerpc/mm/slice.c linux-3.0.4/arch/powerpc/mm/slice.c
2411 --- linux-3.0.4/arch/powerpc/mm/slice.c 2011-07-21 22:17:23.000000000 -0400
2412 +++ linux-3.0.4/arch/powerpc/mm/slice.c 2011-08-23 21:47:55.000000000 -0400
2413 @@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_
2414 if ((mm->task_size - len) < addr)
2415 return 0;
2416 vma = find_vma(mm, addr);
2417 - return (!vma || (addr + len) <= vma->vm_start);
2418 + return check_heap_stack_gap(vma, addr, len);
2419 }
2420
2421 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
2422 @@ -256,7 +256,7 @@ full_search:
2423 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
2424 continue;
2425 }
2426 - if (!vma || addr + len <= vma->vm_start) {
2427 + if (check_heap_stack_gap(vma, addr, len)) {
2428 /*
2429 * Remember the place where we stopped the search:
2430 */
2431 @@ -313,10 +313,14 @@ static unsigned long slice_find_area_top
2432 }
2433 }
2434
2435 - addr = mm->mmap_base;
2436 - while (addr > len) {
2437 + if (mm->mmap_base < len)
2438 + addr = -ENOMEM;
2439 + else
2440 + addr = mm->mmap_base - len;
2441 +
2442 + while (!IS_ERR_VALUE(addr)) {
2443 /* Go down by chunk size */
2444 - addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
2445 + addr = _ALIGN_DOWN(addr, 1ul << pshift);
2446
2447 /* Check for hit with different page size */
2448 mask = slice_range_to_mask(addr, len);
2449 @@ -336,7 +340,7 @@ static unsigned long slice_find_area_top
2450 * return with success:
2451 */
2452 vma = find_vma(mm, addr);
2453 - if (!vma || (addr + len) <= vma->vm_start) {
2454 + if (check_heap_stack_gap(vma, addr, len)) {
2455 /* remember the address as a hint for next time */
2456 if (use_cache)
2457 mm->free_area_cache = addr;
2458 @@ -348,7 +352,7 @@ static unsigned long slice_find_area_top
2459 mm->cached_hole_size = vma->vm_start - addr;
2460
2461 /* try just below the current vma->vm_start */
2462 - addr = vma->vm_start;
2463 + addr = skip_heap_stack_gap(vma, len);
2464 }
2465
2466 /*
2467 @@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(un
2468 if (fixed && addr > (mm->task_size - len))
2469 return -EINVAL;
2470
2471 +#ifdef CONFIG_PAX_RANDMMAP
2472 + if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
2473 + addr = 0;
2474 +#endif
2475 +
2476 /* If hint, make sure it matches our alignment restrictions */
2477 if (!fixed && addr) {
2478 addr = _ALIGN_UP(addr, 1ul << pshift);
2479 diff -urNp linux-3.0.4/arch/s390/include/asm/elf.h linux-3.0.4/arch/s390/include/asm/elf.h
2480 --- linux-3.0.4/arch/s390/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
2481 +++ linux-3.0.4/arch/s390/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
2482 @@ -162,8 +162,14 @@ extern unsigned int vdso_enabled;
2483 the loader. We need to make sure that it is out of the way of the program
2484 that it will "exec", and that there is sufficient room for the brk. */
2485
2486 -extern unsigned long randomize_et_dyn(unsigned long base);
2487 -#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
2488 +#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
2489 +
2490 +#ifdef CONFIG_PAX_ASLR
2491 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
2492 +
2493 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
2494 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
2495 +#endif
2496
2497 /* This yields a mask that user programs can use to figure out what
2498 instruction set this CPU supports. */
2499 @@ -210,7 +216,4 @@ struct linux_binprm;
2500 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
2501 int arch_setup_additional_pages(struct linux_binprm *, int);
2502
2503 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2504 -#define arch_randomize_brk arch_randomize_brk
2505 -
2506 #endif
2507 diff -urNp linux-3.0.4/arch/s390/include/asm/system.h linux-3.0.4/arch/s390/include/asm/system.h
2508 --- linux-3.0.4/arch/s390/include/asm/system.h 2011-07-21 22:17:23.000000000 -0400
2509 +++ linux-3.0.4/arch/s390/include/asm/system.h 2011-08-23 21:47:55.000000000 -0400
2510 @@ -255,7 +255,7 @@ extern void (*_machine_restart)(char *co
2511 extern void (*_machine_halt)(void);
2512 extern void (*_machine_power_off)(void);
2513
2514 -extern unsigned long arch_align_stack(unsigned long sp);
2515 +#define arch_align_stack(x) ((x) & ~0xfUL)
2516
2517 static inline int tprot(unsigned long addr)
2518 {
2519 diff -urNp linux-3.0.4/arch/s390/include/asm/uaccess.h linux-3.0.4/arch/s390/include/asm/uaccess.h
2520 --- linux-3.0.4/arch/s390/include/asm/uaccess.h 2011-07-21 22:17:23.000000000 -0400
2521 +++ linux-3.0.4/arch/s390/include/asm/uaccess.h 2011-08-23 21:47:55.000000000 -0400
2522 @@ -235,6 +235,10 @@ static inline unsigned long __must_check
2523 copy_to_user(void __user *to, const void *from, unsigned long n)
2524 {
2525 might_fault();
2526 +
2527 + if ((long)n < 0)
2528 + return n;
2529 +
2530 if (access_ok(VERIFY_WRITE, to, n))
2531 n = __copy_to_user(to, from, n);
2532 return n;
2533 @@ -260,6 +264,9 @@ copy_to_user(void __user *to, const void
2534 static inline unsigned long __must_check
2535 __copy_from_user(void *to, const void __user *from, unsigned long n)
2536 {
2537 + if ((long)n < 0)
2538 + return n;
2539 +
2540 if (__builtin_constant_p(n) && (n <= 256))
2541 return uaccess.copy_from_user_small(n, from, to);
2542 else
2543 @@ -294,6 +301,10 @@ copy_from_user(void *to, const void __us
2544 unsigned int sz = __compiletime_object_size(to);
2545
2546 might_fault();
2547 +
2548 + if ((long)n < 0)
2549 + return n;
2550 +
2551 if (unlikely(sz != -1 && sz < n)) {
2552 copy_from_user_overflow();
2553 return n;
2554 diff -urNp linux-3.0.4/arch/s390/kernel/module.c linux-3.0.4/arch/s390/kernel/module.c
2555 --- linux-3.0.4/arch/s390/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
2556 +++ linux-3.0.4/arch/s390/kernel/module.c 2011-08-23 21:47:55.000000000 -0400
2557 @@ -168,11 +168,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr,
2558
2559 /* Increase core size by size of got & plt and set start
2560 offsets for got and plt. */
2561 - me->core_size = ALIGN(me->core_size, 4);
2562 - me->arch.got_offset = me->core_size;
2563 - me->core_size += me->arch.got_size;
2564 - me->arch.plt_offset = me->core_size;
2565 - me->core_size += me->arch.plt_size;
2566 + me->core_size_rw = ALIGN(me->core_size_rw, 4);
2567 + me->arch.got_offset = me->core_size_rw;
2568 + me->core_size_rw += me->arch.got_size;
2569 + me->arch.plt_offset = me->core_size_rx;
2570 + me->core_size_rx += me->arch.plt_size;
2571 return 0;
2572 }
2573
2574 @@ -258,7 +258,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2575 if (info->got_initialized == 0) {
2576 Elf_Addr *gotent;
2577
2578 - gotent = me->module_core + me->arch.got_offset +
2579 + gotent = me->module_core_rw + me->arch.got_offset +
2580 info->got_offset;
2581 *gotent = val;
2582 info->got_initialized = 1;
2583 @@ -282,7 +282,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2584 else if (r_type == R_390_GOTENT ||
2585 r_type == R_390_GOTPLTENT)
2586 *(unsigned int *) loc =
2587 - (val + (Elf_Addr) me->module_core - loc) >> 1;
2588 + (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
2589 else if (r_type == R_390_GOT64 ||
2590 r_type == R_390_GOTPLT64)
2591 *(unsigned long *) loc = val;
2592 @@ -296,7 +296,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2593 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
2594 if (info->plt_initialized == 0) {
2595 unsigned int *ip;
2596 - ip = me->module_core + me->arch.plt_offset +
2597 + ip = me->module_core_rx + me->arch.plt_offset +
2598 info->plt_offset;
2599 #ifndef CONFIG_64BIT
2600 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
2601 @@ -321,7 +321,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2602 val - loc + 0xffffUL < 0x1ffffeUL) ||
2603 (r_type == R_390_PLT32DBL &&
2604 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
2605 - val = (Elf_Addr) me->module_core +
2606 + val = (Elf_Addr) me->module_core_rx +
2607 me->arch.plt_offset +
2608 info->plt_offset;
2609 val += rela->r_addend - loc;
2610 @@ -343,7 +343,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2611 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
2612 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
2613 val = val + rela->r_addend -
2614 - ((Elf_Addr) me->module_core + me->arch.got_offset);
2615 + ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
2616 if (r_type == R_390_GOTOFF16)
2617 *(unsigned short *) loc = val;
2618 else if (r_type == R_390_GOTOFF32)
2619 @@ -353,7 +353,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2620 break;
2621 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
2622 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
2623 - val = (Elf_Addr) me->module_core + me->arch.got_offset +
2624 + val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
2625 rela->r_addend - loc;
2626 if (r_type == R_390_GOTPC)
2627 *(unsigned int *) loc = val;
2628 diff -urNp linux-3.0.4/arch/s390/kernel/process.c linux-3.0.4/arch/s390/kernel/process.c
2629 --- linux-3.0.4/arch/s390/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
2630 +++ linux-3.0.4/arch/s390/kernel/process.c 2011-08-23 21:47:55.000000000 -0400
2631 @@ -319,39 +319,3 @@ unsigned long get_wchan(struct task_stru
2632 }
2633 return 0;
2634 }
2635 -
2636 -unsigned long arch_align_stack(unsigned long sp)
2637 -{
2638 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2639 - sp -= get_random_int() & ~PAGE_MASK;
2640 - return sp & ~0xf;
2641 -}
2642 -
2643 -static inline unsigned long brk_rnd(void)
2644 -{
2645 - /* 8MB for 32bit, 1GB for 64bit */
2646 - if (is_32bit_task())
2647 - return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
2648 - else
2649 - return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
2650 -}
2651 -
2652 -unsigned long arch_randomize_brk(struct mm_struct *mm)
2653 -{
2654 - unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
2655 -
2656 - if (ret < mm->brk)
2657 - return mm->brk;
2658 - return ret;
2659 -}
2660 -
2661 -unsigned long randomize_et_dyn(unsigned long base)
2662 -{
2663 - unsigned long ret = PAGE_ALIGN(base + brk_rnd());
2664 -
2665 - if (!(current->flags & PF_RANDOMIZE))
2666 - return base;
2667 - if (ret < base)
2668 - return base;
2669 - return ret;
2670 -}
2671 diff -urNp linux-3.0.4/arch/s390/kernel/setup.c linux-3.0.4/arch/s390/kernel/setup.c
2672 --- linux-3.0.4/arch/s390/kernel/setup.c 2011-07-21 22:17:23.000000000 -0400
2673 +++ linux-3.0.4/arch/s390/kernel/setup.c 2011-08-23 21:47:55.000000000 -0400
2674 @@ -271,7 +271,7 @@ static int __init early_parse_mem(char *
2675 }
2676 early_param("mem", early_parse_mem);
2677
2678 -unsigned int user_mode = HOME_SPACE_MODE;
2679 +unsigned int user_mode = SECONDARY_SPACE_MODE;
2680 EXPORT_SYMBOL_GPL(user_mode);
2681
2682 static int set_amode_and_uaccess(unsigned long user_amode,
2683 diff -urNp linux-3.0.4/arch/s390/mm/mmap.c linux-3.0.4/arch/s390/mm/mmap.c
2684 --- linux-3.0.4/arch/s390/mm/mmap.c 2011-07-21 22:17:23.000000000 -0400
2685 +++ linux-3.0.4/arch/s390/mm/mmap.c 2011-08-23 21:47:55.000000000 -0400
2686 @@ -91,10 +91,22 @@ void arch_pick_mmap_layout(struct mm_str
2687 */
2688 if (mmap_is_legacy()) {
2689 mm->mmap_base = TASK_UNMAPPED_BASE;
2690 +
2691 +#ifdef CONFIG_PAX_RANDMMAP
2692 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2693 + mm->mmap_base += mm->delta_mmap;
2694 +#endif
2695 +
2696 mm->get_unmapped_area = arch_get_unmapped_area;
2697 mm->unmap_area = arch_unmap_area;
2698 } else {
2699 mm->mmap_base = mmap_base();
2700 +
2701 +#ifdef CONFIG_PAX_RANDMMAP
2702 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2703 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2704 +#endif
2705 +
2706 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
2707 mm->unmap_area = arch_unmap_area_topdown;
2708 }
2709 @@ -166,10 +178,22 @@ void arch_pick_mmap_layout(struct mm_str
2710 */
2711 if (mmap_is_legacy()) {
2712 mm->mmap_base = TASK_UNMAPPED_BASE;
2713 +
2714 +#ifdef CONFIG_PAX_RANDMMAP
2715 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2716 + mm->mmap_base += mm->delta_mmap;
2717 +#endif
2718 +
2719 mm->get_unmapped_area = s390_get_unmapped_area;
2720 mm->unmap_area = arch_unmap_area;
2721 } else {
2722 mm->mmap_base = mmap_base();
2723 +
2724 +#ifdef CONFIG_PAX_RANDMMAP
2725 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2726 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2727 +#endif
2728 +
2729 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
2730 mm->unmap_area = arch_unmap_area_topdown;
2731 }
2732 diff -urNp linux-3.0.4/arch/score/include/asm/system.h linux-3.0.4/arch/score/include/asm/system.h
2733 --- linux-3.0.4/arch/score/include/asm/system.h 2011-07-21 22:17:23.000000000 -0400
2734 +++ linux-3.0.4/arch/score/include/asm/system.h 2011-08-23 21:47:55.000000000 -0400
2735 @@ -17,7 +17,7 @@ do { \
2736 #define finish_arch_switch(prev) do {} while (0)
2737
2738 typedef void (*vi_handler_t)(void);
2739 -extern unsigned long arch_align_stack(unsigned long sp);
2740 +#define arch_align_stack(x) (x)
2741
2742 #define mb() barrier()
2743 #define rmb() barrier()
2744 diff -urNp linux-3.0.4/arch/score/kernel/process.c linux-3.0.4/arch/score/kernel/process.c
2745 --- linux-3.0.4/arch/score/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
2746 +++ linux-3.0.4/arch/score/kernel/process.c 2011-08-23 21:47:55.000000000 -0400
2747 @@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_stru
2748
2749 return task_pt_regs(task)->cp0_epc;
2750 }
2751 -
2752 -unsigned long arch_align_stack(unsigned long sp)
2753 -{
2754 - return sp;
2755 -}
2756 diff -urNp linux-3.0.4/arch/sh/mm/mmap.c linux-3.0.4/arch/sh/mm/mmap.c
2757 --- linux-3.0.4/arch/sh/mm/mmap.c 2011-07-21 22:17:23.000000000 -0400
2758 +++ linux-3.0.4/arch/sh/mm/mmap.c 2011-08-23 21:47:55.000000000 -0400
2759 @@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(str
2760 addr = PAGE_ALIGN(addr);
2761
2762 vma = find_vma(mm, addr);
2763 - if (TASK_SIZE - len >= addr &&
2764 - (!vma || addr + len <= vma->vm_start))
2765 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
2766 return addr;
2767 }
2768
2769 @@ -106,7 +105,7 @@ full_search:
2770 }
2771 return -ENOMEM;
2772 }
2773 - if (likely(!vma || addr + len <= vma->vm_start)) {
2774 + if (likely(check_heap_stack_gap(vma, addr, len))) {
2775 /*
2776 * Remember the place where we stopped the search:
2777 */
2778 @@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct fi
2779 addr = PAGE_ALIGN(addr);
2780
2781 vma = find_vma(mm, addr);
2782 - if (TASK_SIZE - len >= addr &&
2783 - (!vma || addr + len <= vma->vm_start))
2784 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
2785 return addr;
2786 }
2787
2788 @@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct fi
2789 /* make sure it can fit in the remaining address space */
2790 if (likely(addr > len)) {
2791 vma = find_vma(mm, addr-len);
2792 - if (!vma || addr <= vma->vm_start) {
2793 + if (check_heap_stack_gap(vma, addr - len, len)) {
2794 /* remember the address as a hint for next time */
2795 return (mm->free_area_cache = addr-len);
2796 }
2797 @@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct fi
2798 if (unlikely(mm->mmap_base < len))
2799 goto bottomup;
2800
2801 - addr = mm->mmap_base-len;
2802 - if (do_colour_align)
2803 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
2804 + addr = mm->mmap_base - len;
2805
2806 do {
2807 + if (do_colour_align)
2808 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
2809 /*
2810 * Lookup failure means no vma is above this address,
2811 * else if new region fits below vma->vm_start,
2812 * return with success:
2813 */
2814 vma = find_vma(mm, addr);
2815 - if (likely(!vma || addr+len <= vma->vm_start)) {
2816 + if (likely(check_heap_stack_gap(vma, addr, len))) {
2817 /* remember the address as a hint for next time */
2818 return (mm->free_area_cache = addr);
2819 }
2820 @@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct fi
2821 mm->cached_hole_size = vma->vm_start - addr;
2822
2823 /* try just below the current vma->vm_start */
2824 - addr = vma->vm_start-len;
2825 - if (do_colour_align)
2826 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
2827 - } while (likely(len < vma->vm_start));
2828 + addr = skip_heap_stack_gap(vma, len);
2829 + } while (!IS_ERR_VALUE(addr));
2830
2831 bottomup:
2832 /*
2833 diff -urNp linux-3.0.4/arch/sparc/include/asm/atomic_64.h linux-3.0.4/arch/sparc/include/asm/atomic_64.h
2834 --- linux-3.0.4/arch/sparc/include/asm/atomic_64.h 2011-07-21 22:17:23.000000000 -0400
2835 +++ linux-3.0.4/arch/sparc/include/asm/atomic_64.h 2011-08-23 21:48:14.000000000 -0400
2836 @@ -14,18 +14,40 @@
2837 #define ATOMIC64_INIT(i) { (i) }
2838
2839 #define atomic_read(v) (*(volatile int *)&(v)->counter)
2840 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
2841 +{
2842 + return v->counter;
2843 +}
2844 #define atomic64_read(v) (*(volatile long *)&(v)->counter)
2845 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
2846 +{
2847 + return v->counter;
2848 +}
2849
2850 #define atomic_set(v, i) (((v)->counter) = i)
2851 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
2852 +{
2853 + v->counter = i;
2854 +}
2855 #define atomic64_set(v, i) (((v)->counter) = i)
2856 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
2857 +{
2858 + v->counter = i;
2859 +}
2860
2861 extern void atomic_add(int, atomic_t *);
2862 +extern void atomic_add_unchecked(int, atomic_unchecked_t *);
2863 extern void atomic64_add(long, atomic64_t *);
2864 +extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
2865 extern void atomic_sub(int, atomic_t *);
2866 +extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
2867 extern void atomic64_sub(long, atomic64_t *);
2868 +extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
2869
2870 extern int atomic_add_ret(int, atomic_t *);
2871 +extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
2872 extern long atomic64_add_ret(long, atomic64_t *);
2873 +extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
2874 extern int atomic_sub_ret(int, atomic_t *);
2875 extern long atomic64_sub_ret(long, atomic64_t *);
2876
2877 @@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomi
2878 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
2879
2880 #define atomic_inc_return(v) atomic_add_ret(1, v)
2881 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
2882 +{
2883 + return atomic_add_ret_unchecked(1, v);
2884 +}
2885 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
2886 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
2887 +{
2888 + return atomic64_add_ret_unchecked(1, v);
2889 +}
2890
2891 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
2892 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
2893
2894 #define atomic_add_return(i, v) atomic_add_ret(i, v)
2895 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
2896 +{
2897 + return atomic_add_ret_unchecked(i, v);
2898 +}
2899 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
2900 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
2901 +{
2902 + return atomic64_add_ret_unchecked(i, v);
2903 +}
2904
2905 /*
2906 * atomic_inc_and_test - increment and test
2907 @@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomi
2908 * other cases.
2909 */
2910 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
2911 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
2912 +{
2913 + return atomic_inc_return_unchecked(v) == 0;
2914 +}
2915 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
2916
2917 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
2918 @@ -59,30 +101,65 @@ extern long atomic64_sub_ret(long, atomi
2919 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
2920
2921 #define atomic_inc(v) atomic_add(1, v)
2922 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
2923 +{
2924 + atomic_add_unchecked(1, v);
2925 +}
2926 #define atomic64_inc(v) atomic64_add(1, v)
2927 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
2928 +{
2929 + atomic64_add_unchecked(1, v);
2930 +}
2931
2932 #define atomic_dec(v) atomic_sub(1, v)
2933 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
2934 +{
2935 + atomic_sub_unchecked(1, v);
2936 +}
2937 #define atomic64_dec(v) atomic64_sub(1, v)
2938 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
2939 +{
2940 + atomic64_sub_unchecked(1, v);
2941 +}
2942
2943 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
2944 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
2945
2946 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
2947 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
2948 +{
2949 + return cmpxchg(&v->counter, old, new);
2950 +}
2951 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
2952 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
2953 +{
2954 + return xchg(&v->counter, new);
2955 +}
2956
2957 static inline int atomic_add_unless(atomic_t *v, int a, int u)
2958 {
2959 - int c, old;
2960 + int c, old, new;
2961 c = atomic_read(v);
2962 for (;;) {
2963 - if (unlikely(c == (u)))
2964 + if (unlikely(c == u))
2965 break;
2966 - old = atomic_cmpxchg((v), c, c + (a));
2967 +
2968 + asm volatile("addcc %2, %0, %0\n"
2969 +
2970 +#ifdef CONFIG_PAX_REFCOUNT
2971 + "tvs %%icc, 6\n"
2972 +#endif
2973 +
2974 + : "=r" (new)
2975 + : "0" (c), "ir" (a)
2976 + : "cc");
2977 +
2978 + old = atomic_cmpxchg(v, c, new);
2979 if (likely(old == c))
2980 break;
2981 c = old;
2982 }
2983 - return c != (u);
2984 + return c != u;
2985 }
2986
2987 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
2988 @@ -90,20 +167,35 @@ static inline int atomic_add_unless(atom
2989 #define atomic64_cmpxchg(v, o, n) \
2990 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
2991 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
2992 +static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
2993 +{
2994 + return xchg(&v->counter, new);
2995 +}
2996
2997 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
2998 {
2999 - long c, old;
3000 + long c, old, new;
3001 c = atomic64_read(v);
3002 for (;;) {
3003 - if (unlikely(c == (u)))
3004 + if (unlikely(c == u))
3005 break;
3006 - old = atomic64_cmpxchg((v), c, c + (a));
3007 +
3008 + asm volatile("addcc %2, %0, %0\n"
3009 +
3010 +#ifdef CONFIG_PAX_REFCOUNT
3011 + "tvs %%xcc, 6\n"
3012 +#endif
3013 +
3014 + : "=r" (new)
3015 + : "0" (c), "ir" (a)
3016 + : "cc");
3017 +
3018 + old = atomic64_cmpxchg(v, c, new);
3019 if (likely(old == c))
3020 break;
3021 c = old;
3022 }
3023 - return c != (u);
3024 + return c != u;
3025 }
3026
3027 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
3028 diff -urNp linux-3.0.4/arch/sparc/include/asm/cache.h linux-3.0.4/arch/sparc/include/asm/cache.h
3029 --- linux-3.0.4/arch/sparc/include/asm/cache.h 2011-07-21 22:17:23.000000000 -0400
3030 +++ linux-3.0.4/arch/sparc/include/asm/cache.h 2011-08-23 21:47:55.000000000 -0400
3031 @@ -10,7 +10,7 @@
3032 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
3033
3034 #define L1_CACHE_SHIFT 5
3035 -#define L1_CACHE_BYTES 32
3036 +#define L1_CACHE_BYTES 32UL
3037
3038 #ifdef CONFIG_SPARC32
3039 #define SMP_CACHE_BYTES_SHIFT 5
3040 diff -urNp linux-3.0.4/arch/sparc/include/asm/elf_32.h linux-3.0.4/arch/sparc/include/asm/elf_32.h
3041 --- linux-3.0.4/arch/sparc/include/asm/elf_32.h 2011-07-21 22:17:23.000000000 -0400
3042 +++ linux-3.0.4/arch/sparc/include/asm/elf_32.h 2011-08-23 21:47:55.000000000 -0400
3043 @@ -114,6 +114,13 @@ typedef struct {
3044
3045 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
3046
3047 +#ifdef CONFIG_PAX_ASLR
3048 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
3049 +
3050 +#define PAX_DELTA_MMAP_LEN 16
3051 +#define PAX_DELTA_STACK_LEN 16
3052 +#endif
3053 +
3054 /* This yields a mask that user programs can use to figure out what
3055 instruction set this cpu supports. This can NOT be done in userspace
3056 on Sparc. */
3057 diff -urNp linux-3.0.4/arch/sparc/include/asm/elf_64.h linux-3.0.4/arch/sparc/include/asm/elf_64.h
3058 --- linux-3.0.4/arch/sparc/include/asm/elf_64.h 2011-09-02 18:11:21.000000000 -0400
3059 +++ linux-3.0.4/arch/sparc/include/asm/elf_64.h 2011-08-23 21:47:55.000000000 -0400
3060 @@ -180,6 +180,13 @@ typedef struct {
3061 #define ELF_ET_DYN_BASE 0x0000010000000000UL
3062 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
3063
3064 +#ifdef CONFIG_PAX_ASLR
3065 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
3066 +
3067 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
3068 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
3069 +#endif
3070 +
3071 extern unsigned long sparc64_elf_hwcap;
3072 #define ELF_HWCAP sparc64_elf_hwcap
3073
3074 diff -urNp linux-3.0.4/arch/sparc/include/asm/pgtable_32.h linux-3.0.4/arch/sparc/include/asm/pgtable_32.h
3075 --- linux-3.0.4/arch/sparc/include/asm/pgtable_32.h 2011-07-21 22:17:23.000000000 -0400
3076 +++ linux-3.0.4/arch/sparc/include/asm/pgtable_32.h 2011-08-23 21:47:55.000000000 -0400
3077 @@ -45,6 +45,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
3078 BTFIXUPDEF_INT(page_none)
3079 BTFIXUPDEF_INT(page_copy)
3080 BTFIXUPDEF_INT(page_readonly)
3081 +
3082 +#ifdef CONFIG_PAX_PAGEEXEC
3083 +BTFIXUPDEF_INT(page_shared_noexec)
3084 +BTFIXUPDEF_INT(page_copy_noexec)
3085 +BTFIXUPDEF_INT(page_readonly_noexec)
3086 +#endif
3087 +
3088 BTFIXUPDEF_INT(page_kernel)
3089
3090 #define PMD_SHIFT SUN4C_PMD_SHIFT
3091 @@ -66,6 +73,16 @@ extern pgprot_t PAGE_SHARED;
3092 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
3093 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
3094
3095 +#ifdef CONFIG_PAX_PAGEEXEC
3096 +extern pgprot_t PAGE_SHARED_NOEXEC;
3097 +# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
3098 +# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
3099 +#else
3100 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
3101 +# define PAGE_COPY_NOEXEC PAGE_COPY
3102 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
3103 +#endif
3104 +
3105 extern unsigned long page_kernel;
3106
3107 #ifdef MODULE
3108 diff -urNp linux-3.0.4/arch/sparc/include/asm/pgtsrmmu.h linux-3.0.4/arch/sparc/include/asm/pgtsrmmu.h
3109 --- linux-3.0.4/arch/sparc/include/asm/pgtsrmmu.h 2011-07-21 22:17:23.000000000 -0400
3110 +++ linux-3.0.4/arch/sparc/include/asm/pgtsrmmu.h 2011-08-23 21:47:55.000000000 -0400
3111 @@ -115,6 +115,13 @@
3112 SRMMU_EXEC | SRMMU_REF)
3113 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
3114 SRMMU_EXEC | SRMMU_REF)
3115 +
3116 +#ifdef CONFIG_PAX_PAGEEXEC
3117 +#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
3118 +#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3119 +#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3120 +#endif
3121 +
3122 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
3123 SRMMU_DIRTY | SRMMU_REF)
3124
3125 diff -urNp linux-3.0.4/arch/sparc/include/asm/spinlock_64.h linux-3.0.4/arch/sparc/include/asm/spinlock_64.h
3126 --- linux-3.0.4/arch/sparc/include/asm/spinlock_64.h 2011-07-21 22:17:23.000000000 -0400
3127 +++ linux-3.0.4/arch/sparc/include/asm/spinlock_64.h 2011-08-23 21:47:55.000000000 -0400
3128 @@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(
3129
3130 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
3131
3132 -static void inline arch_read_lock(arch_rwlock_t *lock)
3133 +static inline void arch_read_lock(arch_rwlock_t *lock)
3134 {
3135 unsigned long tmp1, tmp2;
3136
3137 __asm__ __volatile__ (
3138 "1: ldsw [%2], %0\n"
3139 " brlz,pn %0, 2f\n"
3140 -"4: add %0, 1, %1\n"
3141 +"4: addcc %0, 1, %1\n"
3142 +
3143 +#ifdef CONFIG_PAX_REFCOUNT
3144 +" tvs %%icc, 6\n"
3145 +#endif
3146 +
3147 " cas [%2], %0, %1\n"
3148 " cmp %0, %1\n"
3149 " bne,pn %%icc, 1b\n"
3150 @@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_r
3151 " .previous"
3152 : "=&r" (tmp1), "=&r" (tmp2)
3153 : "r" (lock)
3154 - : "memory");
3155 + : "memory", "cc");
3156 }
3157
3158 -static int inline arch_read_trylock(arch_rwlock_t *lock)
3159 +static inline int arch_read_trylock(arch_rwlock_t *lock)
3160 {
3161 int tmp1, tmp2;
3162
3163 @@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch
3164 "1: ldsw [%2], %0\n"
3165 " brlz,a,pn %0, 2f\n"
3166 " mov 0, %0\n"
3167 -" add %0, 1, %1\n"
3168 +" addcc %0, 1, %1\n"
3169 +
3170 +#ifdef CONFIG_PAX_REFCOUNT
3171 +" tvs %%icc, 6\n"
3172 +#endif
3173 +
3174 " cas [%2], %0, %1\n"
3175 " cmp %0, %1\n"
3176 " bne,pn %%icc, 1b\n"
3177 @@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch
3178 return tmp1;
3179 }
3180
3181 -static void inline arch_read_unlock(arch_rwlock_t *lock)
3182 +static inline void arch_read_unlock(arch_rwlock_t *lock)
3183 {
3184 unsigned long tmp1, tmp2;
3185
3186 __asm__ __volatile__(
3187 "1: lduw [%2], %0\n"
3188 -" sub %0, 1, %1\n"
3189 +" subcc %0, 1, %1\n"
3190 +
3191 +#ifdef CONFIG_PAX_REFCOUNT
3192 +" tvs %%icc, 6\n"
3193 +#endif
3194 +
3195 " cas [%2], %0, %1\n"
3196 " cmp %0, %1\n"
3197 " bne,pn %%xcc, 1b\n"
3198 @@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch
3199 : "memory");
3200 }
3201
3202 -static void inline arch_write_lock(arch_rwlock_t *lock)
3203 +static inline void arch_write_lock(arch_rwlock_t *lock)
3204 {
3205 unsigned long mask, tmp1, tmp2;
3206
3207 @@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_
3208 : "memory");
3209 }
3210
3211 -static void inline arch_write_unlock(arch_rwlock_t *lock)
3212 +static inline void arch_write_unlock(arch_rwlock_t *lock)
3213 {
3214 __asm__ __volatile__(
3215 " stw %%g0, [%0]"
3216 @@ -186,7 +201,7 @@ static void inline arch_write_unlock(arc
3217 : "memory");
3218 }
3219
3220 -static int inline arch_write_trylock(arch_rwlock_t *lock)
3221 +static inline int arch_write_trylock(arch_rwlock_t *lock)
3222 {
3223 unsigned long mask, tmp1, tmp2, result;
3224
3225 diff -urNp linux-3.0.4/arch/sparc/include/asm/thread_info_32.h linux-3.0.4/arch/sparc/include/asm/thread_info_32.h
3226 --- linux-3.0.4/arch/sparc/include/asm/thread_info_32.h 2011-07-21 22:17:23.000000000 -0400
3227 +++ linux-3.0.4/arch/sparc/include/asm/thread_info_32.h 2011-08-23 21:47:55.000000000 -0400
3228 @@ -50,6 +50,8 @@ struct thread_info {
3229 unsigned long w_saved;
3230
3231 struct restart_block restart_block;
3232 +
3233 + unsigned long lowest_stack;
3234 };
3235
3236 /*
3237 diff -urNp linux-3.0.4/arch/sparc/include/asm/thread_info_64.h linux-3.0.4/arch/sparc/include/asm/thread_info_64.h
3238 --- linux-3.0.4/arch/sparc/include/asm/thread_info_64.h 2011-07-21 22:17:23.000000000 -0400
3239 +++ linux-3.0.4/arch/sparc/include/asm/thread_info_64.h 2011-08-23 21:47:55.000000000 -0400
3240 @@ -63,6 +63,8 @@ struct thread_info {
3241 struct pt_regs *kern_una_regs;
3242 unsigned int kern_una_insn;
3243
3244 + unsigned long lowest_stack;
3245 +
3246 unsigned long fpregs[0] __attribute__ ((aligned(64)));
3247 };
3248
3249 diff -urNp linux-3.0.4/arch/sparc/include/asm/uaccess_32.h linux-3.0.4/arch/sparc/include/asm/uaccess_32.h
3250 --- linux-3.0.4/arch/sparc/include/asm/uaccess_32.h 2011-07-21 22:17:23.000000000 -0400
3251 +++ linux-3.0.4/arch/sparc/include/asm/uaccess_32.h 2011-08-23 21:47:55.000000000 -0400
3252 @@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __
3253
3254 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
3255 {
3256 - if (n && __access_ok((unsigned long) to, n))
3257 + if ((long)n < 0)
3258 + return n;
3259 +
3260 + if (n && __access_ok((unsigned long) to, n)) {
3261 + if (!__builtin_constant_p(n))
3262 + check_object_size(from, n, true);
3263 return __copy_user(to, (__force void __user *) from, n);
3264 - else
3265 + } else
3266 return n;
3267 }
3268
3269 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
3270 {
3271 + if ((long)n < 0)
3272 + return n;
3273 +
3274 + if (!__builtin_constant_p(n))
3275 + check_object_size(from, n, true);
3276 +
3277 return __copy_user(to, (__force void __user *) from, n);
3278 }
3279
3280 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
3281 {
3282 - if (n && __access_ok((unsigned long) from, n))
3283 + if ((long)n < 0)
3284 + return n;
3285 +
3286 + if (n && __access_ok((unsigned long) from, n)) {
3287 + if (!__builtin_constant_p(n))
3288 + check_object_size(to, n, false);
3289 return __copy_user((__force void __user *) to, from, n);
3290 - else
3291 + } else
3292 return n;
3293 }
3294
3295 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
3296 {
3297 + if ((long)n < 0)
3298 + return n;
3299 +
3300 return __copy_user((__force void __user *) to, from, n);
3301 }
3302
3303 diff -urNp linux-3.0.4/arch/sparc/include/asm/uaccess_64.h linux-3.0.4/arch/sparc/include/asm/uaccess_64.h
3304 --- linux-3.0.4/arch/sparc/include/asm/uaccess_64.h 2011-07-21 22:17:23.000000000 -0400
3305 +++ linux-3.0.4/arch/sparc/include/asm/uaccess_64.h 2011-08-23 21:47:55.000000000 -0400
3306 @@ -10,6 +10,7 @@
3307 #include <linux/compiler.h>
3308 #include <linux/string.h>
3309 #include <linux/thread_info.h>
3310 +#include <linux/kernel.h>
3311 #include <asm/asi.h>
3312 #include <asm/system.h>
3313 #include <asm/spitfire.h>
3314 @@ -213,8 +214,15 @@ extern unsigned long copy_from_user_fixu
3315 static inline unsigned long __must_check
3316 copy_from_user(void *to, const void __user *from, unsigned long size)
3317 {
3318 - unsigned long ret = ___copy_from_user(to, from, size);
3319 + unsigned long ret;
3320
3321 + if ((long)size < 0 || size > INT_MAX)
3322 + return size;
3323 +
3324 + if (!__builtin_constant_p(size))
3325 + check_object_size(to, size, false);
3326 +
3327 + ret = ___copy_from_user(to, from, size);
3328 if (unlikely(ret))
3329 ret = copy_from_user_fixup(to, from, size);
3330
3331 @@ -230,8 +238,15 @@ extern unsigned long copy_to_user_fixup(
3332 static inline unsigned long __must_check
3333 copy_to_user(void __user *to, const void *from, unsigned long size)
3334 {
3335 - unsigned long ret = ___copy_to_user(to, from, size);
3336 + unsigned long ret;
3337 +
3338 + if ((long)size < 0 || size > INT_MAX)
3339 + return size;
3340 +
3341 + if (!__builtin_constant_p(size))
3342 + check_object_size(from, size, true);
3343
3344 + ret = ___copy_to_user(to, from, size);
3345 if (unlikely(ret))
3346 ret = copy_to_user_fixup(to, from, size);
3347 return ret;
3348 diff -urNp linux-3.0.4/arch/sparc/include/asm/uaccess.h linux-3.0.4/arch/sparc/include/asm/uaccess.h
3349 --- linux-3.0.4/arch/sparc/include/asm/uaccess.h 2011-07-21 22:17:23.000000000 -0400
3350 +++ linux-3.0.4/arch/sparc/include/asm/uaccess.h 2011-08-23 21:47:55.000000000 -0400
3351 @@ -1,5 +1,13 @@
3352 #ifndef ___ASM_SPARC_UACCESS_H
3353 #define ___ASM_SPARC_UACCESS_H
3354 +
3355 +#ifdef __KERNEL__
3356 +#ifndef __ASSEMBLY__
3357 +#include <linux/types.h>
3358 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
3359 +#endif
3360 +#endif
3361 +
3362 #if defined(__sparc__) && defined(__arch64__)
3363 #include <asm/uaccess_64.h>
3364 #else
3365 diff -urNp linux-3.0.4/arch/sparc/kernel/Makefile linux-3.0.4/arch/sparc/kernel/Makefile
3366 --- linux-3.0.4/arch/sparc/kernel/Makefile 2011-07-21 22:17:23.000000000 -0400
3367 +++ linux-3.0.4/arch/sparc/kernel/Makefile 2011-08-23 21:47:55.000000000 -0400
3368 @@ -3,7 +3,7 @@
3369 #
3370
3371 asflags-y := -ansi
3372 -ccflags-y := -Werror
3373 +#ccflags-y := -Werror
3374
3375 extra-y := head_$(BITS).o
3376 extra-y += init_task.o
3377 diff -urNp linux-3.0.4/arch/sparc/kernel/process_32.c linux-3.0.4/arch/sparc/kernel/process_32.c
3378 --- linux-3.0.4/arch/sparc/kernel/process_32.c 2011-07-21 22:17:23.000000000 -0400
3379 +++ linux-3.0.4/arch/sparc/kernel/process_32.c 2011-08-23 21:48:14.000000000 -0400
3380 @@ -204,7 +204,7 @@ void __show_backtrace(unsigned long fp)
3381 rw->ins[4], rw->ins[5],
3382 rw->ins[6],
3383 rw->ins[7]);
3384 - printk("%pS\n", (void *) rw->ins[7]);
3385 + printk("%pA\n", (void *) rw->ins[7]);
3386 rw = (struct reg_window32 *) rw->ins[6];
3387 }
3388 spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
3389 @@ -271,14 +271,14 @@ void show_regs(struct pt_regs *r)
3390
3391 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
3392 r->psr, r->pc, r->npc, r->y, print_tainted());
3393 - printk("PC: <%pS>\n", (void *) r->pc);
3394 + printk("PC: <%pA>\n", (void *) r->pc);
3395 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3396 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
3397 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
3398 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3399 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
3400 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
3401 - printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
3402 + printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
3403
3404 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3405 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
3406 @@ -313,7 +313,7 @@ void show_stack(struct task_struct *tsk,
3407 rw = (struct reg_window32 *) fp;
3408 pc = rw->ins[7];
3409 printk("[%08lx : ", pc);
3410 - printk("%pS ] ", (void *) pc);
3411 + printk("%pA ] ", (void *) pc);
3412 fp = rw->ins[6];
3413 } while (++count < 16);
3414 printk("\n");
3415 diff -urNp linux-3.0.4/arch/sparc/kernel/process_64.c linux-3.0.4/arch/sparc/kernel/process_64.c
3416 --- linux-3.0.4/arch/sparc/kernel/process_64.c 2011-07-21 22:17:23.000000000 -0400
3417 +++ linux-3.0.4/arch/sparc/kernel/process_64.c 2011-08-23 21:48:14.000000000 -0400
3418 @@ -180,14 +180,14 @@ static void show_regwindow(struct pt_reg
3419 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
3420 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
3421 if (regs->tstate & TSTATE_PRIV)
3422 - printk("I7: <%pS>\n", (void *) rwk->ins[7]);
3423 + printk("I7: <%pA>\n", (void *) rwk->ins[7]);
3424 }
3425
3426 void show_regs(struct pt_regs *regs)
3427 {
3428 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
3429 regs->tpc, regs->tnpc, regs->y, print_tainted());
3430 - printk("TPC: <%pS>\n", (void *) regs->tpc);
3431 + printk("TPC: <%pA>\n", (void *) regs->tpc);
3432 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
3433 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
3434 regs->u_regs[3]);
3435 @@ -200,7 +200,7 @@ void show_regs(struct pt_regs *regs)
3436 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
3437 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
3438 regs->u_regs[15]);
3439 - printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
3440 + printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
3441 show_regwindow(regs);
3442 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
3443 }
3444 @@ -285,7 +285,7 @@ void arch_trigger_all_cpu_backtrace(void
3445 ((tp && tp->task) ? tp->task->pid : -1));
3446
3447 if (gp->tstate & TSTATE_PRIV) {
3448 - printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
3449 + printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
3450 (void *) gp->tpc,
3451 (void *) gp->o7,
3452 (void *) gp->i7,
3453 diff -urNp linux-3.0.4/arch/sparc/kernel/sys_sparc_32.c linux-3.0.4/arch/sparc/kernel/sys_sparc_32.c
3454 --- linux-3.0.4/arch/sparc/kernel/sys_sparc_32.c 2011-07-21 22:17:23.000000000 -0400
3455 +++ linux-3.0.4/arch/sparc/kernel/sys_sparc_32.c 2011-08-23 21:47:55.000000000 -0400
3456 @@ -56,7 +56,7 @@ unsigned long arch_get_unmapped_area(str
3457 if (ARCH_SUN4C && len > 0x20000000)
3458 return -ENOMEM;
3459 if (!addr)
3460 - addr = TASK_UNMAPPED_BASE;
3461 + addr = current->mm->mmap_base;
3462
3463 if (flags & MAP_SHARED)
3464 addr = COLOUR_ALIGN(addr);
3465 @@ -71,7 +71,7 @@ unsigned long arch_get_unmapped_area(str
3466 }
3467 if (TASK_SIZE - PAGE_SIZE - len < addr)
3468 return -ENOMEM;
3469 - if (!vmm || addr + len <= vmm->vm_start)
3470 + if (check_heap_stack_gap(vmm, addr, len))
3471 return addr;
3472 addr = vmm->vm_end;
3473 if (flags & MAP_SHARED)
3474 diff -urNp linux-3.0.4/arch/sparc/kernel/sys_sparc_64.c linux-3.0.4/arch/sparc/kernel/sys_sparc_64.c
3475 --- linux-3.0.4/arch/sparc/kernel/sys_sparc_64.c 2011-07-21 22:17:23.000000000 -0400
3476 +++ linux-3.0.4/arch/sparc/kernel/sys_sparc_64.c 2011-08-23 21:47:55.000000000 -0400
3477 @@ -124,7 +124,7 @@ unsigned long arch_get_unmapped_area(str
3478 /* We do not accept a shared mapping if it would violate
3479 * cache aliasing constraints.
3480 */
3481 - if ((flags & MAP_SHARED) &&
3482 + if ((filp || (flags & MAP_SHARED)) &&
3483 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
3484 return -EINVAL;
3485 return addr;
3486 @@ -139,6 +139,10 @@ unsigned long arch_get_unmapped_area(str
3487 if (filp || (flags & MAP_SHARED))
3488 do_color_align = 1;
3489
3490 +#ifdef CONFIG_PAX_RANDMMAP
3491 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
3492 +#endif
3493 +
3494 if (addr) {
3495 if (do_color_align)
3496 addr = COLOUR_ALIGN(addr, pgoff);
3497 @@ -146,15 +150,14 @@ unsigned long arch_get_unmapped_area(str
3498 addr = PAGE_ALIGN(addr);
3499
3500 vma = find_vma(mm, addr);
3501 - if (task_size - len >= addr &&
3502 - (!vma || addr + len <= vma->vm_start))
3503 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
3504 return addr;
3505 }
3506
3507 if (len > mm->cached_hole_size) {
3508 - start_addr = addr = mm->free_area_cache;
3509 + start_addr = addr = mm->free_area_cache;
3510 } else {
3511 - start_addr = addr = TASK_UNMAPPED_BASE;
3512 + start_addr = addr = mm->mmap_base;
3513 mm->cached_hole_size = 0;
3514 }
3515
3516 @@ -174,14 +177,14 @@ full_search:
3517 vma = find_vma(mm, VA_EXCLUDE_END);
3518 }
3519 if (unlikely(task_size < addr)) {
3520 - if (start_addr != TASK_UNMAPPED_BASE) {
3521 - start_addr = addr = TASK_UNMAPPED_BASE;
3522 + if (start_addr != mm->mmap_base) {
3523 + start_addr = addr = mm->mmap_base;
3524 mm->cached_hole_size = 0;
3525 goto full_search;
3526 }
3527 return -ENOMEM;
3528 }
3529 - if (likely(!vma || addr + len <= vma->vm_start)) {
3530 + if (likely(check_heap_stack_gap(vma, addr, len))) {
3531 /*
3532 * Remember the place where we stopped the search:
3533 */
3534 @@ -215,7 +218,7 @@ arch_get_unmapped_area_topdown(struct fi
3535 /* We do not accept a shared mapping if it would violate
3536 * cache aliasing constraints.
3537 */
3538 - if ((flags & MAP_SHARED) &&
3539 + if ((filp || (flags & MAP_SHARED)) &&
3540 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
3541 return -EINVAL;
3542 return addr;
3543 @@ -236,8 +239,7 @@ arch_get_unmapped_area_topdown(struct fi
3544 addr = PAGE_ALIGN(addr);
3545
3546 vma = find_vma(mm, addr);
3547 - if (task_size - len >= addr &&
3548 - (!vma || addr + len <= vma->vm_start))
3549 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
3550 return addr;
3551 }
3552
3553 @@ -258,7 +260,7 @@ arch_get_unmapped_area_topdown(struct fi
3554 /* make sure it can fit in the remaining address space */
3555 if (likely(addr > len)) {
3556 vma = find_vma(mm, addr-len);
3557 - if (!vma || addr <= vma->vm_start) {
3558 + if (check_heap_stack_gap(vma, addr - len, len)) {
3559 /* remember the address as a hint for next time */
3560 return (mm->free_area_cache = addr-len);
3561 }
3562 @@ -267,18 +269,18 @@ arch_get_unmapped_area_topdown(struct fi
3563 if (unlikely(mm->mmap_base < len))
3564 goto bottomup;
3565
3566 - addr = mm->mmap_base-len;
3567 - if (do_color_align)
3568 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3569 + addr = mm->mmap_base - len;
3570
3571 do {
3572 + if (do_color_align)
3573 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3574 /*
3575 * Lookup failure means no vma is above this address,
3576 * else if new region fits below vma->vm_start,
3577 * return with success:
3578 */
3579 vma = find_vma(mm, addr);
3580 - if (likely(!vma || addr+len <= vma->vm_start)) {
3581 + if (likely(check_heap_stack_gap(vma, addr, len))) {
3582 /* remember the address as a hint for next time */
3583 return (mm->free_area_cache = addr);
3584 }
3585 @@ -288,10 +290,8 @@ arch_get_unmapped_area_topdown(struct fi
3586 mm->cached_hole_size = vma->vm_start - addr;
3587
3588 /* try just below the current vma->vm_start */
3589 - addr = vma->vm_start-len;
3590 - if (do_color_align)
3591 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3592 - } while (likely(len < vma->vm_start));
3593 + addr = skip_heap_stack_gap(vma, len);
3594 + } while (!IS_ERR_VALUE(addr));
3595
3596 bottomup:
3597 /*
3598 @@ -390,6 +390,12 @@ void arch_pick_mmap_layout(struct mm_str
3599 gap == RLIM_INFINITY ||
3600 sysctl_legacy_va_layout) {
3601 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
3602 +
3603 +#ifdef CONFIG_PAX_RANDMMAP
3604 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3605 + mm->mmap_base += mm->delta_mmap;
3606 +#endif
3607 +
3608 mm->get_unmapped_area = arch_get_unmapped_area;
3609 mm->unmap_area = arch_unmap_area;
3610 } else {
3611 @@ -402,6 +408,12 @@ void arch_pick_mmap_layout(struct mm_str
3612 gap = (task_size / 6 * 5);
3613
3614 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
3615 +
3616 +#ifdef CONFIG_PAX_RANDMMAP
3617 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3618 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3619 +#endif
3620 +
3621 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3622 mm->unmap_area = arch_unmap_area_topdown;
3623 }
3624 diff -urNp linux-3.0.4/arch/sparc/kernel/traps_32.c linux-3.0.4/arch/sparc/kernel/traps_32.c
3625 --- linux-3.0.4/arch/sparc/kernel/traps_32.c 2011-07-21 22:17:23.000000000 -0400
3626 +++ linux-3.0.4/arch/sparc/kernel/traps_32.c 2011-08-23 21:48:14.000000000 -0400
3627 @@ -44,6 +44,8 @@ static void instruction_dump(unsigned lo
3628 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
3629 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
3630
3631 +extern void gr_handle_kernel_exploit(void);
3632 +
3633 void die_if_kernel(char *str, struct pt_regs *regs)
3634 {
3635 static int die_counter;
3636 @@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_
3637 count++ < 30 &&
3638 (((unsigned long) rw) >= PAGE_OFFSET) &&
3639 !(((unsigned long) rw) & 0x7)) {
3640 - printk("Caller[%08lx]: %pS\n", rw->ins[7],
3641 + printk("Caller[%08lx]: %pA\n", rw->ins[7],
3642 (void *) rw->ins[7]);
3643 rw = (struct reg_window32 *)rw->ins[6];
3644 }
3645 }
3646 printk("Instruction DUMP:");
3647 instruction_dump ((unsigned long *) regs->pc);
3648 - if(regs->psr & PSR_PS)
3649 + if(regs->psr & PSR_PS) {
3650 + gr_handle_kernel_exploit();
3651 do_exit(SIGKILL);
3652 + }
3653 do_exit(SIGSEGV);
3654 }
3655
3656 diff -urNp linux-3.0.4/arch/sparc/kernel/traps_64.c linux-3.0.4/arch/sparc/kernel/traps_64.c
3657 --- linux-3.0.4/arch/sparc/kernel/traps_64.c 2011-07-21 22:17:23.000000000 -0400
3658 +++ linux-3.0.4/arch/sparc/kernel/traps_64.c 2011-08-23 21:48:14.000000000 -0400
3659 @@ -75,7 +75,7 @@ static void dump_tl1_traplog(struct tl1_
3660 i + 1,
3661 p->trapstack[i].tstate, p->trapstack[i].tpc,
3662 p->trapstack[i].tnpc, p->trapstack[i].tt);
3663 - printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
3664 + printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
3665 }
3666 }
3667
3668 @@ -95,6 +95,12 @@ void bad_trap(struct pt_regs *regs, long
3669
3670 lvl -= 0x100;
3671 if (regs->tstate & TSTATE_PRIV) {
3672 +
3673 +#ifdef CONFIG_PAX_REFCOUNT
3674 + if (lvl == 6)
3675 + pax_report_refcount_overflow(regs);
3676 +#endif
3677 +
3678 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
3679 die_if_kernel(buffer, regs);
3680 }
3681 @@ -113,11 +119,16 @@ void bad_trap(struct pt_regs *regs, long
3682 void bad_trap_tl1(struct pt_regs *regs, long lvl)
3683 {
3684 char buffer[32];
3685 -
3686 +
3687 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
3688 0, lvl, SIGTRAP) == NOTIFY_STOP)
3689 return;
3690
3691 +#ifdef CONFIG_PAX_REFCOUNT
3692 + if (lvl == 6)
3693 + pax_report_refcount_overflow(regs);
3694 +#endif
3695 +
3696 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
3697
3698 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
3699 @@ -1141,7 +1152,7 @@ static void cheetah_log_errors(struct pt
3700 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
3701 printk("%s" "ERROR(%d): ",
3702 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
3703 - printk("TPC<%pS>\n", (void *) regs->tpc);
3704 + printk("TPC<%pA>\n", (void *) regs->tpc);
3705 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
3706 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
3707 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
3708 @@ -1748,7 +1759,7 @@ void cheetah_plus_parity_error(int type,
3709 smp_processor_id(),
3710 (type & 0x1) ? 'I' : 'D',
3711 regs->tpc);
3712 - printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
3713 + printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
3714 panic("Irrecoverable Cheetah+ parity error.");
3715 }
3716
3717 @@ -1756,7 +1767,7 @@ void cheetah_plus_parity_error(int type,
3718 smp_processor_id(),
3719 (type & 0x1) ? 'I' : 'D',
3720 regs->tpc);
3721 - printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
3722 + printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
3723 }
3724
3725 struct sun4v_error_entry {
3726 @@ -1963,9 +1974,9 @@ void sun4v_itlb_error_report(struct pt_r
3727
3728 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
3729 regs->tpc, tl);
3730 - printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
3731 + printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
3732 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
3733 - printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
3734 + printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
3735 (void *) regs->u_regs[UREG_I7]);
3736 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
3737 "pte[%lx] error[%lx]\n",
3738 @@ -1987,9 +1998,9 @@ void sun4v_dtlb_error_report(struct pt_r
3739
3740 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
3741 regs->tpc, tl);
3742 - printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
3743 + printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
3744 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
3745 - printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
3746 + printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
3747 (void *) regs->u_regs[UREG_I7]);
3748 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
3749 "pte[%lx] error[%lx]\n",
3750 @@ -2195,13 +2206,13 @@ void show_stack(struct task_struct *tsk,
3751 fp = (unsigned long)sf->fp + STACK_BIAS;
3752 }
3753
3754 - printk(" [%016lx] %pS\n", pc, (void *) pc);
3755 + printk(" [%016lx] %pA\n", pc, (void *) pc);
3756 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3757 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
3758 int index = tsk->curr_ret_stack;
3759 if (tsk->ret_stack && index >= graph) {
3760 pc = tsk->ret_stack[index - graph].ret;
3761 - printk(" [%016lx] %pS\n", pc, (void *) pc);
3762 + printk(" [%016lx] %pA\n", pc, (void *) pc);
3763 graph++;
3764 }
3765 }
3766 @@ -2226,6 +2237,8 @@ static inline struct reg_window *kernel_
3767 return (struct reg_window *) (fp + STACK_BIAS);
3768 }
3769
3770 +extern void gr_handle_kernel_exploit(void);
3771 +
3772 void die_if_kernel(char *str, struct pt_regs *regs)
3773 {
3774 static int die_counter;
3775 @@ -2254,7 +2267,7 @@ void die_if_kernel(char *str, struct pt_
3776 while (rw &&
3777 count++ < 30 &&
3778 kstack_valid(tp, (unsigned long) rw)) {
3779 - printk("Caller[%016lx]: %pS\n", rw->ins[7],
3780 + printk("Caller[%016lx]: %pA\n", rw->ins[7],
3781 (void *) rw->ins[7]);
3782
3783 rw = kernel_stack_up(rw);
3784 @@ -2267,8 +2280,10 @@ void die_if_kernel(char *str, struct pt_
3785 }
3786 user_instruction_dump ((unsigned int __user *) regs->tpc);
3787 }
3788 - if (regs->tstate & TSTATE_PRIV)
3789 + if (regs->tstate & TSTATE_PRIV) {
3790 + gr_handle_kernel_exploit();
3791 do_exit(SIGKILL);
3792 + }
3793 do_exit(SIGSEGV);
3794 }
3795 EXPORT_SYMBOL(die_if_kernel);
3796 diff -urNp linux-3.0.4/arch/sparc/kernel/unaligned_64.c linux-3.0.4/arch/sparc/kernel/unaligned_64.c
3797 --- linux-3.0.4/arch/sparc/kernel/unaligned_64.c 2011-09-02 18:11:21.000000000 -0400
3798 +++ linux-3.0.4/arch/sparc/kernel/unaligned_64.c 2011-08-23 21:48:14.000000000 -0400
3799 @@ -279,7 +279,7 @@ static void log_unaligned(struct pt_regs
3800 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
3801
3802 if (__ratelimit(&ratelimit)) {
3803 - printk("Kernel unaligned access at TPC[%lx] %pS\n",
3804 + printk("Kernel unaligned access at TPC[%lx] %pA\n",
3805 regs->tpc, (void *) regs->tpc);
3806 }
3807 }
3808 diff -urNp linux-3.0.4/arch/sparc/lib/atomic_64.S linux-3.0.4/arch/sparc/lib/atomic_64.S
3809 --- linux-3.0.4/arch/sparc/lib/atomic_64.S 2011-07-21 22:17:23.000000000 -0400
3810 +++ linux-3.0.4/arch/sparc/lib/atomic_64.S 2011-08-23 21:47:55.000000000 -0400
3811 @@ -18,7 +18,12 @@
3812 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
3813 BACKOFF_SETUP(%o2)
3814 1: lduw [%o1], %g1
3815 - add %g1, %o0, %g7
3816 + addcc %g1, %o0, %g7
3817 +
3818 +#ifdef CONFIG_PAX_REFCOUNT
3819 + tvs %icc, 6
3820 +#endif
3821 +
3822 cas [%o1], %g1, %g7
3823 cmp %g1, %g7
3824 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3825 @@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = at
3826 2: BACKOFF_SPIN(%o2, %o3, 1b)
3827 .size atomic_add, .-atomic_add
3828
3829 + .globl atomic_add_unchecked
3830 + .type atomic_add_unchecked,#function
3831 +atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
3832 + BACKOFF_SETUP(%o2)
3833 +1: lduw [%o1], %g1
3834 + add %g1, %o0, %g7
3835 + cas [%o1], %g1, %g7
3836 + cmp %g1, %g7
3837 + bne,pn %icc, 2f
3838 + nop
3839 + retl
3840 + nop
3841 +2: BACKOFF_SPIN(%o2, %o3, 1b)
3842 + .size atomic_add_unchecked, .-atomic_add_unchecked
3843 +
3844 .globl atomic_sub
3845 .type atomic_sub,#function
3846 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
3847 BACKOFF_SETUP(%o2)
3848 1: lduw [%o1], %g1
3849 - sub %g1, %o0, %g7
3850 + subcc %g1, %o0, %g7
3851 +
3852 +#ifdef CONFIG_PAX_REFCOUNT
3853 + tvs %icc, 6
3854 +#endif
3855 +
3856 cas [%o1], %g1, %g7
3857 cmp %g1, %g7
3858 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3859 @@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = at
3860 2: BACKOFF_SPIN(%o2, %o3, 1b)
3861 .size atomic_sub, .-atomic_sub
3862
3863 + .globl atomic_sub_unchecked
3864 + .type atomic_sub_unchecked,#function
3865 +atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
3866 + BACKOFF_SETUP(%o2)
3867 +1: lduw [%o1], %g1
3868 + sub %g1, %o0, %g7
3869 + cas [%o1], %g1, %g7
3870 + cmp %g1, %g7
3871 + bne,pn %icc, 2f
3872 + nop
3873 + retl
3874 + nop
3875 +2: BACKOFF_SPIN(%o2, %o3, 1b)
3876 + .size atomic_sub_unchecked, .-atomic_sub_unchecked
3877 +
3878 .globl atomic_add_ret
3879 .type atomic_add_ret,#function
3880 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
3881 BACKOFF_SETUP(%o2)
3882 1: lduw [%o1], %g1
3883 - add %g1, %o0, %g7
3884 + addcc %g1, %o0, %g7
3885 +
3886 +#ifdef CONFIG_PAX_REFCOUNT
3887 + tvs %icc, 6
3888 +#endif
3889 +
3890 cas [%o1], %g1, %g7
3891 cmp %g1, %g7
3892 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3893 @@ -58,12 +103,33 @@ atomic_add_ret: /* %o0 = increment, %o1
3894 2: BACKOFF_SPIN(%o2, %o3, 1b)
3895 .size atomic_add_ret, .-atomic_add_ret
3896
3897 + .globl atomic_add_ret_unchecked
3898 + .type atomic_add_ret_unchecked,#function
3899 +atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
3900 + BACKOFF_SETUP(%o2)
3901 +1: lduw [%o1], %g1
3902 + addcc %g1, %o0, %g7
3903 + cas [%o1], %g1, %g7
3904 + cmp %g1, %g7
3905 + bne,pn %icc, 2f
3906 + add %g7, %o0, %g7
3907 + sra %g7, 0, %o0
3908 + retl
3909 + nop
3910 +2: BACKOFF_SPIN(%o2, %o3, 1b)
3911 + .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
3912 +
3913 .globl atomic_sub_ret
3914 .type atomic_sub_ret,#function
3915 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
3916 BACKOFF_SETUP(%o2)
3917 1: lduw [%o1], %g1
3918 - sub %g1, %o0, %g7
3919 + subcc %g1, %o0, %g7
3920 +
3921 +#ifdef CONFIG_PAX_REFCOUNT
3922 + tvs %icc, 6
3923 +#endif
3924 +
3925 cas [%o1], %g1, %g7
3926 cmp %g1, %g7
3927 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3928 @@ -78,7 +144,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1
3929 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
3930 BACKOFF_SETUP(%o2)
3931 1: ldx [%o1], %g1
3932 - add %g1, %o0, %g7
3933 + addcc %g1, %o0, %g7
3934 +
3935 +#ifdef CONFIG_PAX_REFCOUNT
3936 + tvs %xcc, 6
3937 +#endif
3938 +
3939 casx [%o1], %g1, %g7
3940 cmp %g1, %g7
3941 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
3942 @@ -88,12 +159,32 @@ atomic64_add: /* %o0 = increment, %o1 =
3943 2: BACKOFF_SPIN(%o2, %o3, 1b)
3944 .size atomic64_add, .-atomic64_add
3945
3946 + .globl atomic64_add_unchecked
3947 + .type atomic64_add_unchecked,#function
3948 +atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
3949 + BACKOFF_SETUP(%o2)
3950 +1: ldx [%o1], %g1
3951 + addcc %g1, %o0, %g7
3952 + casx [%o1], %g1, %g7
3953 + cmp %g1, %g7
3954 + bne,pn %xcc, 2f
3955 + nop
3956 + retl
3957 + nop
3958 +2: BACKOFF_SPIN(%o2, %o3, 1b)
3959 + .size atomic64_add_unchecked, .-atomic64_add_unchecked
3960 +
3961 .globl atomic64_sub
3962 .type atomic64_sub,#function
3963 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
3964 BACKOFF_SETUP(%o2)
3965 1: ldx [%o1], %g1
3966 - sub %g1, %o0, %g7
3967 + subcc %g1, %o0, %g7
3968 +
3969 +#ifdef CONFIG_PAX_REFCOUNT
3970 + tvs %xcc, 6
3971 +#endif
3972 +
3973 casx [%o1], %g1, %g7
3974 cmp %g1, %g7
3975 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
3976 @@ -103,12 +194,32 @@ atomic64_sub: /* %o0 = decrement, %o1 =
3977 2: BACKOFF_SPIN(%o2, %o3, 1b)
3978 .size atomic64_sub, .-atomic64_sub
3979
3980 + .globl atomic64_sub_unchecked
3981 + .type atomic64_sub_unchecked,#function
3982 +atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
3983 + BACKOFF_SETUP(%o2)
3984 +1: ldx [%o1], %g1
3985 + subcc %g1, %o0, %g7
3986 + casx [%o1], %g1, %g7
3987 + cmp %g1, %g7
3988 + bne,pn %xcc, 2f
3989 + nop
3990 + retl
3991 + nop
3992 +2: BACKOFF_SPIN(%o2, %o3, 1b)
3993 + .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
3994 +
3995 .globl atomic64_add_ret
3996 .type atomic64_add_ret,#function
3997 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
3998 BACKOFF_SETUP(%o2)
3999 1: ldx [%o1], %g1
4000 - add %g1, %o0, %g7
4001 + addcc %g1, %o0, %g7
4002 +
4003 +#ifdef CONFIG_PAX_REFCOUNT
4004 + tvs %xcc, 6
4005 +#endif
4006 +
4007 casx [%o1], %g1, %g7
4008 cmp %g1, %g7
4009 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4010 @@ -118,12 +229,33 @@ atomic64_add_ret: /* %o0 = increment, %o
4011 2: BACKOFF_SPIN(%o2, %o3, 1b)
4012 .size atomic64_add_ret, .-atomic64_add_ret
4013
4014 + .globl atomic64_add_ret_unchecked
4015 + .type atomic64_add_ret_unchecked,#function
4016 +atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4017 + BACKOFF_SETUP(%o2)
4018 +1: ldx [%o1], %g1
4019 + addcc %g1, %o0, %g7
4020 + casx [%o1], %g1, %g7
4021 + cmp %g1, %g7
4022 + bne,pn %xcc, 2f
4023 + add %g7, %o0, %g7
4024 + mov %g7, %o0
4025 + retl
4026 + nop
4027 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4028 + .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
4029 +
4030 .globl atomic64_sub_ret
4031 .type atomic64_sub_ret,#function
4032 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4033 BACKOFF_SETUP(%o2)
4034 1: ldx [%o1], %g1
4035 - sub %g1, %o0, %g7
4036 + subcc %g1, %o0, %g7
4037 +
4038 +#ifdef CONFIG_PAX_REFCOUNT
4039 + tvs %xcc, 6
4040 +#endif
4041 +
4042 casx [%o1], %g1, %g7
4043 cmp %g1, %g7
4044 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4045 diff -urNp linux-3.0.4/arch/sparc/lib/ksyms.c linux-3.0.4/arch/sparc/lib/ksyms.c
4046 --- linux-3.0.4/arch/sparc/lib/ksyms.c 2011-07-21 22:17:23.000000000 -0400
4047 +++ linux-3.0.4/arch/sparc/lib/ksyms.c 2011-08-23 21:48:14.000000000 -0400
4048 @@ -142,12 +142,18 @@ EXPORT_SYMBOL(__downgrade_write);
4049
4050 /* Atomic counter implementation. */
4051 EXPORT_SYMBOL(atomic_add);
4052 +EXPORT_SYMBOL(atomic_add_unchecked);
4053 EXPORT_SYMBOL(atomic_add_ret);
4054 +EXPORT_SYMBOL(atomic_add_ret_unchecked);
4055 EXPORT_SYMBOL(atomic_sub);
4056 +EXPORT_SYMBOL(atomic_sub_unchecked);
4057 EXPORT_SYMBOL(atomic_sub_ret);
4058 EXPORT_SYMBOL(atomic64_add);
4059 +EXPORT_SYMBOL(atomic64_add_unchecked);
4060 EXPORT_SYMBOL(atomic64_add_ret);
4061 +EXPORT_SYMBOL(atomic64_add_ret_unchecked);
4062 EXPORT_SYMBOL(atomic64_sub);
4063 +EXPORT_SYMBOL(atomic64_sub_unchecked);
4064 EXPORT_SYMBOL(atomic64_sub_ret);
4065
4066 /* Atomic bit operations. */
4067 diff -urNp linux-3.0.4/arch/sparc/lib/Makefile linux-3.0.4/arch/sparc/lib/Makefile
4068 --- linux-3.0.4/arch/sparc/lib/Makefile 2011-09-02 18:11:21.000000000 -0400
4069 +++ linux-3.0.4/arch/sparc/lib/Makefile 2011-08-23 21:47:55.000000000 -0400
4070 @@ -2,7 +2,7 @@
4071 #
4072
4073 asflags-y := -ansi -DST_DIV0=0x02
4074 -ccflags-y := -Werror
4075 +#ccflags-y := -Werror
4076
4077 lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
4078 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
4079 diff -urNp linux-3.0.4/arch/sparc/Makefile linux-3.0.4/arch/sparc/Makefile
4080 --- linux-3.0.4/arch/sparc/Makefile 2011-07-21 22:17:23.000000000 -0400
4081 +++ linux-3.0.4/arch/sparc/Makefile 2011-08-23 21:48:14.000000000 -0400
4082 @@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc
4083 # Export what is needed by arch/sparc/boot/Makefile
4084 export VMLINUX_INIT VMLINUX_MAIN
4085 VMLINUX_INIT := $(head-y) $(init-y)
4086 -VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
4087 +VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
4088 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
4089 VMLINUX_MAIN += $(drivers-y) $(net-y)
4090
4091 diff -urNp linux-3.0.4/arch/sparc/mm/fault_32.c linux-3.0.4/arch/sparc/mm/fault_32.c
4092 --- linux-3.0.4/arch/sparc/mm/fault_32.c 2011-07-21 22:17:23.000000000 -0400
4093 +++ linux-3.0.4/arch/sparc/mm/fault_32.c 2011-08-23 21:47:55.000000000 -0400
4094 @@ -22,6 +22,9 @@
4095 #include <linux/interrupt.h>
4096 #include <linux/module.h>
4097 #include <linux/kdebug.h>
4098 +#include <linux/slab.h>
4099 +#include <linux/pagemap.h>
4100 +#include <linux/compiler.h>
4101
4102 #include <asm/system.h>
4103 #include <asm/page.h>
4104 @@ -209,6 +212,268 @@ static unsigned long compute_si_addr(str
4105 return safe_compute_effective_address(regs, insn);
4106 }
4107
4108 +#ifdef CONFIG_PAX_PAGEEXEC
4109 +#ifdef CONFIG_PAX_DLRESOLVE
4110 +static void pax_emuplt_close(struct vm_area_struct *vma)
4111 +{
4112 + vma->vm_mm->call_dl_resolve = 0UL;
4113 +}
4114 +
4115 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4116 +{
4117 + unsigned int *kaddr;
4118 +
4119 + vmf->page = alloc_page(GFP_HIGHUSER);
4120 + if (!vmf->page)
4121 + return VM_FAULT_OOM;
4122 +
4123 + kaddr = kmap(vmf->page);
4124 + memset(kaddr, 0, PAGE_SIZE);
4125 + kaddr[0] = 0x9DE3BFA8U; /* save */
4126 + flush_dcache_page(vmf->page);
4127 + kunmap(vmf->page);
4128 + return VM_FAULT_MAJOR;
4129 +}
4130 +
4131 +static const struct vm_operations_struct pax_vm_ops = {
4132 + .close = pax_emuplt_close,
4133 + .fault = pax_emuplt_fault
4134 +};
4135 +
4136 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
4137 +{
4138 + int ret;
4139 +
4140 + INIT_LIST_HEAD(&vma->anon_vma_chain);
4141 + vma->vm_mm = current->mm;
4142 + vma->vm_start = addr;
4143 + vma->vm_end = addr + PAGE_SIZE;
4144 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
4145 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
4146 + vma->vm_ops = &pax_vm_ops;
4147 +
4148 + ret = insert_vm_struct(current->mm, vma);
4149 + if (ret)
4150 + return ret;
4151 +
4152 + ++current->mm->total_vm;
4153 + return 0;
4154 +}
4155 +#endif
4156 +
4157 +/*
4158 + * PaX: decide what to do with offenders (regs->pc = fault address)
4159 + *
4160 + * returns 1 when task should be killed
4161 + * 2 when patched PLT trampoline was detected
4162 + * 3 when unpatched PLT trampoline was detected
4163 + */
4164 +static int pax_handle_fetch_fault(struct pt_regs *regs)
4165 +{
4166 +
4167 +#ifdef CONFIG_PAX_EMUPLT
4168 + int err;
4169 +
4170 + do { /* PaX: patched PLT emulation #1 */
4171 + unsigned int sethi1, sethi2, jmpl;
4172 +
4173 + err = get_user(sethi1, (unsigned int *)regs->pc);
4174 + err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
4175 + err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
4176 +
4177 + if (err)
4178 + break;
4179 +
4180 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
4181 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
4182 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
4183 + {
4184 + unsigned int addr;
4185 +
4186 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
4187 + addr = regs->u_regs[UREG_G1];
4188 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4189 + regs->pc = addr;
4190 + regs->npc = addr+4;
4191 + return 2;
4192 + }
4193 + } while (0);
4194 +
4195 + { /* PaX: patched PLT emulation #2 */
4196 + unsigned int ba;
4197 +
4198 + err = get_user(ba, (unsigned int *)regs->pc);
4199 +
4200 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
4201 + unsigned int addr;
4202 +
4203 + addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
4204 + regs->pc = addr;
4205 + regs->npc = addr+4;
4206 + return 2;
4207 + }
4208 + }
4209 +
4210 + do { /* PaX: patched PLT emulation #3 */
4211 + unsigned int sethi, jmpl, nop;
4212 +
4213 + err = get_user(sethi, (unsigned int *)regs->pc);
4214 + err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
4215 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
4216 +
4217 + if (err)
4218 + break;
4219 +
4220 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4221 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
4222 + nop == 0x01000000U)
4223 + {
4224 + unsigned int addr;
4225 +
4226 + addr = (sethi & 0x003FFFFFU) << 10;
4227 + regs->u_regs[UREG_G1] = addr;
4228 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4229 + regs->pc = addr;
4230 + regs->npc = addr+4;
4231 + return 2;
4232 + }
4233 + } while (0);
4234 +
4235 + do { /* PaX: unpatched PLT emulation step 1 */
4236 + unsigned int sethi, ba, nop;
4237 +
4238 + err = get_user(sethi, (unsigned int *)regs->pc);
4239 + err |= get_user(ba, (unsigned int *)(regs->pc+4));
4240 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
4241 +
4242 + if (err)
4243 + break;
4244 +
4245 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4246 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
4247 + nop == 0x01000000U)
4248 + {
4249 + unsigned int addr, save, call;
4250 +
4251 + if ((ba & 0xFFC00000U) == 0x30800000U)
4252 + addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
4253 + else
4254 + addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
4255 +
4256 + err = get_user(save, (unsigned int *)addr);
4257 + err |= get_user(call, (unsigned int *)(addr+4));
4258 + err |= get_user(nop, (unsigned int *)(addr+8));
4259 + if (err)
4260 + break;
4261 +
4262 +#ifdef CONFIG_PAX_DLRESOLVE
4263 + if (save == 0x9DE3BFA8U &&
4264 + (call & 0xC0000000U) == 0x40000000U &&
4265 + nop == 0x01000000U)
4266 + {
4267 + struct vm_area_struct *vma;
4268 + unsigned long call_dl_resolve;
4269 +
4270 + down_read(&current->mm->mmap_sem);
4271 + call_dl_resolve = current->mm->call_dl_resolve;
4272 + up_read(&current->mm->mmap_sem);
4273 + if (likely(call_dl_resolve))
4274 + goto emulate;
4275 +
4276 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
4277 +
4278 + down_write(&current->mm->mmap_sem);
4279 + if (current->mm->call_dl_resolve) {
4280 + call_dl_resolve = current->mm->call_dl_resolve;
4281 + up_write(&current->mm->mmap_sem);
4282 + if (vma)
4283 + kmem_cache_free(vm_area_cachep, vma);
4284 + goto emulate;
4285 + }
4286 +
4287 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
4288 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
4289 + up_write(&current->mm->mmap_sem);
4290 + if (vma)
4291 + kmem_cache_free(vm_area_cachep, vma);
4292 + return 1;
4293 + }
4294 +
4295 + if (pax_insert_vma(vma, call_dl_resolve)) {
4296 + up_write(&current->mm->mmap_sem);
4297 + kmem_cache_free(vm_area_cachep, vma);
4298 + return 1;
4299 + }
4300 +
4301 + current->mm->call_dl_resolve = call_dl_resolve;
4302 + up_write(&current->mm->mmap_sem);
4303 +
4304 +emulate:
4305 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4306 + regs->pc = call_dl_resolve;
4307 + regs->npc = addr+4;
4308 + return 3;
4309 + }
4310 +#endif
4311 +
4312 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
4313 + if ((save & 0xFFC00000U) == 0x05000000U &&
4314 + (call & 0xFFFFE000U) == 0x85C0A000U &&
4315 + nop == 0x01000000U)
4316 + {
4317 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4318 + regs->u_regs[UREG_G2] = addr + 4;
4319 + addr = (save & 0x003FFFFFU) << 10;
4320 + addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4321 + regs->pc = addr;
4322 + regs->npc = addr+4;
4323 + return 3;
4324 + }
4325 + }
4326 + } while (0);
4327 +
4328 + do { /* PaX: unpatched PLT emulation step 2 */
4329 + unsigned int save, call, nop;
4330 +
4331 + err = get_user(save, (unsigned int *)(regs->pc-4));
4332 + err |= get_user(call, (unsigned int *)regs->pc);
4333 + err |= get_user(nop, (unsigned int *)(regs->pc+4));
4334 + if (err)
4335 + break;
4336 +
4337 + if (save == 0x9DE3BFA8U &&
4338 + (call & 0xC0000000U) == 0x40000000U &&
4339 + nop == 0x01000000U)
4340 + {
4341 + unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
4342 +
4343 + regs->u_regs[UREG_RETPC] = regs->pc;
4344 + regs->pc = dl_resolve;
4345 + regs->npc = dl_resolve+4;
4346 + return 3;
4347 + }
4348 + } while (0);
4349 +#endif
4350 +
4351 + return 1;
4352 +}
4353 +
4354 +void pax_report_insns(void *pc, void *sp)
4355 +{
4356 + unsigned long i;
4357 +
4358 + printk(KERN_ERR "PAX: bytes at PC: ");
4359 + for (i = 0; i < 8; i++) {
4360 + unsigned int c;
4361 + if (get_user(c, (unsigned int *)pc+i))
4362 + printk(KERN_CONT "???????? ");
4363 + else
4364 + printk(KERN_CONT "%08x ", c);
4365 + }
4366 + printk("\n");
4367 +}
4368 +#endif
4369 +
4370 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
4371 int text_fault)
4372 {
4373 @@ -281,6 +546,24 @@ good_area:
4374 if(!(vma->vm_flags & VM_WRITE))
4375 goto bad_area;
4376 } else {
4377 +
4378 +#ifdef CONFIG_PAX_PAGEEXEC
4379 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
4380 + up_read(&mm->mmap_sem);
4381 + switch (pax_handle_fetch_fault(regs)) {
4382 +
4383 +#ifdef CONFIG_PAX_EMUPLT
4384 + case 2:
4385 + case 3:
4386 + return;
4387 +#endif
4388 +
4389 + }
4390 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
4391 + do_group_exit(SIGKILL);
4392 + }
4393 +#endif
4394 +
4395 /* Allow reads even for write-only mappings */
4396 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
4397 goto bad_area;
4398 diff -urNp linux-3.0.4/arch/sparc/mm/fault_64.c linux-3.0.4/arch/sparc/mm/fault_64.c
4399 --- linux-3.0.4/arch/sparc/mm/fault_64.c 2011-07-21 22:17:23.000000000 -0400
4400 +++ linux-3.0.4/arch/sparc/mm/fault_64.c 2011-08-23 21:48:14.000000000 -0400
4401 @@ -21,6 +21,9 @@
4402 #include <linux/kprobes.h>
4403 #include <linux/kdebug.h>
4404 #include <linux/percpu.h>
4405 +#include <linux/slab.h>
4406 +#include <linux/pagemap.h>
4407 +#include <linux/compiler.h>
4408
4409 #include <asm/page.h>
4410 #include <asm/pgtable.h>
4411 @@ -74,7 +77,7 @@ static void __kprobes bad_kernel_pc(stru
4412 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
4413 regs->tpc);
4414 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
4415 - printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
4416 + printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
4417 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
4418 dump_stack();
4419 unhandled_fault(regs->tpc, current, regs);
4420 @@ -272,6 +275,457 @@ static void noinline __kprobes bogus_32b
4421 show_regs(regs);
4422 }
4423
4424 +#ifdef CONFIG_PAX_PAGEEXEC
4425 +#ifdef CONFIG_PAX_DLRESOLVE
4426 +static void pax_emuplt_close(struct vm_area_struct *vma)
4427 +{
4428 + vma->vm_mm->call_dl_resolve = 0UL;
4429 +}
4430 +
4431 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4432 +{
4433 + unsigned int *kaddr;
4434 +
4435 + vmf->page = alloc_page(GFP_HIGHUSER);
4436 + if (!vmf->page)
4437 + return VM_FAULT_OOM;
4438 +
4439 + kaddr = kmap(vmf->page);
4440 + memset(kaddr, 0, PAGE_SIZE);
4441 + kaddr[0] = 0x9DE3BFA8U; /* save */
4442 + flush_dcache_page(vmf->page);
4443 + kunmap(vmf->page);
4444 + return VM_FAULT_MAJOR;
4445 +}
4446 +
4447 +static const struct vm_operations_struct pax_vm_ops = {
4448 + .close = pax_emuplt_close,
4449 + .fault = pax_emuplt_fault
4450 +};
4451 +
4452 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
4453 +{
4454 + int ret;
4455 +
4456 + INIT_LIST_HEAD(&vma->anon_vma_chain);
4457 + vma->vm_mm = current->mm;
4458 + vma->vm_start = addr;
4459 + vma->vm_end = addr + PAGE_SIZE;
4460 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
4461 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
4462 + vma->vm_ops = &pax_vm_ops;
4463 +
4464 + ret = insert_vm_struct(current->mm, vma);
4465 + if (ret)
4466 + return ret;
4467 +
4468 + ++current->mm->total_vm;
4469 + return 0;
4470 +}
4471 +#endif
4472 +
4473 +/*
4474 + * PaX: decide what to do with offenders (regs->tpc = fault address)
4475 + *
4476 + * returns 1 when task should be killed
4477 + * 2 when patched PLT trampoline was detected
4478 + * 3 when unpatched PLT trampoline was detected
4479 + */
4480 +static int pax_handle_fetch_fault(struct pt_regs *regs)
4481 +{
4482 +
4483 +#ifdef CONFIG_PAX_EMUPLT
4484 + int err;
4485 +
4486 + do { /* PaX: patched PLT emulation #1 */
4487 + unsigned int sethi1, sethi2, jmpl;
4488 +
4489 + err = get_user(sethi1, (unsigned int *)regs->tpc);
4490 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
4491 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
4492 +
4493 + if (err)
4494 + break;
4495 +
4496 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
4497 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
4498 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
4499 + {
4500 + unsigned long addr;
4501 +
4502 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
4503 + addr = regs->u_regs[UREG_G1];
4504 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
4505 +
4506 + if (test_thread_flag(TIF_32BIT))
4507 + addr &= 0xFFFFFFFFUL;
4508 +
4509 + regs->tpc = addr;
4510 + regs->tnpc = addr+4;
4511 + return 2;
4512 + }
4513 + } while (0);
4514 +
4515 + { /* PaX: patched PLT emulation #2 */
4516 + unsigned int ba;
4517 +
4518 + err = get_user(ba, (unsigned int *)regs->tpc);
4519 +
4520 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
4521 + unsigned long addr;
4522 +
4523 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
4524 +
4525 + if (test_thread_flag(TIF_32BIT))
4526 + addr &= 0xFFFFFFFFUL;
4527 +
4528 + regs->tpc = addr;
4529 + regs->tnpc = addr+4;
4530 + return 2;
4531 + }
4532 + }
4533 +
4534 + do { /* PaX: patched PLT emulation #3 */
4535 + unsigned int sethi, jmpl, nop;
4536 +
4537 + err = get_user(sethi, (unsigned int *)regs->tpc);
4538 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
4539 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
4540 +
4541 + if (err)
4542 + break;
4543 +
4544 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4545 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
4546 + nop == 0x01000000U)
4547 + {
4548 + unsigned long addr;
4549 +
4550 + addr = (sethi & 0x003FFFFFU) << 10;
4551 + regs->u_regs[UREG_G1] = addr;
4552 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
4553 +
4554 + if (test_thread_flag(TIF_32BIT))
4555 + addr &= 0xFFFFFFFFUL;
4556 +
4557 + regs->tpc = addr;
4558 + regs->tnpc = addr+4;
4559 + return 2;
4560 + }
4561 + } while (0);
4562 +
4563 + do { /* PaX: patched PLT emulation #4 */
4564 + unsigned int sethi, mov1, call, mov2;
4565 +
4566 + err = get_user(sethi, (unsigned int *)regs->tpc);
4567 + err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
4568 + err |= get_user(call, (unsigned int *)(regs->tpc+8));
4569 + err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
4570 +
4571 + if (err)
4572 + break;
4573 +
4574 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4575 + mov1 == 0x8210000FU &&
4576 + (call & 0xC0000000U) == 0x40000000U &&
4577 + mov2 == 0x9E100001U)
4578 + {
4579 + unsigned long addr;
4580 +
4581 + regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
4582 + addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
4583 +
4584 + if (test_thread_flag(TIF_32BIT))
4585 + addr &= 0xFFFFFFFFUL;
4586 +
4587 + regs->tpc = addr;
4588 + regs->tnpc = addr+4;
4589 + return 2;
4590 + }
4591 + } while (0);
4592 +
4593 + do { /* PaX: patched PLT emulation #5 */
4594 + unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
4595 +
4596 + err = get_user(sethi, (unsigned int *)regs->tpc);
4597 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
4598 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
4599 + err |= get_user(or1, (unsigned int *)(regs->tpc+12));
4600 + err |= get_user(or2, (unsigned int *)(regs->tpc+16));
4601 + err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
4602 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
4603 + err |= get_user(nop, (unsigned int *)(regs->tpc+28));
4604 +
4605 + if (err)
4606 + break;
4607 +
4608 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4609 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
4610 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
4611 + (or1 & 0xFFFFE000U) == 0x82106000U &&
4612 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
4613 + sllx == 0x83287020U &&
4614 + jmpl == 0x81C04005U &&
4615 + nop == 0x01000000U)
4616 + {
4617 + unsigned long addr;
4618 +
4619 + regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
4620 + regs->u_regs[UREG_G1] <<= 32;
4621 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
4622 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
4623 + regs->tpc = addr;
4624 + regs->tnpc = addr+4;
4625 + return 2;
4626 + }
4627 + } while (0);
4628 +
4629 + do { /* PaX: patched PLT emulation #6 */
4630 + unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
4631 +
4632 + err = get_user(sethi, (unsigned int *)regs->tpc);
4633 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
4634 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
4635 + err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
4636 + err |= get_user(or, (unsigned int *)(regs->tpc+16));
4637 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
4638 + err |= get_user(nop, (unsigned int *)(regs->tpc+24));
4639 +
4640 + if (err)
4641 + break;
4642 +
4643 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4644 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
4645 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
4646 + sllx == 0x83287020U &&
4647 + (or & 0xFFFFE000U) == 0x8A116000U &&
4648 + jmpl == 0x81C04005U &&
4649 + nop == 0x01000000U)
4650 + {
4651 + unsigned long addr;
4652 +
4653 + regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
4654 + regs->u_regs[UREG_G1] <<= 32;
4655 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
4656 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
4657 + regs->tpc = addr;
4658 + regs->tnpc = addr+4;
4659 + return 2;
4660 + }
4661 + } while (0);
4662 +
4663 + do { /* PaX: unpatched PLT emulation step 1 */
4664 + unsigned int sethi, ba, nop;
4665 +
4666 + err = get_user(sethi, (unsigned int *)regs->tpc);
4667 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
4668 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
4669 +
4670 + if (err)
4671 + break;
4672 +
4673 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4674 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
4675 + nop == 0x01000000U)
4676 + {
4677 + unsigned long addr;
4678 + unsigned int save, call;
4679 + unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
4680 +
4681 + if ((ba & 0xFFC00000U) == 0x30800000U)
4682 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
4683 + else
4684 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
4685 +
4686 + if (test_thread_flag(TIF_32BIT))
4687 + addr &= 0xFFFFFFFFUL;
4688 +
4689 + err = get_user(save, (unsigned int *)addr);
4690 + err |= get_user(call, (unsigned int *)(addr+4));
4691 + err |= get_user(nop, (unsigned int *)(addr+8));
4692 + if (err)
4693 + break;
4694 +
4695 +#ifdef CONFIG_PAX_DLRESOLVE
4696 + if (save == 0x9DE3BFA8U &&
4697 + (call & 0xC0000000U) == 0x40000000U &&
4698 + nop == 0x01000000U)
4699 + {
4700 + struct vm_area_struct *vma;
4701 + unsigned long call_dl_resolve;
4702 +
4703 + down_read(&current->mm->mmap_sem);
4704 + call_dl_resolve = current->mm->call_dl_resolve;
4705 + up_read(&current->mm->mmap_sem);
4706 + if (likely(call_dl_resolve))
4707 + goto emulate;
4708 +
4709 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
4710 +
4711 + down_write(&current->mm->mmap_sem);
4712 + if (current->mm->call_dl_resolve) {
4713 + call_dl_resolve = current->mm->call_dl_resolve;
4714 + up_write(&current->mm->mmap_sem);
4715 + if (vma)
4716 + kmem_cache_free(vm_area_cachep, vma);
4717 + goto emulate;
4718 + }
4719 +
4720 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
4721 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
4722 + up_write(&current->mm->mmap_sem);
4723 + if (vma)
4724 + kmem_cache_free(vm_area_cachep, vma);
4725 + return 1;
4726 + }
4727 +
4728 + if (pax_insert_vma(vma, call_dl_resolve)) {
4729 + up_write(&current->mm->mmap_sem);
4730 + kmem_cache_free(vm_area_cachep, vma);
4731 + return 1;
4732 + }
4733 +
4734 + current->mm->call_dl_resolve = call_dl_resolve;
4735 + up_write(&current->mm->mmap_sem);
4736 +
4737 +emulate:
4738 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4739 + regs->tpc = call_dl_resolve;
4740 + regs->tnpc = addr+4;
4741 + return 3;
4742 + }
4743 +#endif
4744 +
4745 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
4746 + if ((save & 0xFFC00000U) == 0x05000000U &&
4747 + (call & 0xFFFFE000U) == 0x85C0A000U &&
4748 + nop == 0x01000000U)
4749 + {
4750 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4751 + regs->u_regs[UREG_G2] = addr + 4;
4752 + addr = (save & 0x003FFFFFU) << 10;
4753 + addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
4754 +
4755 + if (test_thread_flag(TIF_32BIT))
4756 + addr &= 0xFFFFFFFFUL;
4757 +
4758 + regs->tpc = addr;
4759 + regs->tnpc = addr+4;
4760 + return 3;
4761 + }
4762 +
4763 + /* PaX: 64-bit PLT stub */
4764 + err = get_user(sethi1, (unsigned int *)addr);
4765 + err |= get_user(sethi2, (unsigned int *)(addr+4));
4766 + err |= get_user(or1, (unsigned int *)(addr+8));
4767 + err |= get_user(or2, (unsigned int *)(addr+12));
4768 + err |= get_user(sllx, (unsigned int *)(addr+16));
4769 + err |= get_user(add, (unsigned int *)(addr+20));
4770 + err |= get_user(jmpl, (unsigned int *)(addr+24));
4771 + err |= get_user(nop, (unsigned int *)(addr+28));
4772 + if (err)
4773 + break;
4774 +
4775 + if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
4776 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
4777 + (or1 & 0xFFFFE000U) == 0x88112000U &&
4778 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
4779 + sllx == 0x89293020U &&
4780 + add == 0x8A010005U &&
4781 + jmpl == 0x89C14000U &&
4782 + nop == 0x01000000U)
4783 + {
4784 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4785 + regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
4786 + regs->u_regs[UREG_G4] <<= 32;
4787 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
4788 + regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
4789 + regs->u_regs[UREG_G4] = addr + 24;
4790 + addr = regs->u_regs[UREG_G5];
4791 + regs->tpc = addr;
4792 + regs->tnpc = addr+4;
4793 + return 3;
4794 + }
4795 + }
4796 + } while (0);
4797 +
4798 +#ifdef CONFIG_PAX_DLRESOLVE
4799 + do { /* PaX: unpatched PLT emulation step 2 */
4800 + unsigned int save, call, nop;
4801 +
4802 + err = get_user(save, (unsigned int *)(regs->tpc-4));
4803 + err |= get_user(call, (unsigned int *)regs->tpc);
4804 + err |= get_user(nop, (unsigned int *)(regs->tpc+4));
4805 + if (err)
4806 + break;
4807 +
4808 + if (save == 0x9DE3BFA8U &&
4809 + (call & 0xC0000000U) == 0x40000000U &&
4810 + nop == 0x01000000U)
4811 + {
4812 + unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
4813 +
4814 + if (test_thread_flag(TIF_32BIT))
4815 + dl_resolve &= 0xFFFFFFFFUL;
4816 +
4817 + regs->u_regs[UREG_RETPC] = regs->tpc;
4818 + regs->tpc = dl_resolve;
4819 + regs->tnpc = dl_resolve+4;
4820 + return 3;
4821 + }
4822 + } while (0);
4823 +#endif
4824 +
4825 + do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
4826 + unsigned int sethi, ba, nop;
4827 +
4828 + err = get_user(sethi, (unsigned int *)regs->tpc);
4829 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
4830 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
4831 +
4832 + if (err)
4833 + break;
4834 +
4835 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4836 + (ba & 0xFFF00000U) == 0x30600000U &&
4837 + nop == 0x01000000U)
4838 + {
4839 + unsigned long addr;
4840 +
4841 + addr = (sethi & 0x003FFFFFU) << 10;
4842 + regs->u_regs[UREG_G1] = addr;
4843 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
4844 +
4845 + if (test_thread_flag(TIF_32BIT))
4846 + addr &= 0xFFFFFFFFUL;
4847 +
4848 + regs->tpc = addr;
4849 + regs->tnpc = addr+4;
4850 + return 2;
4851 + }
4852 + } while (0);
4853 +
4854 +#endif
4855 +
4856 + return 1;
4857 +}
4858 +
4859 +void pax_report_insns(void *pc, void *sp)
4860 +{
4861 + unsigned long i;
4862 +
4863 + printk(KERN_ERR "PAX: bytes at PC: ");
4864 + for (i = 0; i < 8; i++) {
4865 + unsigned int c;
4866 + if (get_user(c, (unsigned int *)pc+i))
4867 + printk(KERN_CONT "???????? ");
4868 + else
4869 + printk(KERN_CONT "%08x ", c);
4870 + }
4871 + printk("\n");
4872 +}
4873 +#endif
4874 +
4875 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
4876 {
4877 struct mm_struct *mm = current->mm;
4878 @@ -340,6 +794,29 @@ asmlinkage void __kprobes do_sparc64_fau
4879 if (!vma)
4880 goto bad_area;
4881
4882 +#ifdef CONFIG_PAX_PAGEEXEC
4883 + /* PaX: detect ITLB misses on non-exec pages */
4884 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
4885 + !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
4886 + {
4887 + if (address != regs->tpc)
4888 + goto good_area;
4889 +
4890 + up_read(&mm->mmap_sem);
4891 + switch (pax_handle_fetch_fault(regs)) {
4892 +
4893 +#ifdef CONFIG_PAX_EMUPLT
4894 + case 2:
4895 + case 3:
4896 + return;
4897 +#endif
4898 +
4899 + }
4900 + pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
4901 + do_group_exit(SIGKILL);
4902 + }
4903 +#endif
4904 +
4905 /* Pure DTLB misses do not tell us whether the fault causing
4906 * load/store/atomic was a write or not, it only says that there
4907 * was no match. So in such a case we (carefully) read the
4908 diff -urNp linux-3.0.4/arch/sparc/mm/hugetlbpage.c linux-3.0.4/arch/sparc/mm/hugetlbpage.c
4909 --- linux-3.0.4/arch/sparc/mm/hugetlbpage.c 2011-07-21 22:17:23.000000000 -0400
4910 +++ linux-3.0.4/arch/sparc/mm/hugetlbpage.c 2011-08-23 21:47:55.000000000 -0400
4911 @@ -68,7 +68,7 @@ full_search:
4912 }
4913 return -ENOMEM;
4914 }
4915 - if (likely(!vma || addr + len <= vma->vm_start)) {
4916 + if (likely(check_heap_stack_gap(vma, addr, len))) {
4917 /*
4918 * Remember the place where we stopped the search:
4919 */
4920 @@ -107,7 +107,7 @@ hugetlb_get_unmapped_area_topdown(struct
4921 /* make sure it can fit in the remaining address space */
4922 if (likely(addr > len)) {
4923 vma = find_vma(mm, addr-len);
4924 - if (!vma || addr <= vma->vm_start) {
4925 + if (check_heap_stack_gap(vma, addr - len, len)) {
4926 /* remember the address as a hint for next time */
4927 return (mm->free_area_cache = addr-len);
4928 }
4929 @@ -116,16 +116,17 @@ hugetlb_get_unmapped_area_topdown(struct
4930 if (unlikely(mm->mmap_base < len))
4931 goto bottomup;
4932
4933 - addr = (mm->mmap_base-len) & HPAGE_MASK;
4934 + addr = mm->mmap_base - len;
4935
4936 do {
4937 + addr &= HPAGE_MASK;
4938 /*
4939 * Lookup failure means no vma is above this address,
4940 * else if new region fits below vma->vm_start,
4941 * return with success:
4942 */
4943 vma = find_vma(mm, addr);
4944 - if (likely(!vma || addr+len <= vma->vm_start)) {
4945 + if (likely(check_heap_stack_gap(vma, addr, len))) {
4946 /* remember the address as a hint for next time */
4947 return (mm->free_area_cache = addr);
4948 }
4949 @@ -135,8 +136,8 @@ hugetlb_get_unmapped_area_topdown(struct
4950 mm->cached_hole_size = vma->vm_start - addr;
4951
4952 /* try just below the current vma->vm_start */
4953 - addr = (vma->vm_start-len) & HPAGE_MASK;
4954 - } while (likely(len < vma->vm_start));
4955 + addr = skip_heap_stack_gap(vma, len);
4956 + } while (!IS_ERR_VALUE(addr));
4957
4958 bottomup:
4959 /*
4960 @@ -182,8 +183,7 @@ hugetlb_get_unmapped_area(struct file *f
4961 if (addr) {
4962 addr = ALIGN(addr, HPAGE_SIZE);
4963 vma = find_vma(mm, addr);
4964 - if (task_size - len >= addr &&
4965 - (!vma || addr + len <= vma->vm_start))
4966 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
4967 return addr;
4968 }
4969 if (mm->get_unmapped_area == arch_get_unmapped_area)
4970 diff -urNp linux-3.0.4/arch/sparc/mm/init_32.c linux-3.0.4/arch/sparc/mm/init_32.c
4971 --- linux-3.0.4/arch/sparc/mm/init_32.c 2011-07-21 22:17:23.000000000 -0400
4972 +++ linux-3.0.4/arch/sparc/mm/init_32.c 2011-08-23 21:47:55.000000000 -0400
4973 @@ -316,6 +316,9 @@ extern void device_scan(void);
4974 pgprot_t PAGE_SHARED __read_mostly;
4975 EXPORT_SYMBOL(PAGE_SHARED);
4976
4977 +pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
4978 +EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
4979 +
4980 void __init paging_init(void)
4981 {
4982 switch(sparc_cpu_model) {
4983 @@ -344,17 +347,17 @@ void __init paging_init(void)
4984
4985 /* Initialize the protection map with non-constant, MMU dependent values. */
4986 protection_map[0] = PAGE_NONE;
4987 - protection_map[1] = PAGE_READONLY;
4988 - protection_map[2] = PAGE_COPY;
4989 - protection_map[3] = PAGE_COPY;
4990 + protection_map[1] = PAGE_READONLY_NOEXEC;
4991 + protection_map[2] = PAGE_COPY_NOEXEC;
4992 + protection_map[3] = PAGE_COPY_NOEXEC;
4993 protection_map[4] = PAGE_READONLY;
4994 protection_map[5] = PAGE_READONLY;
4995 protection_map[6] = PAGE_COPY;
4996 protection_map[7] = PAGE_COPY;
4997 protection_map[8] = PAGE_NONE;
4998 - protection_map[9] = PAGE_READONLY;
4999 - protection_map[10] = PAGE_SHARED;
5000 - protection_map[11] = PAGE_SHARED;
5001 + protection_map[9] = PAGE_READONLY_NOEXEC;
5002 + protection_map[10] = PAGE_SHARED_NOEXEC;
5003 + protection_map[11] = PAGE_SHARED_NOEXEC;
5004 protection_map[12] = PAGE_READONLY;
5005 protection_map[13] = PAGE_READONLY;
5006 protection_map[14] = PAGE_SHARED;
5007 diff -urNp linux-3.0.4/arch/sparc/mm/Makefile linux-3.0.4/arch/sparc/mm/Makefile
5008 --- linux-3.0.4/arch/sparc/mm/Makefile 2011-07-21 22:17:23.000000000 -0400
5009 +++ linux-3.0.4/arch/sparc/mm/Makefile 2011-08-23 21:47:55.000000000 -0400
5010 @@ -2,7 +2,7 @@
5011 #
5012
5013 asflags-y := -ansi
5014 -ccflags-y := -Werror
5015 +#ccflags-y := -Werror
5016
5017 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o
5018 obj-y += fault_$(BITS).o
5019 diff -urNp linux-3.0.4/arch/sparc/mm/srmmu.c linux-3.0.4/arch/sparc/mm/srmmu.c
5020 --- linux-3.0.4/arch/sparc/mm/srmmu.c 2011-07-21 22:17:23.000000000 -0400
5021 +++ linux-3.0.4/arch/sparc/mm/srmmu.c 2011-08-23 21:47:55.000000000 -0400
5022 @@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
5023 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
5024 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
5025 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
5026 +
5027 +#ifdef CONFIG_PAX_PAGEEXEC
5028 + PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
5029 + BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
5030 + BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
5031 +#endif
5032 +
5033 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
5034 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
5035
5036 diff -urNp linux-3.0.4/arch/um/include/asm/kmap_types.h linux-3.0.4/arch/um/include/asm/kmap_types.h
5037 --- linux-3.0.4/arch/um/include/asm/kmap_types.h 2011-07-21 22:17:23.000000000 -0400
5038 +++ linux-3.0.4/arch/um/include/asm/kmap_types.h 2011-08-23 21:47:55.000000000 -0400
5039 @@ -23,6 +23,7 @@ enum km_type {
5040 KM_IRQ1,
5041 KM_SOFTIRQ0,
5042 KM_SOFTIRQ1,
5043 + KM_CLEARPAGE,
5044 KM_TYPE_NR
5045 };
5046
5047 diff -urNp linux-3.0.4/arch/um/include/asm/page.h linux-3.0.4/arch/um/include/asm/page.h
5048 --- linux-3.0.4/arch/um/include/asm/page.h 2011-07-21 22:17:23.000000000 -0400
5049 +++ linux-3.0.4/arch/um/include/asm/page.h 2011-08-23 21:47:55.000000000 -0400
5050 @@ -14,6 +14,9 @@
5051 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
5052 #define PAGE_MASK (~(PAGE_SIZE-1))
5053
5054 +#define ktla_ktva(addr) (addr)
5055 +#define ktva_ktla(addr) (addr)
5056 +
5057 #ifndef __ASSEMBLY__
5058
5059 struct page;
5060 diff -urNp linux-3.0.4/arch/um/kernel/process.c linux-3.0.4/arch/um/kernel/process.c
5061 --- linux-3.0.4/arch/um/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
5062 +++ linux-3.0.4/arch/um/kernel/process.c 2011-08-23 21:47:55.000000000 -0400
5063 @@ -404,22 +404,6 @@ int singlestepping(void * t)
5064 return 2;
5065 }
5066
5067 -/*
5068 - * Only x86 and x86_64 have an arch_align_stack().
5069 - * All other arches have "#define arch_align_stack(x) (x)"
5070 - * in their asm/system.h
5071 - * As this is included in UML from asm-um/system-generic.h,
5072 - * we can use it to behave as the subarch does.
5073 - */
5074 -#ifndef arch_align_stack
5075 -unsigned long arch_align_stack(unsigned long sp)
5076 -{
5077 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
5078 - sp -= get_random_int() % 8192;
5079 - return sp & ~0xf;
5080 -}
5081 -#endif
5082 -
5083 unsigned long get_wchan(struct task_struct *p)
5084 {
5085 unsigned long stack_page, sp, ip;
5086 diff -urNp linux-3.0.4/arch/um/sys-i386/syscalls.c linux-3.0.4/arch/um/sys-i386/syscalls.c
5087 --- linux-3.0.4/arch/um/sys-i386/syscalls.c 2011-07-21 22:17:23.000000000 -0400
5088 +++ linux-3.0.4/arch/um/sys-i386/syscalls.c 2011-08-23 21:47:55.000000000 -0400
5089 @@ -11,6 +11,21 @@
5090 #include "asm/uaccess.h"
5091 #include "asm/unistd.h"
5092
5093 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
5094 +{
5095 + unsigned long pax_task_size = TASK_SIZE;
5096 +
5097 +#ifdef CONFIG_PAX_SEGMEXEC
5098 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
5099 + pax_task_size = SEGMEXEC_TASK_SIZE;
5100 +#endif
5101 +
5102 + if (len > pax_task_size || addr > pax_task_size - len)
5103 + return -EINVAL;
5104 +
5105 + return 0;
5106 +}
5107 +
5108 /*
5109 * The prototype on i386 is:
5110 *
5111 diff -urNp linux-3.0.4/arch/x86/boot/bitops.h linux-3.0.4/arch/x86/boot/bitops.h
5112 --- linux-3.0.4/arch/x86/boot/bitops.h 2011-07-21 22:17:23.000000000 -0400
5113 +++ linux-3.0.4/arch/x86/boot/bitops.h 2011-08-23 21:47:55.000000000 -0400
5114 @@ -26,7 +26,7 @@ static inline int variable_test_bit(int
5115 u8 v;
5116 const u32 *p = (const u32 *)addr;
5117
5118 - asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5119 + asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5120 return v;
5121 }
5122
5123 @@ -37,7 +37,7 @@ static inline int variable_test_bit(int
5124
5125 static inline void set_bit(int nr, void *addr)
5126 {
5127 - asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5128 + asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5129 }
5130
5131 #endif /* BOOT_BITOPS_H */
5132 diff -urNp linux-3.0.4/arch/x86/boot/boot.h linux-3.0.4/arch/x86/boot/boot.h
5133 --- linux-3.0.4/arch/x86/boot/boot.h 2011-07-21 22:17:23.000000000 -0400
5134 +++ linux-3.0.4/arch/x86/boot/boot.h 2011-08-23 21:47:55.000000000 -0400
5135 @@ -85,7 +85,7 @@ static inline void io_delay(void)
5136 static inline u16 ds(void)
5137 {
5138 u16 seg;
5139 - asm("movw %%ds,%0" : "=rm" (seg));
5140 + asm volatile("movw %%ds,%0" : "=rm" (seg));
5141 return seg;
5142 }
5143
5144 @@ -181,7 +181,7 @@ static inline void wrgs32(u32 v, addr_t
5145 static inline int memcmp(const void *s1, const void *s2, size_t len)
5146 {
5147 u8 diff;
5148 - asm("repe; cmpsb; setnz %0"
5149 + asm volatile("repe; cmpsb; setnz %0"
5150 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
5151 return diff;
5152 }
5153 diff -urNp linux-3.0.4/arch/x86/boot/compressed/head_32.S linux-3.0.4/arch/x86/boot/compressed/head_32.S
5154 --- linux-3.0.4/arch/x86/boot/compressed/head_32.S 2011-07-21 22:17:23.000000000 -0400
5155 +++ linux-3.0.4/arch/x86/boot/compressed/head_32.S 2011-08-23 21:47:55.000000000 -0400
5156 @@ -76,7 +76,7 @@ ENTRY(startup_32)
5157 notl %eax
5158 andl %eax, %ebx
5159 #else
5160 - movl $LOAD_PHYSICAL_ADDR, %ebx
5161 + movl $____LOAD_PHYSICAL_ADDR, %ebx
5162 #endif
5163
5164 /* Target address to relocate to for decompression */
5165 @@ -162,7 +162,7 @@ relocated:
5166 * and where it was actually loaded.
5167 */
5168 movl %ebp, %ebx
5169 - subl $LOAD_PHYSICAL_ADDR, %ebx
5170 + subl $____LOAD_PHYSICAL_ADDR, %ebx
5171 jz 2f /* Nothing to be done if loaded at compiled addr. */
5172 /*
5173 * Process relocations.
5174 @@ -170,8 +170,7 @@ relocated:
5175
5176 1: subl $4, %edi
5177 movl (%edi), %ecx
5178 - testl %ecx, %ecx
5179 - jz 2f
5180 + jecxz 2f
5181 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
5182 jmp 1b
5183 2:
5184 diff -urNp linux-3.0.4/arch/x86/boot/compressed/head_64.S linux-3.0.4/arch/x86/boot/compressed/head_64.S
5185 --- linux-3.0.4/arch/x86/boot/compressed/head_64.S 2011-07-21 22:17:23.000000000 -0400
5186 +++ linux-3.0.4/arch/x86/boot/compressed/head_64.S 2011-08-23 21:47:55.000000000 -0400
5187 @@ -91,7 +91,7 @@ ENTRY(startup_32)
5188 notl %eax
5189 andl %eax, %ebx
5190 #else
5191 - movl $LOAD_PHYSICAL_ADDR, %ebx
5192 + movl $____LOAD_PHYSICAL_ADDR, %ebx
5193 #endif
5194
5195 /* Target address to relocate to for decompression */
5196 @@ -233,7 +233,7 @@ ENTRY(startup_64)
5197 notq %rax
5198 andq %rax, %rbp
5199 #else
5200 - movq $LOAD_PHYSICAL_ADDR, %rbp
5201 + movq $____LOAD_PHYSICAL_ADDR, %rbp
5202 #endif
5203
5204 /* Target address to relocate to for decompression */
5205 diff -urNp linux-3.0.4/arch/x86/boot/compressed/Makefile linux-3.0.4/arch/x86/boot/compressed/Makefile
5206 --- linux-3.0.4/arch/x86/boot/compressed/Makefile 2011-07-21 22:17:23.000000000 -0400
5207 +++ linux-3.0.4/arch/x86/boot/compressed/Makefile 2011-08-23 21:47:55.000000000 -0400
5208 @@ -14,6 +14,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=smal
5209 KBUILD_CFLAGS += $(cflags-y)
5210 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
5211 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
5212 +ifdef CONSTIFY_PLUGIN
5213 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5214 +endif
5215
5216 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
5217 GCOV_PROFILE := n
5218 diff -urNp linux-3.0.4/arch/x86/boot/compressed/misc.c linux-3.0.4/arch/x86/boot/compressed/misc.c
5219 --- linux-3.0.4/arch/x86/boot/compressed/misc.c 2011-07-21 22:17:23.000000000 -0400
5220 +++ linux-3.0.4/arch/x86/boot/compressed/misc.c 2011-08-23 21:47:55.000000000 -0400
5221 @@ -310,7 +310,7 @@ static void parse_elf(void *output)
5222 case PT_LOAD:
5223 #ifdef CONFIG_RELOCATABLE
5224 dest = output;
5225 - dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
5226 + dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
5227 #else
5228 dest = (void *)(phdr->p_paddr);
5229 #endif
5230 @@ -363,7 +363,7 @@ asmlinkage void decompress_kernel(void *
5231 error("Destination address too large");
5232 #endif
5233 #ifndef CONFIG_RELOCATABLE
5234 - if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
5235 + if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
5236 error("Wrong destination address");
5237 #endif
5238
5239 diff -urNp linux-3.0.4/arch/x86/boot/compressed/relocs.c linux-3.0.4/arch/x86/boot/compressed/relocs.c
5240 --- linux-3.0.4/arch/x86/boot/compressed/relocs.c 2011-07-21 22:17:23.000000000 -0400
5241 +++ linux-3.0.4/arch/x86/boot/compressed/relocs.c 2011-08-23 21:47:55.000000000 -0400
5242 @@ -13,8 +13,11 @@
5243
5244 static void die(char *fmt, ...);
5245
5246 +#include "../../../../include/generated/autoconf.h"
5247 +
5248 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
5249 static Elf32_Ehdr ehdr;
5250 +static Elf32_Phdr *phdr;
5251 static unsigned long reloc_count, reloc_idx;
5252 static unsigned long *relocs;
5253
5254 @@ -270,9 +273,39 @@ static void read_ehdr(FILE *fp)
5255 }
5256 }
5257
5258 +static void read_phdrs(FILE *fp)
5259 +{
5260 + unsigned int i;
5261 +
5262 + phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
5263 + if (!phdr) {
5264 + die("Unable to allocate %d program headers\n",
5265 + ehdr.e_phnum);
5266 + }
5267 + if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
5268 + die("Seek to %d failed: %s\n",
5269 + ehdr.e_phoff, strerror(errno));
5270 + }
5271 + if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
5272 + die("Cannot read ELF program headers: %s\n",
5273 + strerror(errno));
5274 + }
5275 + for(i = 0; i < ehdr.e_phnum; i++) {
5276 + phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
5277 + phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
5278 + phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
5279 + phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
5280 + phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
5281 + phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
5282 + phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
5283 + phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
5284 + }
5285 +
5286 +}
5287 +
5288 static void read_shdrs(FILE *fp)
5289 {
5290 - int i;
5291 + unsigned int i;
5292 Elf32_Shdr shdr;
5293
5294 secs = calloc(ehdr.e_shnum, sizeof(struct section));
5295 @@ -307,7 +340,7 @@ static void read_shdrs(FILE *fp)
5296
5297 static void read_strtabs(FILE *fp)
5298 {
5299 - int i;
5300 + unsigned int i;
5301 for (i = 0; i < ehdr.e_shnum; i++) {
5302 struct section *sec = &secs[i];
5303 if (sec->shdr.sh_type != SHT_STRTAB) {
5304 @@ -332,7 +365,7 @@ static void read_strtabs(FILE *fp)
5305
5306 static void read_symtabs(FILE *fp)
5307 {
5308 - int i,j;
5309 + unsigned int i,j;
5310 for (i = 0; i < ehdr.e_shnum; i++) {
5311 struct section *sec = &secs[i];
5312 if (sec->shdr.sh_type != SHT_SYMTAB) {
5313 @@ -365,7 +398,9 @@ static void read_symtabs(FILE *fp)
5314
5315 static void read_relocs(FILE *fp)
5316 {
5317 - int i,j;
5318 + unsigned int i,j;
5319 + uint32_t base;
5320 +
5321 for (i = 0; i < ehdr.e_shnum; i++) {
5322 struct section *sec = &secs[i];
5323 if (sec->shdr.sh_type != SHT_REL) {
5324 @@ -385,9 +420,18 @@ static void read_relocs(FILE *fp)
5325 die("Cannot read symbol table: %s\n",
5326 strerror(errno));
5327 }
5328 + base = 0;
5329 + for (j = 0; j < ehdr.e_phnum; j++) {
5330 + if (phdr[j].p_type != PT_LOAD )
5331 + continue;
5332 + if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
5333 + continue;
5334 + base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
5335 + break;
5336 + }
5337 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
5338 Elf32_Rel *rel = &sec->reltab[j];
5339 - rel->r_offset = elf32_to_cpu(rel->r_offset);
5340 + rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
5341 rel->r_info = elf32_to_cpu(rel->r_info);
5342 }
5343 }
5344 @@ -396,14 +440,14 @@ static void read_relocs(FILE *fp)
5345
5346 static void print_absolute_symbols(void)
5347 {
5348 - int i;
5349 + unsigned int i;
5350 printf("Absolute symbols\n");
5351 printf(" Num: Value Size Type Bind Visibility Name\n");
5352 for (i = 0; i < ehdr.e_shnum; i++) {
5353 struct section *sec = &secs[i];
5354 char *sym_strtab;
5355 Elf32_Sym *sh_symtab;
5356 - int j;
5357 + unsigned int j;
5358
5359 if (sec->shdr.sh_type != SHT_SYMTAB) {
5360 continue;
5361 @@ -431,14 +475,14 @@ static void print_absolute_symbols(void)
5362
5363 static void print_absolute_relocs(void)
5364 {
5365 - int i, printed = 0;
5366 + unsigned int i, printed = 0;
5367
5368 for (i = 0; i < ehdr.e_shnum; i++) {
5369 struct section *sec = &secs[i];
5370 struct section *sec_applies, *sec_symtab;
5371 char *sym_strtab;
5372 Elf32_Sym *sh_symtab;
5373 - int j;
5374 + unsigned int j;
5375 if (sec->shdr.sh_type != SHT_REL) {
5376 continue;
5377 }
5378 @@ -499,13 +543,13 @@ static void print_absolute_relocs(void)
5379
5380 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
5381 {
5382 - int i;
5383 + unsigned int i;
5384 /* Walk through the relocations */
5385 for (i = 0; i < ehdr.e_shnum; i++) {
5386 char *sym_strtab;
5387 Elf32_Sym *sh_symtab;
5388 struct section *sec_applies, *sec_symtab;
5389 - int j;
5390 + unsigned int j;
5391 struct section *sec = &secs[i];
5392
5393 if (sec->shdr.sh_type != SHT_REL) {
5394 @@ -530,6 +574,22 @@ static void walk_relocs(void (*visit)(El
5395 !is_rel_reloc(sym_name(sym_strtab, sym))) {
5396 continue;
5397 }
5398 + /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
5399 + if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
5400 + continue;
5401 +
5402 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
5403 + /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
5404 + if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
5405 + continue;
5406 + if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
5407 + continue;
5408 + if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
5409 + continue;
5410 + if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
5411 + continue;
5412 +#endif
5413 +
5414 switch (r_type) {
5415 case R_386_NONE:
5416 case R_386_PC32:
5417 @@ -571,7 +631,7 @@ static int cmp_relocs(const void *va, co
5418
5419 static void emit_relocs(int as_text)
5420 {
5421 - int i;
5422 + unsigned int i;
5423 /* Count how many relocations I have and allocate space for them. */
5424 reloc_count = 0;
5425 walk_relocs(count_reloc);
5426 @@ -665,6 +725,7 @@ int main(int argc, char **argv)
5427 fname, strerror(errno));
5428 }
5429 read_ehdr(fp);
5430 + read_phdrs(fp);
5431 read_shdrs(fp);
5432 read_strtabs(fp);
5433 read_symtabs(fp);
5434 diff -urNp linux-3.0.4/arch/x86/boot/cpucheck.c linux-3.0.4/arch/x86/boot/cpucheck.c
5435 --- linux-3.0.4/arch/x86/boot/cpucheck.c 2011-07-21 22:17:23.000000000 -0400
5436 +++ linux-3.0.4/arch/x86/boot/cpucheck.c 2011-08-23 21:47:55.000000000 -0400
5437 @@ -74,7 +74,7 @@ static int has_fpu(void)
5438 u16 fcw = -1, fsw = -1;
5439 u32 cr0;
5440
5441 - asm("movl %%cr0,%0" : "=r" (cr0));
5442 + asm volatile("movl %%cr0,%0" : "=r" (cr0));
5443 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
5444 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
5445 asm volatile("movl %0,%%cr0" : : "r" (cr0));
5446 @@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
5447 {
5448 u32 f0, f1;
5449
5450 - asm("pushfl ; "
5451 + asm volatile("pushfl ; "
5452 "pushfl ; "
5453 "popl %0 ; "
5454 "movl %0,%1 ; "
5455 @@ -115,7 +115,7 @@ static void get_flags(void)
5456 set_bit(X86_FEATURE_FPU, cpu.flags);
5457
5458 if (has_eflag(X86_EFLAGS_ID)) {
5459 - asm("cpuid"
5460 + asm volatile("cpuid"
5461 : "=a" (max_intel_level),
5462 "=b" (cpu_vendor[0]),
5463 "=d" (cpu_vendor[1]),
5464 @@ -124,7 +124,7 @@ static void get_flags(void)
5465
5466 if (max_intel_level >= 0x00000001 &&
5467 max_intel_level <= 0x0000ffff) {
5468 - asm("cpuid"
5469 + asm volatile("cpuid"
5470 : "=a" (tfms),
5471 "=c" (cpu.flags[4]),
5472 "=d" (cpu.flags[0])
5473 @@ -136,7 +136,7 @@ static void get_flags(void)
5474 cpu.model += ((tfms >> 16) & 0xf) << 4;
5475 }
5476
5477 - asm("cpuid"
5478 + asm volatile("cpuid"
5479 : "=a" (max_amd_level)
5480 : "a" (0x80000000)
5481 : "ebx", "ecx", "edx");
5482 @@ -144,7 +144,7 @@ static void get_flags(void)
5483 if (max_amd_level >= 0x80000001 &&
5484 max_amd_level <= 0x8000ffff) {
5485 u32 eax = 0x80000001;
5486 - asm("cpuid"
5487 + asm volatile("cpuid"
5488 : "+a" (eax),
5489 "=c" (cpu.flags[6]),
5490 "=d" (cpu.flags[1])
5491 @@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *r
5492 u32 ecx = MSR_K7_HWCR;
5493 u32 eax, edx;
5494
5495 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5496 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5497 eax &= ~(1 << 15);
5498 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5499 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5500
5501 get_flags(); /* Make sure it really did something */
5502 err = check_flags();
5503 @@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *r
5504 u32 ecx = MSR_VIA_FCR;
5505 u32 eax, edx;
5506
5507 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5508 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5509 eax |= (1<<1)|(1<<7);
5510 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5511 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5512
5513 set_bit(X86_FEATURE_CX8, cpu.flags);
5514 err = check_flags();
5515 @@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *r
5516 u32 eax, edx;
5517 u32 level = 1;
5518
5519 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5520 - asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
5521 - asm("cpuid"
5522 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5523 + asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
5524 + asm volatile("cpuid"
5525 : "+a" (level), "=d" (cpu.flags[0])
5526 : : "ecx", "ebx");
5527 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5528 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5529
5530 err = check_flags();
5531 }
5532 diff -urNp linux-3.0.4/arch/x86/boot/header.S linux-3.0.4/arch/x86/boot/header.S
5533 --- linux-3.0.4/arch/x86/boot/header.S 2011-07-21 22:17:23.000000000 -0400
5534 +++ linux-3.0.4/arch/x86/boot/header.S 2011-08-23 21:47:55.000000000 -0400
5535 @@ -224,7 +224,7 @@ setup_data: .quad 0 # 64-bit physical
5536 # single linked list of
5537 # struct setup_data
5538
5539 -pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
5540 +pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
5541
5542 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
5543 #define VO_INIT_SIZE (VO__end - VO__text)
5544 diff -urNp linux-3.0.4/arch/x86/boot/Makefile linux-3.0.4/arch/x86/boot/Makefile
5545 --- linux-3.0.4/arch/x86/boot/Makefile 2011-07-21 22:17:23.000000000 -0400
5546 +++ linux-3.0.4/arch/x86/boot/Makefile 2011-08-23 21:47:55.000000000 -0400
5547 @@ -69,6 +69,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os
5548 $(call cc-option, -fno-stack-protector) \
5549 $(call cc-option, -mpreferred-stack-boundary=2)
5550 KBUILD_CFLAGS += $(call cc-option, -m32)
5551 +ifdef CONSTIFY_PLUGIN
5552 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5553 +endif
5554 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
5555 GCOV_PROFILE := n
5556
5557 diff -urNp linux-3.0.4/arch/x86/boot/memory.c linux-3.0.4/arch/x86/boot/memory.c
5558 --- linux-3.0.4/arch/x86/boot/memory.c 2011-07-21 22:17:23.000000000 -0400
5559 +++ linux-3.0.4/arch/x86/boot/memory.c 2011-08-23 21:47:55.000000000 -0400
5560 @@ -19,7 +19,7 @@
5561
5562 static int detect_memory_e820(void)
5563 {
5564 - int count = 0;
5565 + unsigned int count = 0;
5566 struct biosregs ireg, oreg;
5567 struct e820entry *desc = boot_params.e820_map;
5568 static struct e820entry buf; /* static so it is zeroed */
5569 diff -urNp linux-3.0.4/arch/x86/boot/video.c linux-3.0.4/arch/x86/boot/video.c
5570 --- linux-3.0.4/arch/x86/boot/video.c 2011-07-21 22:17:23.000000000 -0400
5571 +++ linux-3.0.4/arch/x86/boot/video.c 2011-08-23 21:47:55.000000000 -0400
5572 @@ -96,7 +96,7 @@ static void store_mode_params(void)
5573 static unsigned int get_entry(void)
5574 {
5575 char entry_buf[4];
5576 - int i, len = 0;
5577 + unsigned int i, len = 0;
5578 int key;
5579 unsigned int v;
5580
5581 diff -urNp linux-3.0.4/arch/x86/boot/video-vesa.c linux-3.0.4/arch/x86/boot/video-vesa.c
5582 --- linux-3.0.4/arch/x86/boot/video-vesa.c 2011-07-21 22:17:23.000000000 -0400
5583 +++ linux-3.0.4/arch/x86/boot/video-vesa.c 2011-08-23 21:47:55.000000000 -0400
5584 @@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
5585
5586 boot_params.screen_info.vesapm_seg = oreg.es;
5587 boot_params.screen_info.vesapm_off = oreg.di;
5588 + boot_params.screen_info.vesapm_size = oreg.cx;
5589 }
5590
5591 /*
5592 diff -urNp linux-3.0.4/arch/x86/crypto/aes-x86_64-asm_64.S linux-3.0.4/arch/x86/crypto/aes-x86_64-asm_64.S
5593 --- linux-3.0.4/arch/x86/crypto/aes-x86_64-asm_64.S 2011-07-21 22:17:23.000000000 -0400
5594 +++ linux-3.0.4/arch/x86/crypto/aes-x86_64-asm_64.S 2011-09-17 18:31:51.000000000 -0400
5595 @@ -71,6 +71,12 @@ FUNC: movq r1,r2; \
5596 je B192; \
5597 leaq 32(r9),r9;
5598
5599 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
5600 +#define ret orb $0x80, 0x7(%rsp); ret
5601 +#else
5602 +#define ret ret
5603 +#endif
5604 +
5605 #define epilogue(r1,r2,r3,r4,r5,r6,r7,r8,r9) \
5606 movq r1,r2; \
5607 movq r3,r4; \
5608 diff -urNp linux-3.0.4/arch/x86/crypto/salsa20-x86_64-asm_64.S linux-3.0.4/arch/x86/crypto/salsa20-x86_64-asm_64.S
5609 --- linux-3.0.4/arch/x86/crypto/salsa20-x86_64-asm_64.S 2011-07-21 22:17:23.000000000 -0400
5610 +++ linux-3.0.4/arch/x86/crypto/salsa20-x86_64-asm_64.S 2011-09-17 18:31:51.000000000 -0400
5611 @@ -790,6 +790,9 @@ ECRYPT_encrypt_bytes:
5612 add %r11,%rsp
5613 mov %rdi,%rax
5614 mov %rsi,%rdx
5615 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
5616 + orb $0x80, 0x7(%rsp)
5617 +#endif
5618 ret
5619 # bytesatleast65:
5620 ._bytesatleast65:
5621 @@ -891,6 +894,9 @@ ECRYPT_keysetup:
5622 add %r11,%rsp
5623 mov %rdi,%rax
5624 mov %rsi,%rdx
5625 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
5626 + orb $0x80, 0x7(%rsp)
5627 +#endif
5628 ret
5629 # enter ECRYPT_ivsetup
5630 .text
5631 @@ -917,4 +923,7 @@ ECRYPT_ivsetup:
5632 add %r11,%rsp
5633 mov %rdi,%rax
5634 mov %rsi,%rdx
5635 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
5636 + orb $0x80, 0x7(%rsp)
5637 +#endif
5638 ret
5639 diff -urNp linux-3.0.4/arch/x86/crypto/twofish-x86_64-asm_64.S linux-3.0.4/arch/x86/crypto/twofish-x86_64-asm_64.S
5640 --- linux-3.0.4/arch/x86/crypto/twofish-x86_64-asm_64.S 2011-07-21 22:17:23.000000000 -0400
5641 +++ linux-3.0.4/arch/x86/crypto/twofish-x86_64-asm_64.S 2011-09-17 18:31:51.000000000 -0400
5642 @@ -269,6 +269,9 @@ twofish_enc_blk:
5643
5644 popq R1
5645 movq $1,%rax
5646 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
5647 + orb $0x80, 0x7(%rsp)
5648 +#endif
5649 ret
5650
5651 twofish_dec_blk:
5652 @@ -321,4 +324,7 @@ twofish_dec_blk:
5653
5654 popq R1
5655 movq $1,%rax
5656 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
5657 + orb $0x80, 0x7(%rsp)
5658 +#endif
5659 ret
5660 diff -urNp linux-3.0.4/arch/x86/ia32/ia32_aout.c linux-3.0.4/arch/x86/ia32/ia32_aout.c
5661 --- linux-3.0.4/arch/x86/ia32/ia32_aout.c 2011-07-21 22:17:23.000000000 -0400
5662 +++ linux-3.0.4/arch/x86/ia32/ia32_aout.c 2011-08-23 21:48:14.000000000 -0400
5663 @@ -162,6 +162,8 @@ static int aout_core_dump(long signr, st
5664 unsigned long dump_start, dump_size;
5665 struct user32 dump;
5666
5667 + memset(&dump, 0, sizeof(dump));
5668 +
5669 fs = get_fs();
5670 set_fs(KERNEL_DS);
5671 has_dumped = 1;
5672 diff -urNp linux-3.0.4/arch/x86/ia32/ia32entry.S linux-3.0.4/arch/x86/ia32/ia32entry.S
5673 --- linux-3.0.4/arch/x86/ia32/ia32entry.S 2011-07-21 22:17:23.000000000 -0400
5674 +++ linux-3.0.4/arch/x86/ia32/ia32entry.S 2011-08-25 17:36:37.000000000 -0400
5675 @@ -13,6 +13,7 @@
5676 #include <asm/thread_info.h>
5677 #include <asm/segment.h>
5678 #include <asm/irqflags.h>
5679 +#include <asm/pgtable.h>
5680 #include <linux/linkage.h>
5681
5682 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
5683 @@ -95,6 +96,29 @@ ENTRY(native_irq_enable_sysexit)
5684 ENDPROC(native_irq_enable_sysexit)
5685 #endif
5686
5687 + .macro pax_enter_kernel_user
5688 +#ifdef CONFIG_PAX_MEMORY_UDEREF
5689 + call pax_enter_kernel_user
5690 +#endif
5691 + .endm
5692 +
5693 + .macro pax_exit_kernel_user
5694 +#ifdef CONFIG_PAX_MEMORY_UDEREF
5695 + call pax_exit_kernel_user
5696 +#endif
5697 +#ifdef CONFIG_PAX_RANDKSTACK
5698 + pushq %rax
5699 + call pax_randomize_kstack
5700 + popq %rax
5701 +#endif
5702 + .endm
5703 +
5704 + .macro pax_erase_kstack
5705 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
5706 + call pax_erase_kstack
5707 +#endif
5708 + .endm
5709 +
5710 /*
5711 * 32bit SYSENTER instruction entry.
5712 *
5713 @@ -121,7 +145,7 @@ ENTRY(ia32_sysenter_target)
5714 CFI_REGISTER rsp,rbp
5715 SWAPGS_UNSAFE_STACK
5716 movq PER_CPU_VAR(kernel_stack), %rsp
5717 - addq $(KERNEL_STACK_OFFSET),%rsp
5718 + pax_enter_kernel_user
5719 /*
5720 * No need to follow this irqs on/off section: the syscall
5721 * disabled irqs, here we enable it straight after entry:
5722 @@ -134,7 +158,8 @@ ENTRY(ia32_sysenter_target)
5723 CFI_REL_OFFSET rsp,0
5724 pushfq_cfi
5725 /*CFI_REL_OFFSET rflags,0*/
5726 - movl 8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d
5727 + GET_THREAD_INFO(%r10)
5728 + movl TI_sysenter_return(%r10), %r10d
5729 CFI_REGISTER rip,r10
5730 pushq_cfi $__USER32_CS
5731 /*CFI_REL_OFFSET cs,0*/
5732 @@ -146,6 +171,12 @@ ENTRY(ia32_sysenter_target)
5733 SAVE_ARGS 0,0,1
5734 /* no need to do an access_ok check here because rbp has been
5735 32bit zero extended */
5736 +
5737 +#ifdef CONFIG_PAX_MEMORY_UDEREF
5738 + mov $PAX_USER_SHADOW_BASE,%r10
5739 + add %r10,%rbp
5740 +#endif
5741 +
5742 1: movl (%rbp),%ebp
5743 .section __ex_table,"a"
5744 .quad 1b,ia32_badarg
5745 @@ -168,6 +199,8 @@ sysenter_dispatch:
5746 testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
5747 jnz sysexit_audit
5748 sysexit_from_sys_call:
5749 + pax_exit_kernel_user
5750 + pax_erase_kstack
5751 andl $~TS_COMPAT,TI_status(%r10)
5752 /* clear IF, that popfq doesn't enable interrupts early */
5753 andl $~0x200,EFLAGS-R11(%rsp)
5754 @@ -194,6 +227,9 @@ sysexit_from_sys_call:
5755 movl %eax,%esi /* 2nd arg: syscall number */
5756 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
5757 call audit_syscall_entry
5758 +
5759 + pax_erase_kstack
5760 +
5761 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
5762 cmpq $(IA32_NR_syscalls-1),%rax
5763 ja ia32_badsys
5764 @@ -246,6 +282,9 @@ sysenter_tracesys:
5765 movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
5766 movq %rsp,%rdi /* &pt_regs -> arg1 */
5767 call syscall_trace_enter
5768 +
5769 + pax_erase_kstack
5770 +
5771 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
5772 RESTORE_REST
5773 cmpq $(IA32_NR_syscalls-1),%rax
5774 @@ -277,19 +316,24 @@ ENDPROC(ia32_sysenter_target)
5775 ENTRY(ia32_cstar_target)
5776 CFI_STARTPROC32 simple
5777 CFI_SIGNAL_FRAME
5778 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
5779 + CFI_DEF_CFA rsp,0
5780 CFI_REGISTER rip,rcx
5781 /*CFI_REGISTER rflags,r11*/
5782 SWAPGS_UNSAFE_STACK
5783 movl %esp,%r8d
5784 CFI_REGISTER rsp,r8
5785 movq PER_CPU_VAR(kernel_stack),%rsp
5786 +
5787 +#ifdef CONFIG_PAX_MEMORY_UDEREF
5788 + pax_enter_kernel_user
5789 +#endif
5790 +
5791 /*
5792 * No need to follow this irqs on/off section: the syscall
5793 * disabled irqs and here we enable it straight after entry:
5794 */
5795 ENABLE_INTERRUPTS(CLBR_NONE)
5796 - SAVE_ARGS 8,1,1
5797 + SAVE_ARGS 8*6,1,1
5798 movl %eax,%eax /* zero extension */
5799 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
5800 movq %rcx,RIP-ARGOFFSET(%rsp)
5801 @@ -305,6 +349,12 @@ ENTRY(ia32_cstar_target)
5802 /* no need to do an access_ok check here because r8 has been
5803 32bit zero extended */
5804 /* hardware stack frame is complete now */
5805 +
5806 +#ifdef CONFIG_PAX_MEMORY_UDEREF
5807 + mov $PAX_USER_SHADOW_BASE,%r10
5808 + add %r10,%r8
5809 +#endif
5810 +
5811 1: movl (%r8),%r9d
5812 .section __ex_table,"a"
5813 .quad 1b,ia32_badarg
5814 @@ -327,6 +377,8 @@ cstar_dispatch:
5815 testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
5816 jnz sysretl_audit
5817 sysretl_from_sys_call:
5818 + pax_exit_kernel_user
5819 + pax_erase_kstack
5820 andl $~TS_COMPAT,TI_status(%r10)
5821 RESTORE_ARGS 1,-ARG_SKIP,1,1,1
5822 movl RIP-ARGOFFSET(%rsp),%ecx
5823 @@ -364,6 +416,9 @@ cstar_tracesys:
5824 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
5825 movq %rsp,%rdi /* &pt_regs -> arg1 */
5826 call syscall_trace_enter
5827 +
5828 + pax_erase_kstack
5829 +
5830 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
5831 RESTORE_REST
5832 xchgl %ebp,%r9d
5833 @@ -409,6 +464,7 @@ ENTRY(ia32_syscall)
5834 CFI_REL_OFFSET rip,RIP-RIP
5835 PARAVIRT_ADJUST_EXCEPTION_FRAME
5836 SWAPGS
5837 + pax_enter_kernel_user
5838 /*
5839 * No need to follow this irqs on/off section: the syscall
5840 * disabled irqs and here we enable it straight after entry:
5841 @@ -441,6 +497,9 @@ ia32_tracesys:
5842 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
5843 movq %rsp,%rdi /* &pt_regs -> arg1 */
5844 call syscall_trace_enter
5845 +
5846 + pax_erase_kstack
5847 +
5848 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
5849 RESTORE_REST
5850 cmpq $(IA32_NR_syscalls-1),%rax
5851 diff -urNp linux-3.0.4/arch/x86/ia32/ia32_signal.c linux-3.0.4/arch/x86/ia32/ia32_signal.c
5852 --- linux-3.0.4/arch/x86/ia32/ia32_signal.c 2011-07-21 22:17:23.000000000 -0400
5853 +++ linux-3.0.4/arch/x86/ia32/ia32_signal.c 2011-08-23 21:47:55.000000000 -0400
5854 @@ -403,7 +403,7 @@ static void __user *get_sigframe(struct
5855 sp -= frame_size;
5856 /* Align the stack pointer according to the i386 ABI,
5857 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
5858 - sp = ((sp + 4) & -16ul) - 4;
5859 + sp = ((sp - 12) & -16ul) - 4;
5860 return (void __user *) sp;
5861 }
5862
5863 @@ -461,7 +461,7 @@ int ia32_setup_frame(int sig, struct k_s
5864 * These are actually not used anymore, but left because some
5865 * gdb versions depend on them as a marker.
5866 */
5867 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
5868 + put_user_ex(*((const u64 *)&code), (u64 *)frame->retcode);
5869 } put_user_catch(err);
5870
5871 if (err)
5872 @@ -503,7 +503,7 @@ int ia32_setup_rt_frame(int sig, struct
5873 0xb8,
5874 __NR_ia32_rt_sigreturn,
5875 0x80cd,
5876 - 0,
5877 + 0
5878 };
5879
5880 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
5881 @@ -533,16 +533,18 @@ int ia32_setup_rt_frame(int sig, struct
5882
5883 if (ka->sa.sa_flags & SA_RESTORER)
5884 restorer = ka->sa.sa_restorer;
5885 + else if (current->mm->context.vdso)
5886 + /* Return stub is in 32bit vsyscall page */
5887 + restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
5888 else
5889 - restorer = VDSO32_SYMBOL(current->mm->context.vdso,
5890 - rt_sigreturn);
5891 + restorer = &frame->retcode;
5892 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
5893
5894 /*
5895 * Not actually used anymore, but left because some gdb
5896 * versions need it.
5897 */
5898 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
5899 + put_user_ex(*((const u64 *)&code), (u64 *)frame->retcode);
5900 } put_user_catch(err);
5901
5902 if (err)
5903 diff -urNp linux-3.0.4/arch/x86/include/asm/alternative.h linux-3.0.4/arch/x86/include/asm/alternative.h
5904 --- linux-3.0.4/arch/x86/include/asm/alternative.h 2011-07-21 22:17:23.000000000 -0400
5905 +++ linux-3.0.4/arch/x86/include/asm/alternative.h 2011-08-23 21:47:55.000000000 -0400
5906 @@ -93,7 +93,7 @@ static inline int alternatives_text_rese
5907 ".section .discard,\"aw\",@progbits\n" \
5908 " .byte 0xff + (664f-663f) - (662b-661b)\n" /* rlen <= slen */ \
5909 ".previous\n" \
5910 - ".section .altinstr_replacement, \"ax\"\n" \
5911 + ".section .altinstr_replacement, \"a\"\n" \
5912 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
5913 ".previous"
5914
5915 diff -urNp linux-3.0.4/arch/x86/include/asm/apic.h linux-3.0.4/arch/x86/include/asm/apic.h
5916 --- linux-3.0.4/arch/x86/include/asm/apic.h 2011-07-21 22:17:23.000000000 -0400
5917 +++ linux-3.0.4/arch/x86/include/asm/apic.h 2011-08-23 21:48:14.000000000 -0400
5918 @@ -45,7 +45,7 @@ static inline void generic_apic_probe(vo
5919
5920 #ifdef CONFIG_X86_LOCAL_APIC
5921
5922 -extern unsigned int apic_verbosity;
5923 +extern int apic_verbosity;
5924 extern int local_apic_timer_c2_ok;
5925
5926 extern int disable_apic;
5927 diff -urNp linux-3.0.4/arch/x86/include/asm/apm.h linux-3.0.4/arch/x86/include/asm/apm.h
5928 --- linux-3.0.4/arch/x86/include/asm/apm.h 2011-07-21 22:17:23.000000000 -0400
5929 +++ linux-3.0.4/arch/x86/include/asm/apm.h 2011-08-23 21:47:55.000000000 -0400
5930 @@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32
5931 __asm__ __volatile__(APM_DO_ZERO_SEGS
5932 "pushl %%edi\n\t"
5933 "pushl %%ebp\n\t"
5934 - "lcall *%%cs:apm_bios_entry\n\t"
5935 + "lcall *%%ss:apm_bios_entry\n\t"
5936 "setc %%al\n\t"
5937 "popl %%ebp\n\t"
5938 "popl %%edi\n\t"
5939 @@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_as
5940 __asm__ __volatile__(APM_DO_ZERO_SEGS
5941 "pushl %%edi\n\t"
5942 "pushl %%ebp\n\t"
5943 - "lcall *%%cs:apm_bios_entry\n\t"
5944 + "lcall *%%ss:apm_bios_entry\n\t"
5945 "setc %%bl\n\t"
5946 "popl %%ebp\n\t"
5947 "popl %%edi\n\t"
5948 diff -urNp linux-3.0.4/arch/x86/include/asm/atomic64_32.h linux-3.0.4/arch/x86/include/asm/atomic64_32.h
5949 --- linux-3.0.4/arch/x86/include/asm/atomic64_32.h 2011-07-21 22:17:23.000000000 -0400
5950 +++ linux-3.0.4/arch/x86/include/asm/atomic64_32.h 2011-08-23 21:47:55.000000000 -0400
5951 @@ -12,6 +12,14 @@ typedef struct {
5952 u64 __aligned(8) counter;
5953 } atomic64_t;
5954
5955 +#ifdef CONFIG_PAX_REFCOUNT
5956 +typedef struct {
5957 + u64 __aligned(8) counter;
5958 +} atomic64_unchecked_t;
5959 +#else
5960 +typedef atomic64_t atomic64_unchecked_t;
5961 +#endif
5962 +
5963 #define ATOMIC64_INIT(val) { (val) }
5964
5965 #ifdef CONFIG_X86_CMPXCHG64
5966 @@ -38,6 +46,21 @@ static inline long long atomic64_cmpxchg
5967 }
5968
5969 /**
5970 + * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
5971 + * @p: pointer to type atomic64_unchecked_t
5972 + * @o: expected value
5973 + * @n: new value
5974 + *
5975 + * Atomically sets @v to @n if it was equal to @o and returns
5976 + * the old value.
5977 + */
5978 +
5979 +static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
5980 +{
5981 + return cmpxchg64(&v->counter, o, n);
5982 +}
5983 +
5984 +/**
5985 * atomic64_xchg - xchg atomic64 variable
5986 * @v: pointer to type atomic64_t
5987 * @n: value to assign
5988 @@ -77,6 +100,24 @@ static inline void atomic64_set(atomic64
5989 }
5990
5991 /**
5992 + * atomic64_set_unchecked - set atomic64 variable
5993 + * @v: pointer to type atomic64_unchecked_t
5994 + * @n: value to assign
5995 + *
5996 + * Atomically sets the value of @v to @n.
5997 + */
5998 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
5999 +{
6000 + unsigned high = (unsigned)(i >> 32);
6001 + unsigned low = (unsigned)i;
6002 + asm volatile(ATOMIC64_ALTERNATIVE(set)
6003 + : "+b" (low), "+c" (high)
6004 + : "S" (v)
6005 + : "eax", "edx", "memory"
6006 + );
6007 +}
6008 +
6009 +/**
6010 * atomic64_read - read atomic64 variable
6011 * @v: pointer to type atomic64_t
6012 *
6013 @@ -93,6 +134,22 @@ static inline long long atomic64_read(at
6014 }
6015
6016 /**
6017 + * atomic64_read_unchecked - read atomic64 variable
6018 + * @v: pointer to type atomic64_unchecked_t
6019 + *
6020 + * Atomically reads the value of @v and returns it.
6021 + */
6022 +static inline long long atomic64_read_unchecked(atomic64_unchecked_t *v)
6023 +{
6024 + long long r;
6025 + asm volatile(ATOMIC64_ALTERNATIVE(read_unchecked)
6026 + : "=A" (r), "+c" (v)
6027 + : : "memory"
6028 + );
6029 + return r;
6030 + }
6031 +
6032 +/**
6033 * atomic64_add_return - add and return
6034 * @i: integer value to add
6035 * @v: pointer to type atomic64_t
6036 @@ -108,6 +165,22 @@ static inline long long atomic64_add_ret
6037 return i;
6038 }
6039
6040 +/**
6041 + * atomic64_add_return_unchecked - add and return
6042 + * @i: integer value to add
6043 + * @v: pointer to type atomic64_unchecked_t
6044 + *
6045 + * Atomically adds @i to @v and returns @i + *@v
6046 + */
6047 +static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
6048 +{
6049 + asm volatile(ATOMIC64_ALTERNATIVE(add_return_unchecked)
6050 + : "+A" (i), "+c" (v)
6051 + : : "memory"
6052 + );
6053 + return i;
6054 +}
6055 +
6056 /*
6057 * Other variants with different arithmetic operators:
6058 */
6059 @@ -131,6 +204,17 @@ static inline long long atomic64_inc_ret
6060 return a;
6061 }
6062
6063 +static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
6064 +{
6065 + long long a;
6066 + asm volatile(ATOMIC64_ALTERNATIVE(inc_return_unchecked)
6067 + : "=A" (a)
6068 + : "S" (v)
6069 + : "memory", "ecx"
6070 + );
6071 + return a;
6072 +}
6073 +
6074 static inline long long atomic64_dec_return(atomic64_t *v)
6075 {
6076 long long a;
6077 @@ -159,6 +243,22 @@ static inline long long atomic64_add(lon
6078 }
6079
6080 /**
6081 + * atomic64_add_unchecked - add integer to atomic64 variable
6082 + * @i: integer value to add
6083 + * @v: pointer to type atomic64_unchecked_t
6084 + *
6085 + * Atomically adds @i to @v.
6086 + */
6087 +static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
6088 +{
6089 + asm volatile(ATOMIC64_ALTERNATIVE_(add_unchecked, add_return_unchecked)
6090 + : "+A" (i), "+c" (v)
6091 + : : "memory"
6092 + );
6093 + return i;
6094 +}
6095 +
6096 +/**
6097 * atomic64_sub - subtract the atomic64 variable
6098 * @i: integer value to subtract
6099 * @v: pointer to type atomic64_t
6100 diff -urNp linux-3.0.4/arch/x86/include/asm/atomic64_64.h linux-3.0.4/arch/x86/include/asm/atomic64_64.h
6101 --- linux-3.0.4/arch/x86/include/asm/atomic64_64.h 2011-07-21 22:17:23.000000000 -0400
6102 +++ linux-3.0.4/arch/x86/include/asm/atomic64_64.h 2011-08-23 21:47:55.000000000 -0400
6103 @@ -18,7 +18,19 @@
6104 */
6105 static inline long atomic64_read(const atomic64_t *v)
6106 {
6107 - return (*(volatile long *)&(v)->counter);
6108 + return (*(volatile const long *)&(v)->counter);
6109 +}
6110 +
6111 +/**
6112 + * atomic64_read_unchecked - read atomic64 variable
6113 + * @v: pointer of type atomic64_unchecked_t
6114 + *
6115 + * Atomically reads the value of @v.
6116 + * Doesn't imply a read memory barrier.
6117 + */
6118 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
6119 +{
6120 + return (*(volatile const long *)&(v)->counter);
6121 }
6122
6123 /**
6124 @@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64
6125 }
6126
6127 /**
6128 + * atomic64_set_unchecked - set atomic64 variable
6129 + * @v: pointer to type atomic64_unchecked_t
6130 + * @i: required value
6131 + *
6132 + * Atomically sets the value of @v to @i.
6133 + */
6134 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
6135 +{
6136 + v->counter = i;
6137 +}
6138 +
6139 +/**
6140 * atomic64_add - add integer to atomic64 variable
6141 * @i: integer value to add
6142 * @v: pointer to type atomic64_t
6143 @@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64
6144 */
6145 static inline void atomic64_add(long i, atomic64_t *v)
6146 {
6147 + asm volatile(LOCK_PREFIX "addq %1,%0\n"
6148 +
6149 +#ifdef CONFIG_PAX_REFCOUNT
6150 + "jno 0f\n"
6151 + LOCK_PREFIX "subq %1,%0\n"
6152 + "int $4\n0:\n"
6153 + _ASM_EXTABLE(0b, 0b)
6154 +#endif
6155 +
6156 + : "=m" (v->counter)
6157 + : "er" (i), "m" (v->counter));
6158 +}
6159 +
6160 +/**
6161 + * atomic64_add_unchecked - add integer to atomic64 variable
6162 + * @i: integer value to add
6163 + * @v: pointer to type atomic64_unchecked_t
6164 + *
6165 + * Atomically adds @i to @v.
6166 + */
6167 +static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
6168 +{
6169 asm volatile(LOCK_PREFIX "addq %1,%0"
6170 : "=m" (v->counter)
6171 : "er" (i), "m" (v->counter));
6172 @@ -56,7 +102,29 @@ static inline void atomic64_add(long i,
6173 */
6174 static inline void atomic64_sub(long i, atomic64_t *v)
6175 {
6176 - asm volatile(LOCK_PREFIX "subq %1,%0"
6177 + asm volatile(LOCK_PREFIX "subq %1,%0\n"
6178 +
6179 +#ifdef CONFIG_PAX_REFCOUNT
6180 + "jno 0f\n"
6181 + LOCK_PREFIX "addq %1,%0\n"
6182 + "int $4\n0:\n"
6183 + _ASM_EXTABLE(0b, 0b)
6184 +#endif
6185 +
6186 + : "=m" (v->counter)
6187 + : "er" (i), "m" (v->counter));
6188 +}
6189 +
6190 +/**
6191 + * atomic64_sub_unchecked - subtract the atomic64 variable
6192 + * @i: integer value to subtract
6193 + * @v: pointer to type atomic64_unchecked_t
6194 + *
6195 + * Atomically subtracts @i from @v.
6196 + */
6197 +static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
6198 +{
6199 + asm volatile(LOCK_PREFIX "subq %1,%0\n"
6200 : "=m" (v->counter)
6201 : "er" (i), "m" (v->counter));
6202 }
6203 @@ -74,7 +142,16 @@ static inline int atomic64_sub_and_test(
6204 {
6205 unsigned char c;
6206
6207 - asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
6208 + asm volatile(LOCK_PREFIX "subq %2,%0\n"
6209 +
6210 +#ifdef CONFIG_PAX_REFCOUNT
6211 + "jno 0f\n"
6212 + LOCK_PREFIX "addq %2,%0\n"
6213 + "int $4\n0:\n"
6214 + _ASM_EXTABLE(0b, 0b)
6215 +#endif
6216 +
6217 + "sete %1\n"
6218 : "=m" (v->counter), "=qm" (c)
6219 : "er" (i), "m" (v->counter) : "memory");
6220 return c;
6221 @@ -88,6 +165,27 @@ static inline int atomic64_sub_and_test(
6222 */
6223 static inline void atomic64_inc(atomic64_t *v)
6224 {
6225 + asm volatile(LOCK_PREFIX "incq %0\n"
6226 +
6227 +#ifdef CONFIG_PAX_REFCOUNT
6228 + "jno 0f\n"
6229 + LOCK_PREFIX "decq %0\n"
6230 + "int $4\n0:\n"
6231 + _ASM_EXTABLE(0b, 0b)
6232 +#endif
6233 +
6234 + : "=m" (v->counter)
6235 + : "m" (v->counter));
6236 +}
6237 +
6238 +/**
6239 + * atomic64_inc_unchecked - increment atomic64 variable
6240 + * @v: pointer to type atomic64_unchecked_t
6241 + *
6242 + * Atomically increments @v by 1.
6243 + */
6244 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
6245 +{
6246 asm volatile(LOCK_PREFIX "incq %0"
6247 : "=m" (v->counter)
6248 : "m" (v->counter));
6249 @@ -101,7 +199,28 @@ static inline void atomic64_inc(atomic64
6250 */
6251 static inline void atomic64_dec(atomic64_t *v)
6252 {
6253 - asm volatile(LOCK_PREFIX "decq %0"
6254 + asm volatile(LOCK_PREFIX "decq %0\n"
6255 +
6256 +#ifdef CONFIG_PAX_REFCOUNT
6257 + "jno 0f\n"
6258 + LOCK_PREFIX "incq %0\n"
6259 + "int $4\n0:\n"
6260 + _ASM_EXTABLE(0b, 0b)
6261 +#endif
6262 +
6263 + : "=m" (v->counter)
6264 + : "m" (v->counter));
6265 +}
6266 +
6267 +/**
6268 + * atomic64_dec_unchecked - decrement atomic64 variable
6269 + * @v: pointer to type atomic64_t
6270 + *
6271 + * Atomically decrements @v by 1.
6272 + */
6273 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
6274 +{
6275 + asm volatile(LOCK_PREFIX "decq %0\n"
6276 : "=m" (v->counter)
6277 : "m" (v->counter));
6278 }
6279 @@ -118,7 +237,16 @@ static inline int atomic64_dec_and_test(
6280 {
6281 unsigned char c;
6282
6283 - asm volatile(LOCK_PREFIX "decq %0; sete %1"
6284 + asm volatile(LOCK_PREFIX "decq %0\n"
6285 +
6286 +#ifdef CONFIG_PAX_REFCOUNT
6287 + "jno 0f\n"
6288 + LOCK_PREFIX "incq %0\n"
6289 + "int $4\n0:\n"
6290 + _ASM_EXTABLE(0b, 0b)
6291 +#endif
6292 +
6293 + "sete %1\n"
6294 : "=m" (v->counter), "=qm" (c)
6295 : "m" (v->counter) : "memory");
6296 return c != 0;
6297 @@ -136,7 +264,16 @@ static inline int atomic64_inc_and_test(
6298 {
6299 unsigned char c;
6300
6301 - asm volatile(LOCK_PREFIX "incq %0; sete %1"
6302 + asm volatile(LOCK_PREFIX "incq %0\n"
6303 +
6304 +#ifdef CONFIG_PAX_REFCOUNT
6305 + "jno 0f\n"
6306 + LOCK_PREFIX "decq %0\n"
6307 + "int $4\n0:\n"
6308 + _ASM_EXTABLE(0b, 0b)
6309 +#endif
6310 +
6311 + "sete %1\n"
6312 : "=m" (v->counter), "=qm" (c)
6313 : "m" (v->counter) : "memory");
6314 return c != 0;
6315 @@ -155,7 +292,16 @@ static inline int atomic64_add_negative(
6316 {
6317 unsigned char c;
6318
6319 - asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
6320 + asm volatile(LOCK_PREFIX "addq %2,%0\n"
6321 +
6322 +#ifdef CONFIG_PAX_REFCOUNT
6323 + "jno 0f\n"
6324 + LOCK_PREFIX "subq %2,%0\n"
6325 + "int $4\n0:\n"
6326 + _ASM_EXTABLE(0b, 0b)
6327 +#endif
6328 +
6329 + "sets %1\n"
6330 : "=m" (v->counter), "=qm" (c)
6331 : "er" (i), "m" (v->counter) : "memory");
6332 return c;
6333 @@ -171,7 +317,31 @@ static inline int atomic64_add_negative(
6334 static inline long atomic64_add_return(long i, atomic64_t *v)
6335 {
6336 long __i = i;
6337 - asm volatile(LOCK_PREFIX "xaddq %0, %1;"
6338 + asm volatile(LOCK_PREFIX "xaddq %0, %1\n"
6339 +
6340 +#ifdef CONFIG_PAX_REFCOUNT
6341 + "jno 0f\n"
6342 + "movq %0, %1\n"
6343 + "int $4\n0:\n"
6344 + _ASM_EXTABLE(0b, 0b)
6345 +#endif
6346 +
6347 + : "+r" (i), "+m" (v->counter)
6348 + : : "memory");
6349 + return i + __i;
6350 +}
6351 +
6352 +/**
6353 + * atomic64_add_return_unchecked - add and return
6354 + * @i: integer value to add
6355 + * @v: pointer to type atomic64_unchecked_t
6356 + *
6357 + * Atomically adds @i to @v and returns @i + @v
6358 + */
6359 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
6360 +{
6361 + long __i = i;
6362 + asm volatile(LOCK_PREFIX "xaddq %0, %1"
6363 : "+r" (i), "+m" (v->counter)
6364 : : "memory");
6365 return i + __i;
6366 @@ -183,6 +353,10 @@ static inline long atomic64_sub_return(l
6367 }
6368
6369 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
6370 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
6371 +{
6372 + return atomic64_add_return_unchecked(1, v);
6373 +}
6374 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
6375
6376 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
6377 @@ -190,6 +364,11 @@ static inline long atomic64_cmpxchg(atom
6378 return cmpxchg(&v->counter, old, new);
6379 }
6380
6381 +static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
6382 +{
6383 + return cmpxchg(&v->counter, old, new);
6384 +}
6385 +
6386 static inline long atomic64_xchg(atomic64_t *v, long new)
6387 {
6388 return xchg(&v->counter, new);
6389 @@ -206,17 +385,30 @@ static inline long atomic64_xchg(atomic6
6390 */
6391 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
6392 {
6393 - long c, old;
6394 + long c, old, new;
6395 c = atomic64_read(v);
6396 for (;;) {
6397 - if (unlikely(c == (u)))
6398 + if (unlikely(c == u))
6399 break;
6400 - old = atomic64_cmpxchg((v), c, c + (a));
6401 +
6402 + asm volatile("add %2,%0\n"
6403 +
6404 +#ifdef CONFIG_PAX_REFCOUNT
6405 + "jno 0f\n"
6406 + "sub %2,%0\n"
6407 + "int $4\n0:\n"
6408 + _ASM_EXTABLE(0b, 0b)
6409 +#endif
6410 +
6411 + : "=r" (new)
6412 + : "0" (c), "ir" (a));
6413 +
6414 + old = atomic64_cmpxchg(v, c, new);
6415 if (likely(old == c))
6416 break;
6417 c = old;
6418 }
6419 - return c != (u);
6420 + return c != u;
6421 }
6422
6423 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
6424 diff -urNp linux-3.0.4/arch/x86/include/asm/atomic.h linux-3.0.4/arch/x86/include/asm/atomic.h
6425 --- linux-3.0.4/arch/x86/include/asm/atomic.h 2011-07-21 22:17:23.000000000 -0400
6426 +++ linux-3.0.4/arch/x86/include/asm/atomic.h 2011-08-23 21:47:55.000000000 -0400
6427 @@ -22,7 +22,18 @@
6428 */
6429 static inline int atomic_read(const atomic_t *v)
6430 {
6431 - return (*(volatile int *)&(v)->counter);
6432 + return (*(volatile const int *)&(v)->counter);
6433 +}
6434 +
6435 +/**
6436 + * atomic_read_unchecked - read atomic variable
6437 + * @v: pointer of type atomic_unchecked_t
6438 + *
6439 + * Atomically reads the value of @v.
6440 + */
6441 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
6442 +{
6443 + return (*(volatile const int *)&(v)->counter);
6444 }
6445
6446 /**
6447 @@ -38,6 +49,18 @@ static inline void atomic_set(atomic_t *
6448 }
6449
6450 /**
6451 + * atomic_set_unchecked - set atomic variable
6452 + * @v: pointer of type atomic_unchecked_t
6453 + * @i: required value
6454 + *
6455 + * Atomically sets the value of @v to @i.
6456 + */
6457 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
6458 +{
6459 + v->counter = i;
6460 +}
6461 +
6462 +/**
6463 * atomic_add - add integer to atomic variable
6464 * @i: integer value to add
6465 * @v: pointer of type atomic_t
6466 @@ -46,7 +69,29 @@ static inline void atomic_set(atomic_t *
6467 */
6468 static inline void atomic_add(int i, atomic_t *v)
6469 {
6470 - asm volatile(LOCK_PREFIX "addl %1,%0"
6471 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
6472 +
6473 +#ifdef CONFIG_PAX_REFCOUNT
6474 + "jno 0f\n"
6475 + LOCK_PREFIX "subl %1,%0\n"
6476 + "int $4\n0:\n"
6477 + _ASM_EXTABLE(0b, 0b)
6478 +#endif
6479 +
6480 + : "+m" (v->counter)
6481 + : "ir" (i));
6482 +}
6483 +
6484 +/**
6485 + * atomic_add_unchecked - add integer to atomic variable
6486 + * @i: integer value to add
6487 + * @v: pointer of type atomic_unchecked_t
6488 + *
6489 + * Atomically adds @i to @v.
6490 + */
6491 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
6492 +{
6493 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
6494 : "+m" (v->counter)
6495 : "ir" (i));
6496 }
6497 @@ -60,7 +105,29 @@ static inline void atomic_add(int i, ato
6498 */
6499 static inline void atomic_sub(int i, atomic_t *v)
6500 {
6501 - asm volatile(LOCK_PREFIX "subl %1,%0"
6502 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
6503 +
6504 +#ifdef CONFIG_PAX_REFCOUNT
6505 + "jno 0f\n"
6506 + LOCK_PREFIX "addl %1,%0\n"
6507 + "int $4\n0:\n"
6508 + _ASM_EXTABLE(0b, 0b)
6509 +#endif
6510 +
6511 + : "+m" (v->counter)
6512 + : "ir" (i));
6513 +}
6514 +
6515 +/**
6516 + * atomic_sub_unchecked - subtract integer from atomic variable
6517 + * @i: integer value to subtract
6518 + * @v: pointer of type atomic_unchecked_t
6519 + *
6520 + * Atomically subtracts @i from @v.
6521 + */
6522 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
6523 +{
6524 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
6525 : "+m" (v->counter)
6526 : "ir" (i));
6527 }
6528 @@ -78,7 +145,16 @@ static inline int atomic_sub_and_test(in
6529 {
6530 unsigned char c;
6531
6532 - asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
6533 + asm volatile(LOCK_PREFIX "subl %2,%0\n"
6534 +
6535 +#ifdef CONFIG_PAX_REFCOUNT
6536 + "jno 0f\n"
6537 + LOCK_PREFIX "addl %2,%0\n"
6538 + "int $4\n0:\n"
6539 + _ASM_EXTABLE(0b, 0b)
6540 +#endif
6541 +
6542 + "sete %1\n"
6543 : "+m" (v->counter), "=qm" (c)
6544 : "ir" (i) : "memory");
6545 return c;
6546 @@ -92,7 +168,27 @@ static inline int atomic_sub_and_test(in
6547 */
6548 static inline void atomic_inc(atomic_t *v)
6549 {
6550 - asm volatile(LOCK_PREFIX "incl %0"
6551 + asm volatile(LOCK_PREFIX "incl %0\n"
6552 +
6553 +#ifdef CONFIG_PAX_REFCOUNT
6554 + "jno 0f\n"
6555 + LOCK_PREFIX "decl %0\n"
6556 + "int $4\n0:\n"
6557 + _ASM_EXTABLE(0b, 0b)
6558 +#endif
6559 +
6560 + : "+m" (v->counter));
6561 +}
6562 +
6563 +/**
6564 + * atomic_inc_unchecked - increment atomic variable
6565 + * @v: pointer of type atomic_unchecked_t
6566 + *
6567 + * Atomically increments @v by 1.
6568 + */
6569 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
6570 +{
6571 + asm volatile(LOCK_PREFIX "incl %0\n"
6572 : "+m" (v->counter));
6573 }
6574
6575 @@ -104,7 +200,27 @@ static inline void atomic_inc(atomic_t *
6576 */
6577 static inline void atomic_dec(atomic_t *v)
6578 {
6579 - asm volatile(LOCK_PREFIX "decl %0"
6580 + asm volatile(LOCK_PREFIX "decl %0\n"
6581 +
6582 +#ifdef CONFIG_PAX_REFCOUNT
6583 + "jno 0f\n"
6584 + LOCK_PREFIX "incl %0\n"
6585 + "int $4\n0:\n"
6586 + _ASM_EXTABLE(0b, 0b)
6587 +#endif
6588 +
6589 + : "+m" (v->counter));
6590 +}
6591 +
6592 +/**
6593 + * atomic_dec_unchecked - decrement atomic variable
6594 + * @v: pointer of type atomic_unchecked_t
6595 + *
6596 + * Atomically decrements @v by 1.
6597 + */
6598 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
6599 +{
6600 + asm volatile(LOCK_PREFIX "decl %0\n"
6601 : "+m" (v->counter));
6602 }
6603
6604 @@ -120,7 +236,16 @@ static inline int atomic_dec_and_test(at
6605 {
6606 unsigned char c;
6607
6608 - asm volatile(LOCK_PREFIX "decl %0; sete %1"
6609 + asm volatile(LOCK_PREFIX "decl %0\n"
6610 +
6611 +#ifdef CONFIG_PAX_REFCOUNT
6612 + "jno 0f\n"
6613 + LOCK_PREFIX "incl %0\n"
6614 + "int $4\n0:\n"
6615 + _ASM_EXTABLE(0b, 0b)
6616 +#endif
6617 +
6618 + "sete %1\n"
6619 : "+m" (v->counter), "=qm" (c)
6620 : : "memory");
6621 return c != 0;
6622 @@ -138,7 +263,35 @@ static inline int atomic_inc_and_test(at
6623 {
6624 unsigned char c;
6625
6626 - asm volatile(LOCK_PREFIX "incl %0; sete %1"
6627 + asm volatile(LOCK_PREFIX "incl %0\n"
6628 +
6629 +#ifdef CONFIG_PAX_REFCOUNT
6630 + "jno 0f\n"
6631 + LOCK_PREFIX "decl %0\n"
6632 + "int $4\n0:\n"
6633 + _ASM_EXTABLE(0b, 0b)
6634 +#endif
6635 +
6636 + "sete %1\n"
6637 + : "+m" (v->counter), "=qm" (c)
6638 + : : "memory");
6639 + return c != 0;
6640 +}
6641 +
6642 +/**
6643 + * atomic_inc_and_test_unchecked - increment and test
6644 + * @v: pointer of type atomic_unchecked_t
6645 + *
6646 + * Atomically increments @v by 1
6647 + * and returns true if the result is zero, or false for all
6648 + * other cases.
6649 + */
6650 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
6651 +{
6652 + unsigned char c;
6653 +
6654 + asm volatile(LOCK_PREFIX "incl %0\n"
6655 + "sete %1\n"
6656 : "+m" (v->counter), "=qm" (c)
6657 : : "memory");
6658 return c != 0;
6659 @@ -157,7 +310,16 @@ static inline int atomic_add_negative(in
6660 {
6661 unsigned char c;
6662
6663 - asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
6664 + asm volatile(LOCK_PREFIX "addl %2,%0\n"
6665 +
6666 +#ifdef CONFIG_PAX_REFCOUNT
6667 + "jno 0f\n"
6668 + LOCK_PREFIX "subl %2,%0\n"
6669 + "int $4\n0:\n"
6670 + _ASM_EXTABLE(0b, 0b)
6671 +#endif
6672 +
6673 + "sets %1\n"
6674 : "+m" (v->counter), "=qm" (c)
6675 : "ir" (i) : "memory");
6676 return c;
6677 @@ -180,6 +342,46 @@ static inline int atomic_add_return(int
6678 #endif
6679 /* Modern 486+ processor */
6680 __i = i;
6681 + asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
6682 +
6683 +#ifdef CONFIG_PAX_REFCOUNT
6684 + "jno 0f\n"
6685 + "movl %0, %1\n"
6686 + "int $4\n0:\n"
6687 + _ASM_EXTABLE(0b, 0b)
6688 +#endif
6689 +
6690 + : "+r" (i), "+m" (v->counter)
6691 + : : "memory");
6692 + return i + __i;
6693 +
6694 +#ifdef CONFIG_M386
6695 +no_xadd: /* Legacy 386 processor */
6696 + local_irq_save(flags);
6697 + __i = atomic_read(v);
6698 + atomic_set(v, i + __i);
6699 + local_irq_restore(flags);
6700 + return i + __i;
6701 +#endif
6702 +}
6703 +
6704 +/**
6705 + * atomic_add_return_unchecked - add integer and return
6706 + * @v: pointer of type atomic_unchecked_t
6707 + * @i: integer value to add
6708 + *
6709 + * Atomically adds @i to @v and returns @i + @v
6710 + */
6711 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
6712 +{
6713 + int __i;
6714 +#ifdef CONFIG_M386
6715 + unsigned long flags;
6716 + if (unlikely(boot_cpu_data.x86 <= 3))
6717 + goto no_xadd;
6718 +#endif
6719 + /* Modern 486+ processor */
6720 + __i = i;
6721 asm volatile(LOCK_PREFIX "xaddl %0, %1"
6722 : "+r" (i), "+m" (v->counter)
6723 : : "memory");
6724 @@ -208,6 +410,10 @@ static inline int atomic_sub_return(int
6725 }
6726
6727 #define atomic_inc_return(v) (atomic_add_return(1, v))
6728 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
6729 +{
6730 + return atomic_add_return_unchecked(1, v);
6731 +}
6732 #define atomic_dec_return(v) (atomic_sub_return(1, v))
6733
6734 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
6735 @@ -215,11 +421,21 @@ static inline int atomic_cmpxchg(atomic_
6736 return cmpxchg(&v->counter, old, new);
6737 }
6738
6739 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
6740 +{
6741 + return cmpxchg(&v->counter, old, new);
6742 +}
6743 +
6744 static inline int atomic_xchg(atomic_t *v, int new)
6745 {
6746 return xchg(&v->counter, new);
6747 }
6748
6749 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
6750 +{
6751 + return xchg(&v->counter, new);
6752 +}
6753 +
6754 /**
6755 * atomic_add_unless - add unless the number is already a given value
6756 * @v: pointer of type atomic_t
6757 @@ -231,21 +447,77 @@ static inline int atomic_xchg(atomic_t *
6758 */
6759 static inline int atomic_add_unless(atomic_t *v, int a, int u)
6760 {
6761 - int c, old;
6762 + int c, old, new;
6763 c = atomic_read(v);
6764 for (;;) {
6765 - if (unlikely(c == (u)))
6766 + if (unlikely(c == u))
6767 break;
6768 - old = atomic_cmpxchg((v), c, c + (a));
6769 +
6770 + asm volatile("addl %2,%0\n"
6771 +
6772 +#ifdef CONFIG_PAX_REFCOUNT
6773 + "jno 0f\n"
6774 + "subl %2,%0\n"
6775 + "int $4\n0:\n"
6776 + _ASM_EXTABLE(0b, 0b)
6777 +#endif
6778 +
6779 + : "=r" (new)
6780 + : "0" (c), "ir" (a));
6781 +
6782 + old = atomic_cmpxchg(v, c, new);
6783 if (likely(old == c))
6784 break;
6785 c = old;
6786 }
6787 - return c != (u);
6788 + return c != u;
6789 }
6790
6791 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
6792
6793 +/**
6794 + * atomic_inc_not_zero_hint - increment if not null
6795 + * @v: pointer of type atomic_t
6796 + * @hint: probable value of the atomic before the increment
6797 + *
6798 + * This version of atomic_inc_not_zero() gives a hint of probable
6799 + * value of the atomic. This helps processor to not read the memory
6800 + * before doing the atomic read/modify/write cycle, lowering
6801 + * number of bus transactions on some arches.
6802 + *
6803 + * Returns: 0 if increment was not done, 1 otherwise.
6804 + */
6805 +#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
6806 +static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
6807 +{
6808 + int val, c = hint, new;
6809 +
6810 + /* sanity test, should be removed by compiler if hint is a constant */
6811 + if (!hint)
6812 + return atomic_inc_not_zero(v);
6813 +
6814 + do {
6815 + asm volatile("incl %0\n"
6816 +
6817 +#ifdef CONFIG_PAX_REFCOUNT
6818 + "jno 0f\n"
6819 + "decl %0\n"
6820 + "int $4\n0:\n"
6821 + _ASM_EXTABLE(0b, 0b)
6822 +#endif
6823 +
6824 + : "=r" (new)
6825 + : "0" (c));
6826 +
6827 + val = atomic_cmpxchg(v, c, new);
6828 + if (val == c)
6829 + return 1;
6830 + c = val;
6831 + } while (c);
6832 +
6833 + return 0;
6834 +}
6835 +
6836 /*
6837 * atomic_dec_if_positive - decrement by 1 if old value positive
6838 * @v: pointer of type atomic_t
6839 diff -urNp linux-3.0.4/arch/x86/include/asm/bitops.h linux-3.0.4/arch/x86/include/asm/bitops.h
6840 --- linux-3.0.4/arch/x86/include/asm/bitops.h 2011-07-21 22:17:23.000000000 -0400
6841 +++ linux-3.0.4/arch/x86/include/asm/bitops.h 2011-08-23 21:47:55.000000000 -0400
6842 @@ -38,7 +38,7 @@
6843 * a mask operation on a byte.
6844 */
6845 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
6846 -#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
6847 +#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
6848 #define CONST_MASK(nr) (1 << ((nr) & 7))
6849
6850 /**
6851 diff -urNp linux-3.0.4/arch/x86/include/asm/boot.h linux-3.0.4/arch/x86/include/asm/boot.h
6852 --- linux-3.0.4/arch/x86/include/asm/boot.h 2011-07-21 22:17:23.000000000 -0400
6853 +++ linux-3.0.4/arch/x86/include/asm/boot.h 2011-08-23 21:47:55.000000000 -0400
6854 @@ -11,10 +11,15 @@
6855 #include <asm/pgtable_types.h>
6856
6857 /* Physical address where kernel should be loaded. */
6858 -#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
6859 +#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
6860 + (CONFIG_PHYSICAL_ALIGN - 1)) \
6861 & ~(CONFIG_PHYSICAL_ALIGN - 1))
6862
6863 +#ifndef __ASSEMBLY__
6864 +extern unsigned char __LOAD_PHYSICAL_ADDR[];
6865 +#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
6866 +#endif
6867 +
6868 /* Minimum kernel alignment, as a power of two */
6869 #ifdef CONFIG_X86_64
6870 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
6871 diff -urNp linux-3.0.4/arch/x86/include/asm/cacheflush.h linux-3.0.4/arch/x86/include/asm/cacheflush.h
6872 --- linux-3.0.4/arch/x86/include/asm/cacheflush.h 2011-07-21 22:17:23.000000000 -0400
6873 +++ linux-3.0.4/arch/x86/include/asm/cacheflush.h 2011-08-23 21:47:55.000000000 -0400
6874 @@ -26,7 +26,7 @@ static inline unsigned long get_page_mem
6875 unsigned long pg_flags = pg->flags & _PGMT_MASK;
6876
6877 if (pg_flags == _PGMT_DEFAULT)
6878 - return -1;
6879 + return ~0UL;
6880 else if (pg_flags == _PGMT_WC)
6881 return _PAGE_CACHE_WC;
6882 else if (pg_flags == _PGMT_UC_MINUS)
6883 diff -urNp linux-3.0.4/arch/x86/include/asm/cache.h linux-3.0.4/arch/x86/include/asm/cache.h
6884 --- linux-3.0.4/arch/x86/include/asm/cache.h 2011-07-21 22:17:23.000000000 -0400
6885 +++ linux-3.0.4/arch/x86/include/asm/cache.h 2011-08-23 21:47:55.000000000 -0400
6886 @@ -5,12 +5,13 @@
6887
6888 /* L1 cache line size */
6889 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
6890 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
6891 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
6892
6893 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
6894 +#define __read_only __attribute__((__section__(".data..read_only")))
6895
6896 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
6897 -#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
6898 +#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
6899
6900 #ifdef CONFIG_X86_VSMP
6901 #ifdef CONFIG_SMP
6902 diff -urNp linux-3.0.4/arch/x86/include/asm/checksum_32.h linux-3.0.4/arch/x86/include/asm/checksum_32.h
6903 --- linux-3.0.4/arch/x86/include/asm/checksum_32.h 2011-07-21 22:17:23.000000000 -0400
6904 +++ linux-3.0.4/arch/x86/include/asm/checksum_32.h 2011-08-23 21:47:55.000000000 -0400
6905 @@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_gene
6906 int len, __wsum sum,
6907 int *src_err_ptr, int *dst_err_ptr);
6908
6909 +asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
6910 + int len, __wsum sum,
6911 + int *src_err_ptr, int *dst_err_ptr);
6912 +
6913 +asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
6914 + int len, __wsum sum,
6915 + int *src_err_ptr, int *dst_err_ptr);
6916 +
6917 /*
6918 * Note: when you get a NULL pointer exception here this means someone
6919 * passed in an incorrect kernel address to one of these functions.
6920 @@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_f
6921 int *err_ptr)
6922 {
6923 might_sleep();
6924 - return csum_partial_copy_generic((__force void *)src, dst,
6925 + return csum_partial_copy_generic_from_user((__force void *)src, dst,
6926 len, sum, err_ptr, NULL);
6927 }
6928
6929 @@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_us
6930 {
6931 might_sleep();
6932 if (access_ok(VERIFY_WRITE, dst, len))
6933 - return csum_partial_copy_generic(src, (__force void *)dst,
6934 + return csum_partial_copy_generic_to_user(src, (__force void *)dst,
6935 len, sum, NULL, err_ptr);
6936
6937 if (len)
6938 diff -urNp linux-3.0.4/arch/x86/include/asm/cpufeature.h linux-3.0.4/arch/x86/include/asm/cpufeature.h
6939 --- linux-3.0.4/arch/x86/include/asm/cpufeature.h 2011-07-21 22:17:23.000000000 -0400
6940 +++ linux-3.0.4/arch/x86/include/asm/cpufeature.h 2011-08-23 21:47:55.000000000 -0400
6941 @@ -358,7 +358,7 @@ static __always_inline __pure bool __sta
6942 ".section .discard,\"aw\",@progbits\n"
6943 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
6944 ".previous\n"
6945 - ".section .altinstr_replacement,\"ax\"\n"
6946 + ".section .altinstr_replacement,\"a\"\n"
6947 "3: movb $1,%0\n"
6948 "4:\n"
6949 ".previous\n"
6950 diff -urNp linux-3.0.4/arch/x86/include/asm/desc_defs.h linux-3.0.4/arch/x86/include/asm/desc_defs.h
6951 --- linux-3.0.4/arch/x86/include/asm/desc_defs.h 2011-07-21 22:17:23.000000000 -0400
6952 +++ linux-3.0.4/arch/x86/include/asm/desc_defs.h 2011-08-23 21:47:55.000000000 -0400
6953 @@ -31,6 +31,12 @@ struct desc_struct {
6954 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
6955 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
6956 };
6957 + struct {
6958 + u16 offset_low;
6959 + u16 seg;
6960 + unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
6961 + unsigned offset_high: 16;
6962 + } gate;
6963 };
6964 } __attribute__((packed));
6965
6966 diff -urNp linux-3.0.4/arch/x86/include/asm/desc.h linux-3.0.4/arch/x86/include/asm/desc.h
6967 --- linux-3.0.4/arch/x86/include/asm/desc.h 2011-07-21 22:17:23.000000000 -0400
6968 +++ linux-3.0.4/arch/x86/include/asm/desc.h 2011-08-23 21:47:55.000000000 -0400
6969 @@ -4,6 +4,7 @@
6970 #include <asm/desc_defs.h>
6971 #include <asm/ldt.h>
6972 #include <asm/mmu.h>
6973 +#include <asm/pgtable.h>
6974
6975 #include <linux/smp.h>
6976
6977 @@ -16,6 +17,7 @@ static inline void fill_ldt(struct desc_
6978
6979 desc->type = (info->read_exec_only ^ 1) << 1;
6980 desc->type |= info->contents << 2;
6981 + desc->type |= info->seg_not_present ^ 1;
6982
6983 desc->s = 1;
6984 desc->dpl = 0x3;
6985 @@ -34,17 +36,12 @@ static inline void fill_ldt(struct desc_
6986 }
6987
6988 extern struct desc_ptr idt_descr;
6989 -extern gate_desc idt_table[];
6990 -
6991 -struct gdt_page {
6992 - struct desc_struct gdt[GDT_ENTRIES];
6993 -} __attribute__((aligned(PAGE_SIZE)));
6994 -
6995 -DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
6996 +extern gate_desc idt_table[256];
6997
6998 +extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
6999 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
7000 {
7001 - return per_cpu(gdt_page, cpu).gdt;
7002 + return cpu_gdt_table[cpu];
7003 }
7004
7005 #ifdef CONFIG_X86_64
7006 @@ -69,8 +66,14 @@ static inline void pack_gate(gate_desc *
7007 unsigned long base, unsigned dpl, unsigned flags,
7008 unsigned short seg)
7009 {
7010 - gate->a = (seg << 16) | (base & 0xffff);
7011 - gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
7012 + gate->gate.offset_low = base;
7013 + gate->gate.seg = seg;
7014 + gate->gate.reserved = 0;
7015 + gate->gate.type = type;
7016 + gate->gate.s = 0;
7017 + gate->gate.dpl = dpl;
7018 + gate->gate.p = 1;
7019 + gate->gate.offset_high = base >> 16;
7020 }
7021
7022 #endif
7023 @@ -115,12 +118,16 @@ static inline void paravirt_free_ldt(str
7024
7025 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
7026 {
7027 + pax_open_kernel();
7028 memcpy(&idt[entry], gate, sizeof(*gate));
7029 + pax_close_kernel();
7030 }
7031
7032 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
7033 {
7034 + pax_open_kernel();
7035 memcpy(&ldt[entry], desc, 8);
7036 + pax_close_kernel();
7037 }
7038
7039 static inline void
7040 @@ -134,7 +141,9 @@ native_write_gdt_entry(struct desc_struc
7041 default: size = sizeof(*gdt); break;
7042 }
7043
7044 + pax_open_kernel();
7045 memcpy(&gdt[entry], desc, size);
7046 + pax_close_kernel();
7047 }
7048
7049 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
7050 @@ -207,7 +216,9 @@ static inline void native_set_ldt(const
7051
7052 static inline void native_load_tr_desc(void)
7053 {
7054 + pax_open_kernel();
7055 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
7056 + pax_close_kernel();
7057 }
7058
7059 static inline void native_load_gdt(const struct desc_ptr *dtr)
7060 @@ -244,8 +255,10 @@ static inline void native_load_tls(struc
7061 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
7062 unsigned int i;
7063
7064 + pax_open_kernel();
7065 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
7066 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
7067 + pax_close_kernel();
7068 }
7069
7070 #define _LDT_empty(info) \
7071 @@ -307,7 +320,7 @@ static inline void set_desc_limit(struct
7072 desc->limit = (limit >> 16) & 0xf;
7073 }
7074
7075 -static inline void _set_gate(int gate, unsigned type, void *addr,
7076 +static inline void _set_gate(int gate, unsigned type, const void *addr,
7077 unsigned dpl, unsigned ist, unsigned seg)
7078 {
7079 gate_desc s;
7080 @@ -326,7 +339,7 @@ static inline void _set_gate(int gate, u
7081 * Pentium F0 0F bugfix can have resulted in the mapped
7082 * IDT being write-protected.
7083 */
7084 -static inline void set_intr_gate(unsigned int n, void *addr)
7085 +static inline void set_intr_gate(unsigned int n, const void *addr)
7086 {
7087 BUG_ON((unsigned)n > 0xFF);
7088 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
7089 @@ -356,19 +369,19 @@ static inline void alloc_intr_gate(unsig
7090 /*
7091 * This routine sets up an interrupt gate at directory privilege level 3.
7092 */
7093 -static inline void set_system_intr_gate(unsigned int n, void *addr)
7094 +static inline void set_system_intr_gate(unsigned int n, const void *addr)
7095 {
7096 BUG_ON((unsigned)n > 0xFF);
7097 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
7098 }
7099
7100 -static inline void set_system_trap_gate(unsigned int n, void *addr)
7101 +static inline void set_system_trap_gate(unsigned int n, const void *addr)
7102 {
7103 BUG_ON((unsigned)n > 0xFF);
7104 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
7105 }
7106
7107 -static inline void set_trap_gate(unsigned int n, void *addr)
7108 +static inline void set_trap_gate(unsigned int n, const void *addr)
7109 {
7110 BUG_ON((unsigned)n > 0xFF);
7111 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
7112 @@ -377,19 +390,31 @@ static inline void set_trap_gate(unsigne
7113 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
7114 {
7115 BUG_ON((unsigned)n > 0xFF);
7116 - _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
7117 + _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
7118 }
7119
7120 -static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
7121 +static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
7122 {
7123 BUG_ON((unsigned)n > 0xFF);
7124 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
7125 }
7126
7127 -static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
7128 +static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
7129 {
7130 BUG_ON((unsigned)n > 0xFF);
7131 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
7132 }
7133
7134 +#ifdef CONFIG_X86_32
7135 +static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
7136 +{
7137 + struct desc_struct d;
7138 +
7139 + if (likely(limit))
7140 + limit = (limit - 1UL) >> PAGE_SHIFT;
7141 + pack_descriptor(&d, base, limit, 0xFB, 0xC);
7142 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
7143 +}
7144 +#endif
7145 +
7146 #endif /* _ASM_X86_DESC_H */
7147 diff -urNp linux-3.0.4/arch/x86/include/asm/e820.h linux-3.0.4/arch/x86/include/asm/e820.h
7148 --- linux-3.0.4/arch/x86/include/asm/e820.h 2011-07-21 22:17:23.000000000 -0400
7149 +++ linux-3.0.4/arch/x86/include/asm/e820.h 2011-08-23 21:47:55.000000000 -0400
7150 @@ -69,7 +69,7 @@ struct e820map {
7151 #define ISA_START_ADDRESS 0xa0000
7152 #define ISA_END_ADDRESS 0x100000
7153
7154 -#define BIOS_BEGIN 0x000a0000
7155 +#define BIOS_BEGIN 0x000c0000
7156 #define BIOS_END 0x00100000
7157
7158 #define BIOS_ROM_BASE 0xffe00000
7159 diff -urNp linux-3.0.4/arch/x86/include/asm/elf.h linux-3.0.4/arch/x86/include/asm/elf.h
7160 --- linux-3.0.4/arch/x86/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
7161 +++ linux-3.0.4/arch/x86/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
7162 @@ -237,7 +237,25 @@ extern int force_personality32;
7163 the loader. We need to make sure that it is out of the way of the program
7164 that it will "exec", and that there is sufficient room for the brk. */
7165
7166 +#ifdef CONFIG_PAX_SEGMEXEC
7167 +#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
7168 +#else
7169 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
7170 +#endif
7171 +
7172 +#ifdef CONFIG_PAX_ASLR
7173 +#ifdef CONFIG_X86_32
7174 +#define PAX_ELF_ET_DYN_BASE 0x10000000UL
7175 +
7176 +#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
7177 +#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
7178 +#else
7179 +#define PAX_ELF_ET_DYN_BASE 0x400000UL
7180 +
7181 +#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
7182 +#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
7183 +#endif
7184 +#endif
7185
7186 /* This yields a mask that user programs can use to figure out what
7187 instruction set this CPU supports. This could be done in user space,
7188 @@ -290,9 +308,7 @@ do { \
7189
7190 #define ARCH_DLINFO \
7191 do { \
7192 - if (vdso_enabled) \
7193 - NEW_AUX_ENT(AT_SYSINFO_EHDR, \
7194 - (unsigned long)current->mm->context.vdso); \
7195 + NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
7196 } while (0)
7197
7198 #define AT_SYSINFO 32
7199 @@ -303,7 +319,7 @@ do { \
7200
7201 #endif /* !CONFIG_X86_32 */
7202
7203 -#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
7204 +#define VDSO_CURRENT_BASE (current->mm->context.vdso)
7205
7206 #define VDSO_ENTRY \
7207 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
7208 @@ -317,7 +333,4 @@ extern int arch_setup_additional_pages(s
7209 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
7210 #define compat_arch_setup_additional_pages syscall32_setup_pages
7211
7212 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
7213 -#define arch_randomize_brk arch_randomize_brk
7214 -
7215 #endif /* _ASM_X86_ELF_H */
7216 diff -urNp linux-3.0.4/arch/x86/include/asm/emergency-restart.h linux-3.0.4/arch/x86/include/asm/emergency-restart.h
7217 --- linux-3.0.4/arch/x86/include/asm/emergency-restart.h 2011-07-21 22:17:23.000000000 -0400
7218 +++ linux-3.0.4/arch/x86/include/asm/emergency-restart.h 2011-08-23 21:47:55.000000000 -0400
7219 @@ -15,6 +15,6 @@ enum reboot_type {
7220
7221 extern enum reboot_type reboot_type;
7222
7223 -extern void machine_emergency_restart(void);
7224 +extern void machine_emergency_restart(void) __noreturn;
7225
7226 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
7227 diff -urNp linux-3.0.4/arch/x86/include/asm/futex.h linux-3.0.4/arch/x86/include/asm/futex.h
7228 --- linux-3.0.4/arch/x86/include/asm/futex.h 2011-07-21 22:17:23.000000000 -0400
7229 +++ linux-3.0.4/arch/x86/include/asm/futex.h 2011-08-23 21:47:55.000000000 -0400
7230 @@ -12,16 +12,18 @@
7231 #include <asm/system.h>
7232
7233 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
7234 + typecheck(u32 *, uaddr); \
7235 asm volatile("1:\t" insn "\n" \
7236 "2:\t.section .fixup,\"ax\"\n" \
7237 "3:\tmov\t%3, %1\n" \
7238 "\tjmp\t2b\n" \
7239 "\t.previous\n" \
7240 _ASM_EXTABLE(1b, 3b) \
7241 - : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
7242 + : "=r" (oldval), "=r" (ret), "+m" (*(u32 *)____m(uaddr))\
7243 : "i" (-EFAULT), "0" (oparg), "1" (0))
7244
7245 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
7246 + typecheck(u32 *, uaddr); \
7247 asm volatile("1:\tmovl %2, %0\n" \
7248 "\tmovl\t%0, %3\n" \
7249 "\t" insn "\n" \
7250 @@ -34,7 +36,7 @@
7251 _ASM_EXTABLE(1b, 4b) \
7252 _ASM_EXTABLE(2b, 4b) \
7253 : "=&a" (oldval), "=&r" (ret), \
7254 - "+m" (*uaddr), "=&r" (tem) \
7255 + "+m" (*(u32 *)____m(uaddr)), "=&r" (tem) \
7256 : "r" (oparg), "i" (-EFAULT), "1" (0))
7257
7258 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
7259 @@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser
7260
7261 switch (op) {
7262 case FUTEX_OP_SET:
7263 - __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
7264 + __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
7265 break;
7266 case FUTEX_OP_ADD:
7267 - __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
7268 + __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
7269 uaddr, oparg);
7270 break;
7271 case FUTEX_OP_OR:
7272 @@ -123,13 +125,13 @@ static inline int futex_atomic_cmpxchg_i
7273 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
7274 return -EFAULT;
7275
7276 - asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
7277 + asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"
7278 "2:\t.section .fixup, \"ax\"\n"
7279 "3:\tmov %3, %0\n"
7280 "\tjmp 2b\n"
7281 "\t.previous\n"
7282 _ASM_EXTABLE(1b, 3b)
7283 - : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
7284 + : "+r" (ret), "=a" (oldval), "+m" (*(u32 *)____m(uaddr))
7285 : "i" (-EFAULT), "r" (newval), "1" (oldval)
7286 : "memory"
7287 );
7288 diff -urNp linux-3.0.4/arch/x86/include/asm/hw_irq.h linux-3.0.4/arch/x86/include/asm/hw_irq.h
7289 --- linux-3.0.4/arch/x86/include/asm/hw_irq.h 2011-07-21 22:17:23.000000000 -0400
7290 +++ linux-3.0.4/arch/x86/include/asm/hw_irq.h 2011-08-23 21:47:55.000000000 -0400
7291 @@ -137,8 +137,8 @@ extern void setup_ioapic_dest(void);
7292 extern void enable_IO_APIC(void);
7293
7294 /* Statistics */
7295 -extern atomic_t irq_err_count;
7296 -extern atomic_t irq_mis_count;
7297 +extern atomic_unchecked_t irq_err_count;
7298 +extern atomic_unchecked_t irq_mis_count;
7299
7300 /* EISA */
7301 extern void eisa_set_level_irq(unsigned int irq);
7302 diff -urNp linux-3.0.4/arch/x86/include/asm/i387.h linux-3.0.4/arch/x86/include/asm/i387.h
7303 --- linux-3.0.4/arch/x86/include/asm/i387.h 2011-07-21 22:17:23.000000000 -0400
7304 +++ linux-3.0.4/arch/x86/include/asm/i387.h 2011-08-23 21:47:55.000000000 -0400
7305 @@ -92,6 +92,11 @@ static inline int fxrstor_checking(struc
7306 {
7307 int err;
7308
7309 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
7310 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
7311 + fx = (struct i387_fxsave_struct *)((void *)fx + PAX_USER_SHADOW_BASE);
7312 +#endif
7313 +
7314 /* See comment in fxsave() below. */
7315 #ifdef CONFIG_AS_FXSAVEQ
7316 asm volatile("1: fxrstorq %[fx]\n\t"
7317 @@ -121,6 +126,11 @@ static inline int fxsave_user(struct i38
7318 {
7319 int err;
7320
7321 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
7322 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
7323 + fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
7324 +#endif
7325 +
7326 /*
7327 * Clear the bytes not touched by the fxsave and reserved
7328 * for the SW usage.
7329 @@ -213,13 +223,8 @@ static inline void fpu_fxsave(struct fpu
7330 #endif /* CONFIG_X86_64 */
7331
7332 /* We need a safe address that is cheap to find and that is already
7333 - in L1 during context switch. The best choices are unfortunately
7334 - different for UP and SMP */
7335 -#ifdef CONFIG_SMP
7336 -#define safe_address (__per_cpu_offset[0])
7337 -#else
7338 -#define safe_address (kstat_cpu(0).cpustat.user)
7339 -#endif
7340 + in L1 during context switch. */
7341 +#define safe_address (init_tss[smp_processor_id()].x86_tss.sp0)
7342
7343 /*
7344 * These must be called with preempt disabled
7345 @@ -312,7 +317,7 @@ static inline void kernel_fpu_begin(void
7346 struct thread_info *me = current_thread_info();
7347 preempt_disable();
7348 if (me->status & TS_USEDFPU)
7349 - __save_init_fpu(me->task);
7350 + __save_init_fpu(current);
7351 else
7352 clts();
7353 }
7354 diff -urNp linux-3.0.4/arch/x86/include/asm/io.h linux-3.0.4/arch/x86/include/asm/io.h
7355 --- linux-3.0.4/arch/x86/include/asm/io.h 2011-07-21 22:17:23.000000000 -0400
7356 +++ linux-3.0.4/arch/x86/include/asm/io.h 2011-08-23 21:47:55.000000000 -0400
7357 @@ -196,6 +196,17 @@ extern void set_iounmap_nonlazy(void);
7358
7359 #include <linux/vmalloc.h>
7360
7361 +#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
7362 +static inline int valid_phys_addr_range(unsigned long addr, size_t count)
7363 +{
7364 + return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
7365 +}
7366 +
7367 +static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
7368 +{
7369 + return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
7370 +}
7371 +
7372 /*
7373 * Convert a virtual cached pointer to an uncached pointer
7374 */
7375 diff -urNp linux-3.0.4/arch/x86/include/asm/irqflags.h linux-3.0.4/arch/x86/include/asm/irqflags.h
7376 --- linux-3.0.4/arch/x86/include/asm/irqflags.h 2011-07-21 22:17:23.000000000 -0400
7377 +++ linux-3.0.4/arch/x86/include/asm/irqflags.h 2011-08-23 21:47:55.000000000 -0400
7378 @@ -140,6 +140,11 @@ static inline unsigned long arch_local_i
7379 sti; \
7380 sysexit
7381
7382 +#define GET_CR0_INTO_RDI mov %cr0, %rdi
7383 +#define SET_RDI_INTO_CR0 mov %rdi, %cr0
7384 +#define GET_CR3_INTO_RDI mov %cr3, %rdi
7385 +#define SET_RDI_INTO_CR3 mov %rdi, %cr3
7386 +
7387 #else
7388 #define INTERRUPT_RETURN iret
7389 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
7390 diff -urNp linux-3.0.4/arch/x86/include/asm/kprobes.h linux-3.0.4/arch/x86/include/asm/kprobes.h
7391 --- linux-3.0.4/arch/x86/include/asm/kprobes.h 2011-07-21 22:17:23.000000000 -0400
7392 +++ linux-3.0.4/arch/x86/include/asm/kprobes.h 2011-08-23 21:47:55.000000000 -0400
7393 @@ -37,13 +37,8 @@ typedef u8 kprobe_opcode_t;
7394 #define RELATIVEJUMP_SIZE 5
7395 #define RELATIVECALL_OPCODE 0xe8
7396 #define RELATIVE_ADDR_SIZE 4
7397 -#define MAX_STACK_SIZE 64
7398 -#define MIN_STACK_SIZE(ADDR) \
7399 - (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
7400 - THREAD_SIZE - (unsigned long)(ADDR))) \
7401 - ? (MAX_STACK_SIZE) \
7402 - : (((unsigned long)current_thread_info()) + \
7403 - THREAD_SIZE - (unsigned long)(ADDR)))
7404 +#define MAX_STACK_SIZE 64UL
7405 +#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
7406
7407 #define flush_insn_slot(p) do { } while (0)
7408
7409 diff -urNp linux-3.0.4/arch/x86/include/asm/kvm_host.h linux-3.0.4/arch/x86/include/asm/kvm_host.h
7410 --- linux-3.0.4/arch/x86/include/asm/kvm_host.h 2011-07-21 22:17:23.000000000 -0400
7411 +++ linux-3.0.4/arch/x86/include/asm/kvm_host.h 2011-08-26 19:49:56.000000000 -0400
7412 @@ -441,7 +441,7 @@ struct kvm_arch {
7413 unsigned int n_used_mmu_pages;
7414 unsigned int n_requested_mmu_pages;
7415 unsigned int n_max_mmu_pages;
7416 - atomic_t invlpg_counter;
7417 + atomic_unchecked_t invlpg_counter;
7418 struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
7419 /*
7420 * Hash table of struct kvm_mmu_page.
7421 @@ -619,7 +619,7 @@ struct kvm_x86_ops {
7422 enum x86_intercept_stage stage);
7423
7424 const struct trace_print_flags *exit_reasons_str;
7425 -};
7426 +} __do_const;
7427
7428 struct kvm_arch_async_pf {
7429 u32 token;
7430 diff -urNp linux-3.0.4/arch/x86/include/asm/local.h linux-3.0.4/arch/x86/include/asm/local.h
7431 --- linux-3.0.4/arch/x86/include/asm/local.h 2011-07-21 22:17:23.000000000 -0400
7432 +++ linux-3.0.4/arch/x86/include/asm/local.h 2011-08-23 21:47:55.000000000 -0400
7433 @@ -18,26 +18,58 @@ typedef struct {
7434
7435 static inline void local_inc(local_t *l)
7436 {
7437 - asm volatile(_ASM_INC "%0"
7438 + asm volatile(_ASM_INC "%0\n"
7439 +
7440 +#ifdef CONFIG_PAX_REFCOUNT
7441 + "jno 0f\n"
7442 + _ASM_DEC "%0\n"
7443 + "int $4\n0:\n"
7444 + _ASM_EXTABLE(0b, 0b)
7445 +#endif
7446 +
7447 : "+m" (l->a.counter));
7448 }
7449
7450 static inline void local_dec(local_t *l)
7451 {
7452 - asm volatile(_ASM_DEC "%0"
7453 + asm volatile(_ASM_DEC "%0\n"
7454 +
7455 +#ifdef CONFIG_PAX_REFCOUNT
7456 + "jno 0f\n"
7457 + _ASM_INC "%0\n"
7458 + "int $4\n0:\n"
7459 + _ASM_EXTABLE(0b, 0b)
7460 +#endif
7461 +
7462 : "+m" (l->a.counter));
7463 }
7464
7465 static inline void local_add(long i, local_t *l)
7466 {
7467 - asm volatile(_ASM_ADD "%1,%0"
7468 + asm volatile(_ASM_ADD "%1,%0\n"
7469 +
7470 +#ifdef CONFIG_PAX_REFCOUNT
7471 + "jno 0f\n"
7472 + _ASM_SUB "%1,%0\n"
7473 + "int $4\n0:\n"
7474 + _ASM_EXTABLE(0b, 0b)
7475 +#endif
7476 +
7477 : "+m" (l->a.counter)
7478 : "ir" (i));
7479 }
7480
7481 static inline void local_sub(long i, local_t *l)
7482 {
7483 - asm volatile(_ASM_SUB "%1,%0"
7484 + asm volatile(_ASM_SUB "%1,%0\n"
7485 +
7486 +#ifdef CONFIG_PAX_REFCOUNT
7487 + "jno 0f\n"
7488 + _ASM_ADD "%1,%0\n"
7489 + "int $4\n0:\n"
7490 + _ASM_EXTABLE(0b, 0b)
7491 +#endif
7492 +
7493 : "+m" (l->a.counter)
7494 : "ir" (i));
7495 }
7496 @@ -55,7 +87,16 @@ static inline int local_sub_and_test(lon
7497 {
7498 unsigned char c;
7499
7500 - asm volatile(_ASM_SUB "%2,%0; sete %1"
7501 + asm volatile(_ASM_SUB "%2,%0\n"
7502 +
7503 +#ifdef CONFIG_PAX_REFCOUNT
7504 + "jno 0f\n"
7505 + _ASM_ADD "%2,%0\n"
7506 + "int $4\n0:\n"
7507 + _ASM_EXTABLE(0b, 0b)
7508 +#endif
7509 +
7510 + "sete %1\n"
7511 : "+m" (l->a.counter), "=qm" (c)
7512 : "ir" (i) : "memory");
7513 return c;
7514 @@ -73,7 +114,16 @@ static inline int local_dec_and_test(loc
7515 {
7516 unsigned char c;
7517
7518 - asm volatile(_ASM_DEC "%0; sete %1"
7519 + asm volatile(_ASM_DEC "%0\n"
7520 +
7521 +#ifdef CONFIG_PAX_REFCOUNT
7522 + "jno 0f\n"
7523 + _ASM_INC "%0\n"
7524 + "int $4\n0:\n"
7525 + _ASM_EXTABLE(0b, 0b)
7526 +#endif
7527 +
7528 + "sete %1\n"
7529 : "+m" (l->a.counter), "=qm" (c)
7530 : : "memory");
7531 return c != 0;
7532 @@ -91,7 +141,16 @@ static inline int local_inc_and_test(loc
7533 {
7534 unsigned char c;
7535
7536 - asm volatile(_ASM_INC "%0; sete %1"
7537 + asm volatile(_ASM_INC "%0\n"
7538 +
7539 +#ifdef CONFIG_PAX_REFCOUNT
7540 + "jno 0f\n"
7541 + _ASM_DEC "%0\n"
7542 + "int $4\n0:\n"
7543 + _ASM_EXTABLE(0b, 0b)
7544 +#endif
7545 +
7546 + "sete %1\n"
7547 : "+m" (l->a.counter), "=qm" (c)
7548 : : "memory");
7549 return c != 0;
7550 @@ -110,7 +169,16 @@ static inline int local_add_negative(lon
7551 {
7552 unsigned char c;
7553
7554 - asm volatile(_ASM_ADD "%2,%0; sets %1"
7555 + asm volatile(_ASM_ADD "%2,%0\n"
7556 +
7557 +#ifdef CONFIG_PAX_REFCOUNT
7558 + "jno 0f\n"
7559 + _ASM_SUB "%2,%0\n"
7560 + "int $4\n0:\n"
7561 + _ASM_EXTABLE(0b, 0b)
7562 +#endif
7563 +
7564 + "sets %1\n"
7565 : "+m" (l->a.counter), "=qm" (c)
7566 : "ir" (i) : "memory");
7567 return c;
7568 @@ -133,7 +201,15 @@ static inline long local_add_return(long
7569 #endif
7570 /* Modern 486+ processor */
7571 __i = i;
7572 - asm volatile(_ASM_XADD "%0, %1;"
7573 + asm volatile(_ASM_XADD "%0, %1\n"
7574 +
7575 +#ifdef CONFIG_PAX_REFCOUNT
7576 + "jno 0f\n"
7577 + _ASM_MOV "%0,%1\n"
7578 + "int $4\n0:\n"
7579 + _ASM_EXTABLE(0b, 0b)
7580 +#endif
7581 +
7582 : "+r" (i), "+m" (l->a.counter)
7583 : : "memory");
7584 return i + __i;
7585 diff -urNp linux-3.0.4/arch/x86/include/asm/mman.h linux-3.0.4/arch/x86/include/asm/mman.h
7586 --- linux-3.0.4/arch/x86/include/asm/mman.h 2011-07-21 22:17:23.000000000 -0400
7587 +++ linux-3.0.4/arch/x86/include/asm/mman.h 2011-08-23 21:47:55.000000000 -0400
7588 @@ -5,4 +5,14 @@
7589
7590 #include <asm-generic/mman.h>
7591
7592 +#ifdef __KERNEL__
7593 +#ifndef __ASSEMBLY__
7594 +#ifdef CONFIG_X86_32
7595 +#define arch_mmap_check i386_mmap_check
7596 +int i386_mmap_check(unsigned long addr, unsigned long len,
7597 + unsigned long flags);
7598 +#endif
7599 +#endif
7600 +#endif
7601 +
7602 #endif /* _ASM_X86_MMAN_H */
7603 diff -urNp linux-3.0.4/arch/x86/include/asm/mmu_context.h linux-3.0.4/arch/x86/include/asm/mmu_context.h
7604 --- linux-3.0.4/arch/x86/include/asm/mmu_context.h 2011-07-21 22:17:23.000000000 -0400
7605 +++ linux-3.0.4/arch/x86/include/asm/mmu_context.h 2011-08-23 21:48:14.000000000 -0400
7606 @@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *m
7607
7608 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
7609 {
7610 +
7611 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
7612 + unsigned int i;
7613 + pgd_t *pgd;
7614 +
7615 + pax_open_kernel();
7616 + pgd = get_cpu_pgd(smp_processor_id());
7617 + for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
7618 + set_pgd_batched(pgd+i, native_make_pgd(0));
7619 + pax_close_kernel();
7620 +#endif
7621 +
7622 #ifdef CONFIG_SMP
7623 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
7624 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
7625 @@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_s
7626 struct task_struct *tsk)
7627 {
7628 unsigned cpu = smp_processor_id();
7629 +#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
7630 + int tlbstate = TLBSTATE_OK;
7631 +#endif
7632
7633 if (likely(prev != next)) {
7634 #ifdef CONFIG_SMP
7635 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
7636 + tlbstate = percpu_read(cpu_tlbstate.state);
7637 +#endif
7638 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
7639 percpu_write(cpu_tlbstate.active_mm, next);
7640 #endif
7641 cpumask_set_cpu(cpu, mm_cpumask(next));
7642
7643 /* Re-load page tables */
7644 +#ifdef CONFIG_PAX_PER_CPU_PGD
7645 + pax_open_kernel();
7646 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
7647 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
7648 + pax_close_kernel();
7649 + load_cr3(get_cpu_pgd(cpu));
7650 +#else
7651 load_cr3(next->pgd);
7652 +#endif
7653
7654 /* stop flush ipis for the previous mm */
7655 cpumask_clear_cpu(cpu, mm_cpumask(prev));
7656 @@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_s
7657 */
7658 if (unlikely(prev->context.ldt != next->context.ldt))
7659 load_LDT_nolock(&next->context);
7660 - }
7661 +
7662 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
7663 + if (!(__supported_pte_mask & _PAGE_NX)) {
7664 + smp_mb__before_clear_bit();
7665 + cpu_clear(cpu, prev->context.cpu_user_cs_mask);
7666 + smp_mb__after_clear_bit();
7667 + cpu_set(cpu, next->context.cpu_user_cs_mask);
7668 + }
7669 +#endif
7670 +
7671 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
7672 + if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
7673 + prev->context.user_cs_limit != next->context.user_cs_limit))
7674 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
7675 #ifdef CONFIG_SMP
7676 + else if (unlikely(tlbstate != TLBSTATE_OK))
7677 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
7678 +#endif
7679 +#endif
7680 +
7681 + }
7682 else {
7683 +
7684 +#ifdef CONFIG_PAX_PER_CPU_PGD
7685 + pax_open_kernel();
7686 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
7687 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
7688 + pax_close_kernel();
7689 + load_cr3(get_cpu_pgd(cpu));
7690 +#endif
7691 +
7692 +#ifdef CONFIG_SMP
7693 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
7694 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
7695
7696 @@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_s
7697 * tlb flush IPI delivery. We must reload CR3
7698 * to make sure to use no freed page tables.
7699 */
7700 +
7701 +#ifndef CONFIG_PAX_PER_CPU_PGD
7702 load_cr3(next->pgd);
7703 +#endif
7704 +
7705 load_LDT_nolock(&next->context);
7706 +
7707 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
7708 + if (!(__supported_pte_mask & _PAGE_NX))
7709 + cpu_set(cpu, next->context.cpu_user_cs_mask);
7710 +#endif
7711 +
7712 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
7713 +#ifdef CONFIG_PAX_PAGEEXEC
7714 + if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
7715 +#endif
7716 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
7717 +#endif
7718 +
7719 }
7720 - }
7721 #endif
7722 + }
7723 }
7724
7725 #define activate_mm(prev, next) \
7726 diff -urNp linux-3.0.4/arch/x86/include/asm/mmu.h linux-3.0.4/arch/x86/include/asm/mmu.h
7727 --- linux-3.0.4/arch/x86/include/asm/mmu.h 2011-07-21 22:17:23.000000000 -0400
7728 +++ linux-3.0.4/arch/x86/include/asm/mmu.h 2011-08-23 21:47:55.000000000 -0400
7729 @@ -9,7 +9,7 @@
7730 * we put the segment information here.
7731 */
7732 typedef struct {
7733 - void *ldt;
7734 + struct desc_struct *ldt;
7735 int size;
7736
7737 #ifdef CONFIG_X86_64
7738 @@ -18,7 +18,19 @@ typedef struct {
7739 #endif
7740
7741 struct mutex lock;
7742 - void *vdso;
7743 + unsigned long vdso;
7744 +
7745 +#ifdef CONFIG_X86_32
7746 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
7747 + unsigned long user_cs_base;
7748 + unsigned long user_cs_limit;
7749 +
7750 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
7751 + cpumask_t cpu_user_cs_mask;
7752 +#endif
7753 +
7754 +#endif
7755 +#endif
7756 } mm_context_t;
7757
7758 #ifdef CONFIG_SMP
7759 diff -urNp linux-3.0.4/arch/x86/include/asm/module.h linux-3.0.4/arch/x86/include/asm/module.h
7760 --- linux-3.0.4/arch/x86/include/asm/module.h 2011-07-21 22:17:23.000000000 -0400
7761 +++ linux-3.0.4/arch/x86/include/asm/module.h 2011-08-23 21:48:14.000000000 -0400
7762 @@ -5,6 +5,7 @@
7763
7764 #ifdef CONFIG_X86_64
7765 /* X86_64 does not define MODULE_PROC_FAMILY */
7766 +#define MODULE_PROC_FAMILY ""
7767 #elif defined CONFIG_M386
7768 #define MODULE_PROC_FAMILY "386 "
7769 #elif defined CONFIG_M486
7770 @@ -59,8 +60,30 @@
7771 #error unknown processor family
7772 #endif
7773
7774 -#ifdef CONFIG_X86_32
7775 -# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
7776 +#ifdef CONFIG_PAX_MEMORY_UDEREF
7777 +#define MODULE_PAX_UDEREF "UDEREF "
7778 +#else
7779 +#define MODULE_PAX_UDEREF ""
7780 +#endif
7781 +
7782 +#ifdef CONFIG_PAX_KERNEXEC
7783 +#define MODULE_PAX_KERNEXEC "KERNEXEC "
7784 +#else
7785 +#define MODULE_PAX_KERNEXEC ""
7786 #endif
7787
7788 +#ifdef CONFIG_PAX_REFCOUNT
7789 +#define MODULE_PAX_REFCOUNT "REFCOUNT "
7790 +#else
7791 +#define MODULE_PAX_REFCOUNT ""
7792 +#endif
7793 +
7794 +#ifdef CONFIG_GRKERNSEC
7795 +#define MODULE_GRSEC "GRSECURITY "
7796 +#else
7797 +#define MODULE_GRSEC ""
7798 +#endif
7799 +
7800 +#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_GRSEC MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF MODULE_PAX_REFCOUNT
7801 +
7802 #endif /* _ASM_X86_MODULE_H */
7803 diff -urNp linux-3.0.4/arch/x86/include/asm/page_64_types.h linux-3.0.4/arch/x86/include/asm/page_64_types.h
7804 --- linux-3.0.4/arch/x86/include/asm/page_64_types.h 2011-07-21 22:17:23.000000000 -0400
7805 +++ linux-3.0.4/arch/x86/include/asm/page_64_types.h 2011-08-23 21:47:55.000000000 -0400
7806 @@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
7807
7808 /* duplicated to the one in bootmem.h */
7809 extern unsigned long max_pfn;
7810 -extern unsigned long phys_base;
7811 +extern const unsigned long phys_base;
7812
7813 extern unsigned long __phys_addr(unsigned long);
7814 #define __phys_reloc_hide(x) (x)
7815 diff -urNp linux-3.0.4/arch/x86/include/asm/paravirt.h linux-3.0.4/arch/x86/include/asm/paravirt.h
7816 --- linux-3.0.4/arch/x86/include/asm/paravirt.h 2011-07-21 22:17:23.000000000 -0400
7817 +++ linux-3.0.4/arch/x86/include/asm/paravirt.h 2011-08-23 21:47:55.000000000 -0400
7818 @@ -658,6 +658,18 @@ static inline void set_pgd(pgd_t *pgdp,
7819 val);
7820 }
7821
7822 +static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
7823 +{
7824 + pgdval_t val = native_pgd_val(pgd);
7825 +
7826 + if (sizeof(pgdval_t) > sizeof(long))
7827 + PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
7828 + val, (u64)val >> 32);
7829 + else
7830 + PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
7831 + val);
7832 +}
7833 +
7834 static inline void pgd_clear(pgd_t *pgdp)
7835 {
7836 set_pgd(pgdp, __pgd(0));
7837 @@ -739,6 +751,21 @@ static inline void __set_fixmap(unsigned
7838 pv_mmu_ops.set_fixmap(idx, phys, flags);
7839 }
7840
7841 +#ifdef CONFIG_PAX_KERNEXEC
7842 +static inline unsigned long pax_open_kernel(void)
7843 +{
7844 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
7845 +}
7846 +
7847 +static inline unsigned long pax_close_kernel(void)
7848 +{
7849 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
7850 +}
7851 +#else
7852 +static inline unsigned long pax_open_kernel(void) { return 0; }
7853 +static inline unsigned long pax_close_kernel(void) { return 0; }
7854 +#endif
7855 +
7856 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
7857
7858 static inline int arch_spin_is_locked(struct arch_spinlock *lock)
7859 @@ -955,7 +982,7 @@ extern void default_banner(void);
7860
7861 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
7862 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
7863 -#define PARA_INDIRECT(addr) *%cs:addr
7864 +#define PARA_INDIRECT(addr) *%ss:addr
7865 #endif
7866
7867 #define INTERRUPT_RETURN \
7868 @@ -1032,6 +1059,21 @@ extern void default_banner(void);
7869 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
7870 CLBR_NONE, \
7871 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
7872 +
7873 +#define GET_CR0_INTO_RDI \
7874 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
7875 + mov %rax,%rdi
7876 +
7877 +#define SET_RDI_INTO_CR0 \
7878 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
7879 +
7880 +#define GET_CR3_INTO_RDI \
7881 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
7882 + mov %rax,%rdi
7883 +
7884 +#define SET_RDI_INTO_CR3 \
7885 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
7886 +
7887 #endif /* CONFIG_X86_32 */
7888
7889 #endif /* __ASSEMBLY__ */
7890 diff -urNp linux-3.0.4/arch/x86/include/asm/paravirt_types.h linux-3.0.4/arch/x86/include/asm/paravirt_types.h
7891 --- linux-3.0.4/arch/x86/include/asm/paravirt_types.h 2011-07-21 22:17:23.000000000 -0400
7892 +++ linux-3.0.4/arch/x86/include/asm/paravirt_types.h 2011-08-23 21:47:55.000000000 -0400
7893 @@ -78,19 +78,19 @@ struct pv_init_ops {
7894 */
7895 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
7896 unsigned long addr, unsigned len);
7897 -};
7898 +} __no_const;
7899
7900
7901 struct pv_lazy_ops {
7902 /* Set deferred update mode, used for batching operations. */
7903 void (*enter)(void);
7904 void (*leave)(void);
7905 -};
7906 +} __no_const;
7907
7908 struct pv_time_ops {
7909 unsigned long long (*sched_clock)(void);
7910 unsigned long (*get_tsc_khz)(void);
7911 -};
7912 +} __no_const;
7913
7914 struct pv_cpu_ops {
7915 /* hooks for various privileged instructions */
7916 @@ -186,7 +186,7 @@ struct pv_cpu_ops {
7917
7918 void (*start_context_switch)(struct task_struct *prev);
7919 void (*end_context_switch)(struct task_struct *next);
7920 -};
7921 +} __no_const;
7922
7923 struct pv_irq_ops {
7924 /*
7925 @@ -217,7 +217,7 @@ struct pv_apic_ops {
7926 unsigned long start_eip,
7927 unsigned long start_esp);
7928 #endif
7929 -};
7930 +} __no_const;
7931
7932 struct pv_mmu_ops {
7933 unsigned long (*read_cr2)(void);
7934 @@ -306,6 +306,7 @@ struct pv_mmu_ops {
7935 struct paravirt_callee_save make_pud;
7936
7937 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
7938 + void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
7939 #endif /* PAGETABLE_LEVELS == 4 */
7940 #endif /* PAGETABLE_LEVELS >= 3 */
7941
7942 @@ -317,6 +318,12 @@ struct pv_mmu_ops {
7943 an mfn. We can tell which is which from the index. */
7944 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
7945 phys_addr_t phys, pgprot_t flags);
7946 +
7947 +#ifdef CONFIG_PAX_KERNEXEC
7948 + unsigned long (*pax_open_kernel)(void);
7949 + unsigned long (*pax_close_kernel)(void);
7950 +#endif
7951 +
7952 };
7953
7954 struct arch_spinlock;
7955 @@ -327,7 +334,7 @@ struct pv_lock_ops {
7956 void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
7957 int (*spin_trylock)(struct arch_spinlock *lock);
7958 void (*spin_unlock)(struct arch_spinlock *lock);
7959 -};
7960 +} __no_const;
7961
7962 /* This contains all the paravirt structures: we get a convenient
7963 * number for each function using the offset which we use to indicate
7964 diff -urNp linux-3.0.4/arch/x86/include/asm/pgalloc.h linux-3.0.4/arch/x86/include/asm/pgalloc.h
7965 --- linux-3.0.4/arch/x86/include/asm/pgalloc.h 2011-07-21 22:17:23.000000000 -0400
7966 +++ linux-3.0.4/arch/x86/include/asm/pgalloc.h 2011-08-23 21:47:55.000000000 -0400
7967 @@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(s
7968 pmd_t *pmd, pte_t *pte)
7969 {
7970 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
7971 + set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
7972 +}
7973 +
7974 +static inline void pmd_populate_user(struct mm_struct *mm,
7975 + pmd_t *pmd, pte_t *pte)
7976 +{
7977 + paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
7978 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
7979 }
7980
7981 diff -urNp linux-3.0.4/arch/x86/include/asm/pgtable-2level.h linux-3.0.4/arch/x86/include/asm/pgtable-2level.h
7982 --- linux-3.0.4/arch/x86/include/asm/pgtable-2level.h 2011-07-21 22:17:23.000000000 -0400
7983 +++ linux-3.0.4/arch/x86/include/asm/pgtable-2level.h 2011-08-23 21:47:55.000000000 -0400
7984 @@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t
7985
7986 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
7987 {
7988 + pax_open_kernel();
7989 *pmdp = pmd;
7990 + pax_close_kernel();
7991 }
7992
7993 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
7994 diff -urNp linux-3.0.4/arch/x86/include/asm/pgtable_32.h linux-3.0.4/arch/x86/include/asm/pgtable_32.h
7995 --- linux-3.0.4/arch/x86/include/asm/pgtable_32.h 2011-07-21 22:17:23.000000000 -0400
7996 +++ linux-3.0.4/arch/x86/include/asm/pgtable_32.h 2011-08-23 21:47:55.000000000 -0400
7997 @@ -25,9 +25,6 @@
7998 struct mm_struct;
7999 struct vm_area_struct;
8000
8001 -extern pgd_t swapper_pg_dir[1024];
8002 -extern pgd_t initial_page_table[1024];
8003 -
8004 static inline void pgtable_cache_init(void) { }
8005 static inline void check_pgt_cache(void) { }
8006 void paging_init(void);
8007 @@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, u
8008 # include <asm/pgtable-2level.h>
8009 #endif
8010
8011 +extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
8012 +extern pgd_t initial_page_table[PTRS_PER_PGD];
8013 +#ifdef CONFIG_X86_PAE
8014 +extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
8015 +#endif
8016 +
8017 #if defined(CONFIG_HIGHPTE)
8018 #define pte_offset_map(dir, address) \
8019 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
8020 @@ -62,7 +65,9 @@ extern void set_pmd_pfn(unsigned long, u
8021 /* Clear a kernel PTE and flush it from the TLB */
8022 #define kpte_clear_flush(ptep, vaddr) \
8023 do { \
8024 + pax_open_kernel(); \
8025 pte_clear(&init_mm, (vaddr), (ptep)); \
8026 + pax_close_kernel(); \
8027 __flush_tlb_one((vaddr)); \
8028 } while (0)
8029
8030 @@ -74,6 +79,9 @@ do { \
8031
8032 #endif /* !__ASSEMBLY__ */
8033
8034 +#define HAVE_ARCH_UNMAPPED_AREA
8035 +#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
8036 +
8037 /*
8038 * kern_addr_valid() is (1) for FLATMEM and (0) for
8039 * SPARSEMEM and DISCONTIGMEM
8040 diff -urNp linux-3.0.4/arch/x86/include/asm/pgtable_32_types.h linux-3.0.4/arch/x86/include/asm/pgtable_32_types.h
8041 --- linux-3.0.4/arch/x86/include/asm/pgtable_32_types.h 2011-07-21 22:17:23.000000000 -0400
8042 +++ linux-3.0.4/arch/x86/include/asm/pgtable_32_types.h 2011-08-23 21:47:55.000000000 -0400
8043 @@ -8,7 +8,7 @@
8044 */
8045 #ifdef CONFIG_X86_PAE
8046 # include <asm/pgtable-3level_types.h>
8047 -# define PMD_SIZE (1UL << PMD_SHIFT)
8048 +# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
8049 # define PMD_MASK (~(PMD_SIZE - 1))
8050 #else
8051 # include <asm/pgtable-2level_types.h>
8052 @@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set
8053 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
8054 #endif
8055
8056 +#ifdef CONFIG_PAX_KERNEXEC
8057 +#ifndef __ASSEMBLY__
8058 +extern unsigned char MODULES_EXEC_VADDR[];
8059 +extern unsigned char MODULES_EXEC_END[];
8060 +#endif
8061 +#include <asm/boot.h>
8062 +#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
8063 +#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
8064 +#else
8065 +#define ktla_ktva(addr) (addr)
8066 +#define ktva_ktla(addr) (addr)
8067 +#endif
8068 +
8069 #define MODULES_VADDR VMALLOC_START
8070 #define MODULES_END VMALLOC_END
8071 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
8072 diff -urNp linux-3.0.4/arch/x86/include/asm/pgtable-3level.h linux-3.0.4/arch/x86/include/asm/pgtable-3level.h
8073 --- linux-3.0.4/arch/x86/include/asm/pgtable-3level.h 2011-07-21 22:17:23.000000000 -0400
8074 +++ linux-3.0.4/arch/x86/include/asm/pgtable-3level.h 2011-08-23 21:47:55.000000000 -0400
8075 @@ -38,12 +38,16 @@ static inline void native_set_pte_atomic
8076
8077 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
8078 {
8079 + pax_open_kernel();
8080 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
8081 + pax_close_kernel();
8082 }
8083
8084 static inline void native_set_pud(pud_t *pudp, pud_t pud)
8085 {
8086 + pax_open_kernel();
8087 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
8088 + pax_close_kernel();
8089 }
8090
8091 /*
8092 diff -urNp linux-3.0.4/arch/x86/include/asm/pgtable_64.h linux-3.0.4/arch/x86/include/asm/pgtable_64.h
8093 --- linux-3.0.4/arch/x86/include/asm/pgtable_64.h 2011-07-21 22:17:23.000000000 -0400
8094 +++ linux-3.0.4/arch/x86/include/asm/pgtable_64.h 2011-08-23 21:47:55.000000000 -0400
8095 @@ -16,10 +16,13 @@
8096
8097 extern pud_t level3_kernel_pgt[512];
8098 extern pud_t level3_ident_pgt[512];
8099 +extern pud_t level3_vmalloc_pgt[512];
8100 +extern pud_t level3_vmemmap_pgt[512];
8101 +extern pud_t level2_vmemmap_pgt[512];
8102 extern pmd_t level2_kernel_pgt[512];
8103 extern pmd_t level2_fixmap_pgt[512];
8104 -extern pmd_t level2_ident_pgt[512];
8105 -extern pgd_t init_level4_pgt[];
8106 +extern pmd_t level2_ident_pgt[512*2];
8107 +extern pgd_t init_level4_pgt[512];
8108
8109 #define swapper_pg_dir init_level4_pgt
8110
8111 @@ -61,7 +64,9 @@ static inline void native_set_pte_atomic
8112
8113 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
8114 {
8115 + pax_open_kernel();
8116 *pmdp = pmd;
8117 + pax_close_kernel();
8118 }
8119
8120 static inline void native_pmd_clear(pmd_t *pmd)
8121 @@ -107,6 +112,13 @@ static inline void native_pud_clear(pud_
8122
8123 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
8124 {
8125 + pax_open_kernel();
8126 + *pgdp = pgd;
8127 + pax_close_kernel();
8128 +}
8129 +
8130 +static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
8131 +{
8132 *pgdp = pgd;
8133 }
8134
8135 diff -urNp linux-3.0.4/arch/x86/include/asm/pgtable_64_types.h linux-3.0.4/arch/x86/include/asm/pgtable_64_types.h
8136 --- linux-3.0.4/arch/x86/include/asm/pgtable_64_types.h 2011-07-21 22:17:23.000000000 -0400
8137 +++ linux-3.0.4/arch/x86/include/asm/pgtable_64_types.h 2011-08-23 21:47:55.000000000 -0400
8138 @@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
8139 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
8140 #define MODULES_END _AC(0xffffffffff000000, UL)
8141 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
8142 +#define MODULES_EXEC_VADDR MODULES_VADDR
8143 +#define MODULES_EXEC_END MODULES_END
8144 +
8145 +#define ktla_ktva(addr) (addr)
8146 +#define ktva_ktla(addr) (addr)
8147
8148 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
8149 diff -urNp linux-3.0.4/arch/x86/include/asm/pgtable.h linux-3.0.4/arch/x86/include/asm/pgtable.h
8150 --- linux-3.0.4/arch/x86/include/asm/pgtable.h 2011-07-21 22:17:23.000000000 -0400
8151 +++ linux-3.0.4/arch/x86/include/asm/pgtable.h 2011-08-23 21:47:55.000000000 -0400
8152 @@ -44,6 +44,7 @@ extern struct mm_struct *pgd_page_get_mm
8153
8154 #ifndef __PAGETABLE_PUD_FOLDED
8155 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
8156 +#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
8157 #define pgd_clear(pgd) native_pgd_clear(pgd)
8158 #endif
8159
8160 @@ -81,12 +82,51 @@ extern struct mm_struct *pgd_page_get_mm
8161
8162 #define arch_end_context_switch(prev) do {} while(0)
8163
8164 +#define pax_open_kernel() native_pax_open_kernel()
8165 +#define pax_close_kernel() native_pax_close_kernel()
8166 #endif /* CONFIG_PARAVIRT */
8167
8168 +#define __HAVE_ARCH_PAX_OPEN_KERNEL
8169 +#define __HAVE_ARCH_PAX_CLOSE_KERNEL
8170 +
8171 +#ifdef CONFIG_PAX_KERNEXEC
8172 +static inline unsigned long native_pax_open_kernel(void)
8173 +{
8174 + unsigned long cr0;
8175 +
8176 + preempt_disable();
8177 + barrier();
8178 + cr0 = read_cr0() ^ X86_CR0_WP;
8179 + BUG_ON(unlikely(cr0 & X86_CR0_WP));
8180 + write_cr0(cr0);
8181 + return cr0 ^ X86_CR0_WP;
8182 +}
8183 +
8184 +static inline unsigned long native_pax_close_kernel(void)
8185 +{
8186 + unsigned long cr0;
8187 +
8188 + cr0 = read_cr0() ^ X86_CR0_WP;
8189 + BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
8190 + write_cr0(cr0);
8191 + barrier();
8192 + preempt_enable_no_resched();
8193 + return cr0 ^ X86_CR0_WP;
8194 +}
8195 +#else
8196 +static inline unsigned long native_pax_open_kernel(void) { return 0; }
8197 +static inline unsigned long native_pax_close_kernel(void) { return 0; }
8198 +#endif
8199 +
8200 /*
8201 * The following only work if pte_present() is true.
8202 * Undefined behaviour if not..
8203 */
8204 +static inline int pte_user(pte_t pte)
8205 +{
8206 + return pte_val(pte) & _PAGE_USER;
8207 +}
8208 +
8209 static inline int pte_dirty(pte_t pte)
8210 {
8211 return pte_flags(pte) & _PAGE_DIRTY;
8212 @@ -196,9 +236,29 @@ static inline pte_t pte_wrprotect(pte_t
8213 return pte_clear_flags(pte, _PAGE_RW);
8214 }
8215
8216 +static inline pte_t pte_mkread(pte_t pte)
8217 +{
8218 + return __pte(pte_val(pte) | _PAGE_USER);
8219 +}
8220 +
8221 static inline pte_t pte_mkexec(pte_t pte)
8222 {
8223 - return pte_clear_flags(pte, _PAGE_NX);
8224 +#ifdef CONFIG_X86_PAE
8225 + if (__supported_pte_mask & _PAGE_NX)
8226 + return pte_clear_flags(pte, _PAGE_NX);
8227 + else
8228 +#endif
8229 + return pte_set_flags(pte, _PAGE_USER);
8230 +}
8231 +
8232 +static inline pte_t pte_exprotect(pte_t pte)
8233 +{
8234 +#ifdef CONFIG_X86_PAE
8235 + if (__supported_pte_mask & _PAGE_NX)
8236 + return pte_set_flags(pte, _PAGE_NX);
8237 + else
8238 +#endif
8239 + return pte_clear_flags(pte, _PAGE_USER);
8240 }
8241
8242 static inline pte_t pte_mkdirty(pte_t pte)
8243 @@ -390,6 +450,15 @@ pte_t *populate_extra_pte(unsigned long
8244 #endif
8245
8246 #ifndef __ASSEMBLY__
8247 +
8248 +#ifdef CONFIG_PAX_PER_CPU_PGD
8249 +extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
8250 +static inline pgd_t *get_cpu_pgd(unsigned int cpu)
8251 +{
8252 + return cpu_pgd[cpu];
8253 +}
8254 +#endif
8255 +
8256 #include <linux/mm_types.h>
8257
8258 static inline int pte_none(pte_t pte)
8259 @@ -560,7 +629,7 @@ static inline pud_t *pud_offset(pgd_t *p
8260
8261 static inline int pgd_bad(pgd_t pgd)
8262 {
8263 - return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
8264 + return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
8265 }
8266
8267 static inline int pgd_none(pgd_t pgd)
8268 @@ -583,7 +652,12 @@ static inline int pgd_none(pgd_t pgd)
8269 * pgd_offset() returns a (pgd_t *)
8270 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
8271 */
8272 -#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
8273 +#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
8274 +
8275 +#ifdef CONFIG_PAX_PER_CPU_PGD
8276 +#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
8277 +#endif
8278 +
8279 /*
8280 * a shortcut which implies the use of the kernel's pgd, instead
8281 * of a process's
8282 @@ -594,6 +668,20 @@ static inline int pgd_none(pgd_t pgd)
8283 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
8284 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
8285
8286 +#ifdef CONFIG_X86_32
8287 +#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
8288 +#else
8289 +#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
8290 +#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
8291 +
8292 +#ifdef CONFIG_PAX_MEMORY_UDEREF
8293 +#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
8294 +#else
8295 +#define PAX_USER_SHADOW_BASE (_AC(0,UL))
8296 +#endif
8297 +
8298 +#endif
8299 +
8300 #ifndef __ASSEMBLY__
8301
8302 extern int direct_gbpages;
8303 @@ -758,11 +846,23 @@ static inline void pmdp_set_wrprotect(st
8304 * dst and src can be on the same page, but the range must not overlap,
8305 * and must not cross a page boundary.
8306 */
8307 -static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
8308 +static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
8309 {
8310 - memcpy(dst, src, count * sizeof(pgd_t));
8311 + pax_open_kernel();
8312 + while (count--)
8313 + *dst++ = *src++;
8314 + pax_close_kernel();
8315 }
8316
8317 +#ifdef CONFIG_PAX_PER_CPU_PGD
8318 +extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count);
8319 +#endif
8320 +
8321 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8322 +extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count);
8323 +#else
8324 +static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {}
8325 +#endif
8326
8327 #include <asm-generic/pgtable.h>
8328 #endif /* __ASSEMBLY__ */
8329 diff -urNp linux-3.0.4/arch/x86/include/asm/pgtable_types.h linux-3.0.4/arch/x86/include/asm/pgtable_types.h
8330 --- linux-3.0.4/arch/x86/include/asm/pgtable_types.h 2011-07-21 22:17:23.000000000 -0400
8331 +++ linux-3.0.4/arch/x86/include/asm/pgtable_types.h 2011-08-23 21:47:55.000000000 -0400
8332 @@ -16,13 +16,12 @@
8333 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
8334 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
8335 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
8336 -#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
8337 +#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
8338 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
8339 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
8340 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
8341 -#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
8342 -#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
8343 -#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
8344 +#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
8345 +#define _PAGE_BIT_SPLITTING _PAGE_BIT_SPECIAL /* only valid on a PSE pmd */
8346 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
8347
8348 /* If _PAGE_BIT_PRESENT is clear, we use these: */
8349 @@ -40,7 +39,6 @@
8350 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
8351 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
8352 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
8353 -#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
8354 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
8355 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
8356 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
8357 @@ -57,8 +55,10 @@
8358
8359 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
8360 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
8361 -#else
8362 +#elif defined(CONFIG_KMEMCHECK)
8363 #define _PAGE_NX (_AT(pteval_t, 0))
8364 +#else
8365 +#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
8366 #endif
8367
8368 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
8369 @@ -96,6 +96,9 @@
8370 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
8371 _PAGE_ACCESSED)
8372
8373 +#define PAGE_READONLY_NOEXEC PAGE_READONLY
8374 +#define PAGE_SHARED_NOEXEC PAGE_SHARED
8375 +
8376 #define __PAGE_KERNEL_EXEC \
8377 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
8378 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
8379 @@ -106,8 +109,8 @@
8380 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
8381 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
8382 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
8383 -#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
8384 -#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
8385 +#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
8386 +#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_RO | _PAGE_PCD | _PAGE_PWT | _PAGE_USER)
8387 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
8388 #define __PAGE_KERNEL_LARGE_NOCACHE (__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
8389 #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
8390 @@ -166,8 +169,8 @@
8391 * bits are combined, this will alow user to access the high address mapped
8392 * VDSO in the presence of CONFIG_COMPAT_VDSO
8393 */
8394 -#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
8395 -#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
8396 +#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
8397 +#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
8398 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
8399 #endif
8400
8401 @@ -205,7 +208,17 @@ static inline pgdval_t pgd_flags(pgd_t p
8402 {
8403 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
8404 }
8405 +#endif
8406
8407 +#if PAGETABLE_LEVELS == 3
8408 +#include <asm-generic/pgtable-nopud.h>
8409 +#endif
8410 +
8411 +#if PAGETABLE_LEVELS == 2
8412 +#include <asm-generic/pgtable-nopmd.h>
8413 +#endif
8414 +
8415 +#ifndef __ASSEMBLY__
8416 #if PAGETABLE_LEVELS > 3
8417 typedef struct { pudval_t pud; } pud_t;
8418
8419 @@ -219,8 +232,6 @@ static inline pudval_t native_pud_val(pu
8420 return pud.pud;
8421 }
8422 #else
8423 -#include <asm-generic/pgtable-nopud.h>
8424 -
8425 static inline pudval_t native_pud_val(pud_t pud)
8426 {
8427 return native_pgd_val(pud.pgd);
8428 @@ -240,8 +251,6 @@ static inline pmdval_t native_pmd_val(pm
8429 return pmd.pmd;
8430 }
8431 #else
8432 -#include <asm-generic/pgtable-nopmd.h>
8433 -
8434 static inline pmdval_t native_pmd_val(pmd_t pmd)
8435 {
8436 return native_pgd_val(pmd.pud.pgd);
8437 @@ -281,7 +290,6 @@ typedef struct page *pgtable_t;
8438
8439 extern pteval_t __supported_pte_mask;
8440 extern void set_nx(void);
8441 -extern int nx_enabled;
8442
8443 #define pgprot_writecombine pgprot_writecombine
8444 extern pgprot_t pgprot_writecombine(pgprot_t prot);
8445 diff -urNp linux-3.0.4/arch/x86/include/asm/processor.h linux-3.0.4/arch/x86/include/asm/processor.h
8446 --- linux-3.0.4/arch/x86/include/asm/processor.h 2011-07-21 22:17:23.000000000 -0400
8447 +++ linux-3.0.4/arch/x86/include/asm/processor.h 2011-08-23 21:47:55.000000000 -0400
8448 @@ -266,7 +266,7 @@ struct tss_struct {
8449
8450 } ____cacheline_aligned;
8451
8452 -DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
8453 +extern struct tss_struct init_tss[NR_CPUS];
8454
8455 /*
8456 * Save the original ist values for checking stack pointers during debugging
8457 @@ -860,11 +860,18 @@ static inline void spin_lock_prefetch(co
8458 */
8459 #define TASK_SIZE PAGE_OFFSET
8460 #define TASK_SIZE_MAX TASK_SIZE
8461 +
8462 +#ifdef CONFIG_PAX_SEGMEXEC
8463 +#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
8464 +#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
8465 +#else
8466 #define STACK_TOP TASK_SIZE
8467 -#define STACK_TOP_MAX STACK_TOP
8468 +#endif
8469 +
8470 +#define STACK_TOP_MAX TASK_SIZE
8471
8472 #define INIT_THREAD { \
8473 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
8474 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
8475 .vm86_info = NULL, \
8476 .sysenter_cs = __KERNEL_CS, \
8477 .io_bitmap_ptr = NULL, \
8478 @@ -878,7 +885,7 @@ static inline void spin_lock_prefetch(co
8479 */
8480 #define INIT_TSS { \
8481 .x86_tss = { \
8482 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
8483 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
8484 .ss0 = __KERNEL_DS, \
8485 .ss1 = __KERNEL_CS, \
8486 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
8487 @@ -889,11 +896,7 @@ static inline void spin_lock_prefetch(co
8488 extern unsigned long thread_saved_pc(struct task_struct *tsk);
8489
8490 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
8491 -#define KSTK_TOP(info) \
8492 -({ \
8493 - unsigned long *__ptr = (unsigned long *)(info); \
8494 - (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
8495 -})
8496 +#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
8497
8498 /*
8499 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
8500 @@ -908,7 +911,7 @@ extern unsigned long thread_saved_pc(str
8501 #define task_pt_regs(task) \
8502 ({ \
8503 struct pt_regs *__regs__; \
8504 - __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
8505 + __regs__ = (struct pt_regs *)((task)->thread.sp0); \
8506 __regs__ - 1; \
8507 })
8508
8509 @@ -918,13 +921,13 @@ extern unsigned long thread_saved_pc(str
8510 /*
8511 * User space process size. 47bits minus one guard page.
8512 */
8513 -#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
8514 +#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
8515
8516 /* This decides where the kernel will search for a free chunk of vm
8517 * space during mmap's.
8518 */
8519 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
8520 - 0xc0000000 : 0xFFFFe000)
8521 + 0xc0000000 : 0xFFFFf000)
8522
8523 #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
8524 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
8525 @@ -935,11 +938,11 @@ extern unsigned long thread_saved_pc(str
8526 #define STACK_TOP_MAX TASK_SIZE_MAX
8527
8528 #define INIT_THREAD { \
8529 - .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
8530 + .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
8531 }
8532
8533 #define INIT_TSS { \
8534 - .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
8535 + .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
8536 }
8537
8538 /*
8539 @@ -961,6 +964,10 @@ extern void start_thread(struct pt_regs
8540 */
8541 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
8542
8543 +#ifdef CONFIG_PAX_SEGMEXEC
8544 +#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
8545 +#endif
8546 +
8547 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
8548
8549 /* Get/set a process' ability to use the timestamp counter instruction */
8550 diff -urNp linux-3.0.4/arch/x86/include/asm/ptrace.h linux-3.0.4/arch/x86/include/asm/ptrace.h
8551 --- linux-3.0.4/arch/x86/include/asm/ptrace.h 2011-07-21 22:17:23.000000000 -0400
8552 +++ linux-3.0.4/arch/x86/include/asm/ptrace.h 2011-08-23 21:47:55.000000000 -0400
8553 @@ -153,28 +153,29 @@ static inline unsigned long regs_return_
8554 }
8555
8556 /*
8557 - * user_mode_vm(regs) determines whether a register set came from user mode.
8558 + * user_mode(regs) determines whether a register set came from user mode.
8559 * This is true if V8086 mode was enabled OR if the register set was from
8560 * protected mode with RPL-3 CS value. This tricky test checks that with
8561 * one comparison. Many places in the kernel can bypass this full check
8562 - * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
8563 + * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
8564 + * be used.
8565 */
8566 -static inline int user_mode(struct pt_regs *regs)
8567 +static inline int user_mode_novm(struct pt_regs *regs)
8568 {
8569 #ifdef CONFIG_X86_32
8570 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
8571 #else
8572 - return !!(regs->cs & 3);
8573 + return !!(regs->cs & SEGMENT_RPL_MASK);
8574 #endif
8575 }
8576
8577 -static inline int user_mode_vm(struct pt_regs *regs)
8578 +static inline int user_mode(struct pt_regs *regs)
8579 {
8580 #ifdef CONFIG_X86_32
8581 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
8582 USER_RPL;
8583 #else
8584 - return user_mode(regs);
8585 + return user_mode_novm(regs);
8586 #endif
8587 }
8588
8589 diff -urNp linux-3.0.4/arch/x86/include/asm/reboot.h linux-3.0.4/arch/x86/include/asm/reboot.h
8590 --- linux-3.0.4/arch/x86/include/asm/reboot.h 2011-07-21 22:17:23.000000000 -0400
8591 +++ linux-3.0.4/arch/x86/include/asm/reboot.h 2011-08-23 21:47:55.000000000 -0400
8592 @@ -6,19 +6,19 @@
8593 struct pt_regs;
8594
8595 struct machine_ops {
8596 - void (*restart)(char *cmd);
8597 - void (*halt)(void);
8598 - void (*power_off)(void);
8599 + void (* __noreturn restart)(char *cmd);
8600 + void (* __noreturn halt)(void);
8601 + void (* __noreturn power_off)(void);
8602 void (*shutdown)(void);
8603 void (*crash_shutdown)(struct pt_regs *);
8604 - void (*emergency_restart)(void);
8605 -};
8606 + void (* __noreturn emergency_restart)(void);
8607 +} __no_const;
8608
8609 extern struct machine_ops machine_ops;
8610
8611 void native_machine_crash_shutdown(struct pt_regs *regs);
8612 void native_machine_shutdown(void);
8613 -void machine_real_restart(unsigned int type);
8614 +void machine_real_restart(unsigned int type) __noreturn;
8615 /* These must match dispatch_table in reboot_32.S */
8616 #define MRR_BIOS 0
8617 #define MRR_APM 1
8618 diff -urNp linux-3.0.4/arch/x86/include/asm/rwsem.h linux-3.0.4/arch/x86/include/asm/rwsem.h
8619 --- linux-3.0.4/arch/x86/include/asm/rwsem.h 2011-07-21 22:17:23.000000000 -0400
8620 +++ linux-3.0.4/arch/x86/include/asm/rwsem.h 2011-08-23 21:47:55.000000000 -0400
8621 @@ -64,6 +64,14 @@ static inline void __down_read(struct rw
8622 {
8623 asm volatile("# beginning down_read\n\t"
8624 LOCK_PREFIX _ASM_INC "(%1)\n\t"
8625 +
8626 +#ifdef CONFIG_PAX_REFCOUNT
8627 + "jno 0f\n"
8628 + LOCK_PREFIX _ASM_DEC "(%1)\n"
8629 + "int $4\n0:\n"
8630 + _ASM_EXTABLE(0b, 0b)
8631 +#endif
8632 +
8633 /* adds 0x00000001 */
8634 " jns 1f\n"
8635 " call call_rwsem_down_read_failed\n"
8636 @@ -85,6 +93,14 @@ static inline int __down_read_trylock(st
8637 "1:\n\t"
8638 " mov %1,%2\n\t"
8639 " add %3,%2\n\t"
8640 +
8641 +#ifdef CONFIG_PAX_REFCOUNT
8642 + "jno 0f\n"
8643 + "sub %3,%2\n"
8644 + "int $4\n0:\n"
8645 + _ASM_EXTABLE(0b, 0b)
8646 +#endif
8647 +
8648 " jle 2f\n\t"
8649 LOCK_PREFIX " cmpxchg %2,%0\n\t"
8650 " jnz 1b\n\t"
8651 @@ -104,6 +120,14 @@ static inline void __down_write_nested(s
8652 long tmp;
8653 asm volatile("# beginning down_write\n\t"
8654 LOCK_PREFIX " xadd %1,(%2)\n\t"
8655 +
8656 +#ifdef CONFIG_PAX_REFCOUNT
8657 + "jno 0f\n"
8658 + "mov %1,(%2)\n"
8659 + "int $4\n0:\n"
8660 + _ASM_EXTABLE(0b, 0b)
8661 +#endif
8662 +
8663 /* adds 0xffff0001, returns the old value */
8664 " test %1,%1\n\t"
8665 /* was the count 0 before? */
8666 @@ -141,6 +165,14 @@ static inline void __up_read(struct rw_s
8667 long tmp;
8668 asm volatile("# beginning __up_read\n\t"
8669 LOCK_PREFIX " xadd %1,(%2)\n\t"
8670 +
8671 +#ifdef CONFIG_PAX_REFCOUNT
8672 + "jno 0f\n"
8673 + "mov %1,(%2)\n"
8674 + "int $4\n0:\n"
8675 + _ASM_EXTABLE(0b, 0b)
8676 +#endif
8677 +
8678 /* subtracts 1, returns the old value */
8679 " jns 1f\n\t"
8680 " call call_rwsem_wake\n" /* expects old value in %edx */
8681 @@ -159,6 +191,14 @@ static inline void __up_write(struct rw_
8682 long tmp;
8683 asm volatile("# beginning __up_write\n\t"
8684 LOCK_PREFIX " xadd %1,(%2)\n\t"
8685 +
8686 +#ifdef CONFIG_PAX_REFCOUNT
8687 + "jno 0f\n"
8688 + "mov %1,(%2)\n"
8689 + "int $4\n0:\n"
8690 + _ASM_EXTABLE(0b, 0b)
8691 +#endif
8692 +
8693 /* subtracts 0xffff0001, returns the old value */
8694 " jns 1f\n\t"
8695 " call call_rwsem_wake\n" /* expects old value in %edx */
8696 @@ -176,6 +216,14 @@ static inline void __downgrade_write(str
8697 {
8698 asm volatile("# beginning __downgrade_write\n\t"
8699 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
8700 +
8701 +#ifdef CONFIG_PAX_REFCOUNT
8702 + "jno 0f\n"
8703 + LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
8704 + "int $4\n0:\n"
8705 + _ASM_EXTABLE(0b, 0b)
8706 +#endif
8707 +
8708 /*
8709 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
8710 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
8711 @@ -194,7 +242,15 @@ static inline void __downgrade_write(str
8712 */
8713 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
8714 {
8715 - asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
8716 + asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
8717 +
8718 +#ifdef CONFIG_PAX_REFCOUNT
8719 + "jno 0f\n"
8720 + LOCK_PREFIX _ASM_SUB "%1,%0\n"
8721 + "int $4\n0:\n"
8722 + _ASM_EXTABLE(0b, 0b)
8723 +#endif
8724 +
8725 : "+m" (sem->count)
8726 : "er" (delta));
8727 }
8728 @@ -206,7 +262,15 @@ static inline long rwsem_atomic_update(l
8729 {
8730 long tmp = delta;
8731
8732 - asm volatile(LOCK_PREFIX "xadd %0,%1"
8733 + asm volatile(LOCK_PREFIX "xadd %0,%1\n"
8734 +
8735 +#ifdef CONFIG_PAX_REFCOUNT
8736 + "jno 0f\n"
8737 + "mov %0,%1\n"
8738 + "int $4\n0:\n"
8739 + _ASM_EXTABLE(0b, 0b)
8740 +#endif
8741 +
8742 : "+r" (tmp), "+m" (sem->count)
8743 : : "memory");
8744
8745 diff -urNp linux-3.0.4/arch/x86/include/asm/segment.h linux-3.0.4/arch/x86/include/asm/segment.h
8746 --- linux-3.0.4/arch/x86/include/asm/segment.h 2011-07-21 22:17:23.000000000 -0400
8747 +++ linux-3.0.4/arch/x86/include/asm/segment.h 2011-09-17 00:53:42.000000000 -0400
8748 @@ -64,10 +64,15 @@
8749 * 26 - ESPFIX small SS
8750 * 27 - per-cpu [ offset to per-cpu data area ]
8751 * 28 - stack_canary-20 [ for stack protector ]
8752 - * 29 - unused
8753 - * 30 - unused
8754 + * 29 - PCI BIOS CS
8755 + * 30 - PCI BIOS DS
8756 * 31 - TSS for double fault handler
8757 */
8758 +#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
8759 +#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
8760 +#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
8761 +#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
8762 +
8763 #define GDT_ENTRY_TLS_MIN 6
8764 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
8765
8766 @@ -79,6 +84,8 @@
8767
8768 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
8769
8770 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
8771 +
8772 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
8773
8774 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
8775 @@ -104,6 +111,12 @@
8776 #define __KERNEL_STACK_CANARY 0
8777 #endif
8778
8779 +#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
8780 +#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
8781 +
8782 +#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
8783 +#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
8784 +
8785 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
8786
8787 /*
8788 @@ -141,7 +154,7 @@
8789 */
8790
8791 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
8792 -#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
8793 +#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
8794
8795
8796 #else
8797 @@ -165,6 +178,8 @@
8798 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS * 8 + 3)
8799 #define __USER32_DS __USER_DS
8800
8801 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
8802 +
8803 #define GDT_ENTRY_TSS 8 /* needs two entries */
8804 #define GDT_ENTRY_LDT 10 /* needs two entries */
8805 #define GDT_ENTRY_TLS_MIN 12
8806 @@ -185,6 +200,7 @@
8807 #endif
8808
8809 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
8810 +#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
8811 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
8812 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
8813 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
8814 diff -urNp linux-3.0.4/arch/x86/include/asm/smp.h linux-3.0.4/arch/x86/include/asm/smp.h
8815 --- linux-3.0.4/arch/x86/include/asm/smp.h 2011-07-21 22:17:23.000000000 -0400
8816 +++ linux-3.0.4/arch/x86/include/asm/smp.h 2011-08-23 21:47:55.000000000 -0400
8817 @@ -36,7 +36,7 @@ DECLARE_PER_CPU(cpumask_var_t, cpu_core_
8818 /* cpus sharing the last level cache: */
8819 DECLARE_PER_CPU(cpumask_var_t, cpu_llc_shared_map);
8820 DECLARE_PER_CPU(u16, cpu_llc_id);
8821 -DECLARE_PER_CPU(int, cpu_number);
8822 +DECLARE_PER_CPU(unsigned int, cpu_number);
8823
8824 static inline struct cpumask *cpu_sibling_mask(int cpu)
8825 {
8826 @@ -77,7 +77,7 @@ struct smp_ops {
8827
8828 void (*send_call_func_ipi)(const struct cpumask *mask);
8829 void (*send_call_func_single_ipi)(int cpu);
8830 -};
8831 +} __no_const;
8832
8833 /* Globals due to paravirt */
8834 extern void set_cpu_sibling_map(int cpu);
8835 @@ -192,14 +192,8 @@ extern unsigned disabled_cpus __cpuinitd
8836 extern int safe_smp_processor_id(void);
8837
8838 #elif defined(CONFIG_X86_64_SMP)
8839 -#define raw_smp_processor_id() (percpu_read(cpu_number))
8840 -
8841 -#define stack_smp_processor_id() \
8842 -({ \
8843 - struct thread_info *ti; \
8844 - __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
8845 - ti->cpu; \
8846 -})
8847 +#define raw_smp_processor_id() (percpu_read(cpu_number))
8848 +#define stack_smp_processor_id() raw_smp_processor_id()
8849 #define safe_smp_processor_id() smp_processor_id()
8850
8851 #endif
8852 diff -urNp linux-3.0.4/arch/x86/include/asm/spinlock.h linux-3.0.4/arch/x86/include/asm/spinlock.h
8853 --- linux-3.0.4/arch/x86/include/asm/spinlock.h 2011-07-21 22:17:23.000000000 -0400
8854 +++ linux-3.0.4/arch/x86/include/asm/spinlock.h 2011-08-23 21:47:55.000000000 -0400
8855 @@ -249,6 +249,14 @@ static inline int arch_write_can_lock(ar
8856 static inline void arch_read_lock(arch_rwlock_t *rw)
8857 {
8858 asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
8859 +
8860 +#ifdef CONFIG_PAX_REFCOUNT
8861 + "jno 0f\n"
8862 + LOCK_PREFIX " addl $1,(%0)\n"
8863 + "int $4\n0:\n"
8864 + _ASM_EXTABLE(0b, 0b)
8865 +#endif
8866 +
8867 "jns 1f\n"
8868 "call __read_lock_failed\n\t"
8869 "1:\n"
8870 @@ -258,6 +266,14 @@ static inline void arch_read_lock(arch_r
8871 static inline void arch_write_lock(arch_rwlock_t *rw)
8872 {
8873 asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
8874 +
8875 +#ifdef CONFIG_PAX_REFCOUNT
8876 + "jno 0f\n"
8877 + LOCK_PREFIX " addl %1,(%0)\n"
8878 + "int $4\n0:\n"
8879 + _ASM_EXTABLE(0b, 0b)
8880 +#endif
8881 +
8882 "jz 1f\n"
8883 "call __write_lock_failed\n\t"
8884 "1:\n"
8885 @@ -286,12 +302,29 @@ static inline int arch_write_trylock(arc
8886
8887 static inline void arch_read_unlock(arch_rwlock_t *rw)
8888 {
8889 - asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
8890 + asm volatile(LOCK_PREFIX "incl %0\n"
8891 +
8892 +#ifdef CONFIG_PAX_REFCOUNT
8893 + "jno 0f\n"
8894 + LOCK_PREFIX "decl %0\n"
8895 + "int $4\n0:\n"
8896 + _ASM_EXTABLE(0b, 0b)
8897 +#endif
8898 +
8899 + :"+m" (rw->lock) : : "memory");
8900 }
8901
8902 static inline void arch_write_unlock(arch_rwlock_t *rw)
8903 {
8904 - asm volatile(LOCK_PREFIX "addl %1, %0"
8905 + asm volatile(LOCK_PREFIX "addl %1, %0\n"
8906 +
8907 +#ifdef CONFIG_PAX_REFCOUNT
8908 + "jno 0f\n"
8909 + LOCK_PREFIX "subl %1, %0\n"
8910 + "int $4\n0:\n"
8911 + _ASM_EXTABLE(0b, 0b)
8912 +#endif
8913 +
8914 : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
8915 }
8916
8917 diff -urNp linux-3.0.4/arch/x86/include/asm/stackprotector.h linux-3.0.4/arch/x86/include/asm/stackprotector.h
8918 --- linux-3.0.4/arch/x86/include/asm/stackprotector.h 2011-07-21 22:17:23.000000000 -0400
8919 +++ linux-3.0.4/arch/x86/include/asm/stackprotector.h 2011-08-23 21:47:55.000000000 -0400
8920 @@ -48,7 +48,7 @@
8921 * head_32 for boot CPU and setup_per_cpu_areas() for others.
8922 */
8923 #define GDT_STACK_CANARY_INIT \
8924 - [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
8925 + [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
8926
8927 /*
8928 * Initialize the stackprotector canary value.
8929 @@ -113,7 +113,7 @@ static inline void setup_stack_canary_se
8930
8931 static inline void load_stack_canary_segment(void)
8932 {
8933 -#ifdef CONFIG_X86_32
8934 +#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
8935 asm volatile ("mov %0, %%gs" : : "r" (0));
8936 #endif
8937 }
8938 diff -urNp linux-3.0.4/arch/x86/include/asm/stacktrace.h linux-3.0.4/arch/x86/include/asm/stacktrace.h
8939 --- linux-3.0.4/arch/x86/include/asm/stacktrace.h 2011-07-21 22:17:23.000000000 -0400
8940 +++ linux-3.0.4/arch/x86/include/asm/stacktrace.h 2011-08-23 21:47:55.000000000 -0400
8941 @@ -11,28 +11,20 @@
8942
8943 extern int kstack_depth_to_print;
8944
8945 -struct thread_info;
8946 +struct task_struct;
8947 struct stacktrace_ops;
8948
8949 -typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
8950 - unsigned long *stack,
8951 - unsigned long bp,
8952 - const struct stacktrace_ops *ops,
8953 - void *data,
8954 - unsigned long *end,
8955 - int *graph);
8956 -
8957 -extern unsigned long
8958 -print_context_stack(struct thread_info *tinfo,
8959 - unsigned long *stack, unsigned long bp,
8960 - const struct stacktrace_ops *ops, void *data,
8961 - unsigned long *end, int *graph);
8962 -
8963 -extern unsigned long
8964 -print_context_stack_bp(struct thread_info *tinfo,
8965 - unsigned long *stack, unsigned long bp,
8966 - const struct stacktrace_ops *ops, void *data,
8967 - unsigned long *end, int *graph);
8968 +typedef unsigned long walk_stack_t(struct task_struct *task,
8969 + void *stack_start,
8970 + unsigned long *stack,
8971 + unsigned long bp,
8972 + const struct stacktrace_ops *ops,
8973 + void *data,
8974 + unsigned long *end,
8975 + int *graph);
8976 +
8977 +extern walk_stack_t print_context_stack;
8978 +extern walk_stack_t print_context_stack_bp;
8979
8980 /* Generic stack tracer with callbacks */
8981
8982 @@ -40,7 +32,7 @@ struct stacktrace_ops {
8983 void (*address)(void *data, unsigned long address, int reliable);
8984 /* On negative return stop dumping */
8985 int (*stack)(void *data, char *name);
8986 - walk_stack_t walk_stack;
8987 + walk_stack_t *walk_stack;
8988 };
8989
8990 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
8991 diff -urNp linux-3.0.4/arch/x86/include/asm/system.h linux-3.0.4/arch/x86/include/asm/system.h
8992 --- linux-3.0.4/arch/x86/include/asm/system.h 2011-07-21 22:17:23.000000000 -0400
8993 +++ linux-3.0.4/arch/x86/include/asm/system.h 2011-08-23 21:47:55.000000000 -0400
8994 @@ -129,7 +129,7 @@ do { \
8995 "call __switch_to\n\t" \
8996 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
8997 __switch_canary \
8998 - "movq %P[thread_info](%%rsi),%%r8\n\t" \
8999 + "movq "__percpu_arg([thread_info])",%%r8\n\t" \
9000 "movq %%rax,%%rdi\n\t" \
9001 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
9002 "jnz ret_from_fork\n\t" \
9003 @@ -140,7 +140,7 @@ do { \
9004 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
9005 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
9006 [_tif_fork] "i" (_TIF_FORK), \
9007 - [thread_info] "i" (offsetof(struct task_struct, stack)), \
9008 + [thread_info] "m" (current_tinfo), \
9009 [current_task] "m" (current_task) \
9010 __switch_canary_iparam \
9011 : "memory", "cc" __EXTRA_CLOBBER)
9012 @@ -200,7 +200,7 @@ static inline unsigned long get_limit(un
9013 {
9014 unsigned long __limit;
9015 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
9016 - return __limit + 1;
9017 + return __limit;
9018 }
9019
9020 static inline void native_clts(void)
9021 @@ -397,12 +397,12 @@ void enable_hlt(void);
9022
9023 void cpu_idle_wait(void);
9024
9025 -extern unsigned long arch_align_stack(unsigned long sp);
9026 +#define arch_align_stack(x) ((x) & ~0xfUL)
9027 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
9028
9029 void default_idle(void);
9030
9031 -void stop_this_cpu(void *dummy);
9032 +void stop_this_cpu(void *dummy) __noreturn;
9033
9034 /*
9035 * Force strict CPU ordering.
9036 diff -urNp linux-3.0.4/arch/x86/include/asm/thread_info.h linux-3.0.4/arch/x86/include/asm/thread_info.h
9037 --- linux-3.0.4/arch/x86/include/asm/thread_info.h 2011-07-21 22:17:23.000000000 -0400
9038 +++ linux-3.0.4/arch/x86/include/asm/thread_info.h 2011-08-23 21:47:55.000000000 -0400
9039 @@ -10,6 +10,7 @@
9040 #include <linux/compiler.h>
9041 #include <asm/page.h>
9042 #include <asm/types.h>
9043 +#include <asm/percpu.h>
9044
9045 /*
9046 * low level task data that entry.S needs immediate access to
9047 @@ -24,7 +25,6 @@ struct exec_domain;
9048 #include <asm/atomic.h>
9049
9050 struct thread_info {
9051 - struct task_struct *task; /* main task structure */
9052 struct exec_domain *exec_domain; /* execution domain */
9053 __u32 flags; /* low level flags */
9054 __u32 status; /* thread synchronous flags */
9055 @@ -34,18 +34,12 @@ struct thread_info {
9056 mm_segment_t addr_limit;
9057 struct restart_block restart_block;
9058 void __user *sysenter_return;
9059 -#ifdef CONFIG_X86_32
9060 - unsigned long previous_esp; /* ESP of the previous stack in
9061 - case of nested (IRQ) stacks
9062 - */
9063 - __u8 supervisor_stack[0];
9064 -#endif
9065 + unsigned long lowest_stack;
9066 int uaccess_err;
9067 };
9068
9069 -#define INIT_THREAD_INFO(tsk) \
9070 +#define INIT_THREAD_INFO \
9071 { \
9072 - .task = &tsk, \
9073 .exec_domain = &default_exec_domain, \
9074 .flags = 0, \
9075 .cpu = 0, \
9076 @@ -56,7 +50,7 @@ struct thread_info {
9077 }, \
9078 }
9079
9080 -#define init_thread_info (init_thread_union.thread_info)
9081 +#define init_thread_info (init_thread_union.stack)
9082 #define init_stack (init_thread_union.stack)
9083
9084 #else /* !__ASSEMBLY__ */
9085 @@ -170,6 +164,23 @@ struct thread_info {
9086 ret; \
9087 })
9088
9089 +#ifdef __ASSEMBLY__
9090 +/* how to get the thread information struct from ASM */
9091 +#define GET_THREAD_INFO(reg) \
9092 + mov PER_CPU_VAR(current_tinfo), reg
9093 +
9094 +/* use this one if reg already contains %esp */
9095 +#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
9096 +#else
9097 +/* how to get the thread information struct from C */
9098 +DECLARE_PER_CPU(struct thread_info *, current_tinfo);
9099 +
9100 +static __always_inline struct thread_info *current_thread_info(void)
9101 +{
9102 + return percpu_read_stable(current_tinfo);
9103 +}
9104 +#endif
9105 +
9106 #ifdef CONFIG_X86_32
9107
9108 #define STACK_WARN (THREAD_SIZE/8)
9109 @@ -180,35 +191,13 @@ struct thread_info {
9110 */
9111 #ifndef __ASSEMBLY__
9112
9113 -
9114 /* how to get the current stack pointer from C */
9115 register unsigned long current_stack_pointer asm("esp") __used;
9116
9117 -/* how to get the thread information struct from C */
9118 -static inline struct thread_info *current_thread_info(void)
9119 -{
9120 - return (struct thread_info *)
9121 - (current_stack_pointer & ~(THREAD_SIZE - 1));
9122 -}
9123 -
9124 -#else /* !__ASSEMBLY__ */
9125 -
9126 -/* how to get the thread information struct from ASM */
9127 -#define GET_THREAD_INFO(reg) \
9128 - movl $-THREAD_SIZE, reg; \
9129 - andl %esp, reg
9130 -
9131 -/* use this one if reg already contains %esp */
9132 -#define GET_THREAD_INFO_WITH_ESP(reg) \
9133 - andl $-THREAD_SIZE, reg
9134 -
9135 #endif
9136
9137 #else /* X86_32 */
9138
9139 -#include <asm/percpu.h>
9140 -#define KERNEL_STACK_OFFSET (5*8)
9141 -
9142 /*
9143 * macros/functions for gaining access to the thread information structure
9144 * preempt_count needs to be 1 initially, until the scheduler is functional.
9145 @@ -216,21 +205,8 @@ static inline struct thread_info *curren
9146 #ifndef __ASSEMBLY__
9147 DECLARE_PER_CPU(unsigned long, kernel_stack);
9148
9149 -static inline struct thread_info *current_thread_info(void)
9150 -{
9151 - struct thread_info *ti;
9152 - ti = (void *)(percpu_read_stable(kernel_stack) +
9153 - KERNEL_STACK_OFFSET - THREAD_SIZE);
9154 - return ti;
9155 -}
9156 -
9157 -#else /* !__ASSEMBLY__ */
9158 -
9159 -/* how to get the thread information struct from ASM */
9160 -#define GET_THREAD_INFO(reg) \
9161 - movq PER_CPU_VAR(kernel_stack),reg ; \
9162 - subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
9163 -
9164 +/* how to get the current stack pointer from C */
9165 +register unsigned long current_stack_pointer asm("rsp") __used;
9166 #endif
9167
9168 #endif /* !X86_32 */
9169 @@ -266,5 +242,16 @@ extern void arch_task_cache_init(void);
9170 extern void free_thread_info(struct thread_info *ti);
9171 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
9172 #define arch_task_cache_init arch_task_cache_init
9173 +
9174 +#define __HAVE_THREAD_FUNCTIONS
9175 +#define task_thread_info(task) (&(task)->tinfo)
9176 +#define task_stack_page(task) ((task)->stack)
9177 +#define setup_thread_stack(p, org) do {} while (0)
9178 +#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
9179 +
9180 +#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
9181 +extern struct task_struct *alloc_task_struct_node(int node);
9182 +extern void free_task_struct(struct task_struct *);
9183 +
9184 #endif
9185 #endif /* _ASM_X86_THREAD_INFO_H */
9186 diff -urNp linux-3.0.4/arch/x86/include/asm/uaccess_32.h linux-3.0.4/arch/x86/include/asm/uaccess_32.h
9187 --- linux-3.0.4/arch/x86/include/asm/uaccess_32.h 2011-07-21 22:17:23.000000000 -0400
9188 +++ linux-3.0.4/arch/x86/include/asm/uaccess_32.h 2011-08-23 21:48:14.000000000 -0400
9189 @@ -43,6 +43,11 @@ unsigned long __must_check __copy_from_u
9190 static __always_inline unsigned long __must_check
9191 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
9192 {
9193 + pax_track_stack();
9194 +
9195 + if ((long)n < 0)
9196 + return n;
9197 +
9198 if (__builtin_constant_p(n)) {
9199 unsigned long ret;
9200
9201 @@ -61,6 +66,8 @@ __copy_to_user_inatomic(void __user *to,
9202 return ret;
9203 }
9204 }
9205 + if (!__builtin_constant_p(n))
9206 + check_object_size(from, n, true);
9207 return __copy_to_user_ll(to, from, n);
9208 }
9209
9210 @@ -82,12 +89,16 @@ static __always_inline unsigned long __m
9211 __copy_to_user(void __user *to, const void *from, unsigned long n)
9212 {
9213 might_fault();
9214 +
9215 return __copy_to_user_inatomic(to, from, n);
9216 }
9217
9218 static __always_inline unsigned long
9219 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
9220 {
9221 + if ((long)n < 0)
9222 + return n;
9223 +
9224 /* Avoid zeroing the tail if the copy fails..
9225 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
9226 * but as the zeroing behaviour is only significant when n is not
9227 @@ -137,6 +148,12 @@ static __always_inline unsigned long
9228 __copy_from_user(void *to, const void __user *from, unsigned long n)
9229 {
9230 might_fault();
9231 +
9232 + pax_track_stack();
9233 +
9234 + if ((long)n < 0)
9235 + return n;
9236 +
9237 if (__builtin_constant_p(n)) {
9238 unsigned long ret;
9239
9240 @@ -152,6 +169,8 @@ __copy_from_user(void *to, const void __
9241 return ret;
9242 }
9243 }
9244 + if (!__builtin_constant_p(n))
9245 + check_object_size(to, n, false);
9246 return __copy_from_user_ll(to, from, n);
9247 }
9248
9249 @@ -159,6 +178,10 @@ static __always_inline unsigned long __c
9250 const void __user *from, unsigned long n)
9251 {
9252 might_fault();
9253 +
9254 + if ((long)n < 0)
9255 + return n;
9256 +
9257 if (__builtin_constant_p(n)) {
9258 unsigned long ret;
9259
9260 @@ -181,15 +204,19 @@ static __always_inline unsigned long
9261 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
9262 unsigned long n)
9263 {
9264 - return __copy_from_user_ll_nocache_nozero(to, from, n);
9265 -}
9266 + if ((long)n < 0)
9267 + return n;
9268
9269 -unsigned long __must_check copy_to_user(void __user *to,
9270 - const void *from, unsigned long n);
9271 -unsigned long __must_check _copy_from_user(void *to,
9272 - const void __user *from,
9273 - unsigned long n);
9274 + return __copy_from_user_ll_nocache_nozero(to, from, n);
9275 +}
9276
9277 +extern void copy_to_user_overflow(void)
9278 +#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
9279 + __compiletime_error("copy_to_user() buffer size is not provably correct")
9280 +#else
9281 + __compiletime_warning("copy_to_user() buffer size is not provably correct")
9282 +#endif
9283 +;
9284
9285 extern void copy_from_user_overflow(void)
9286 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
9287 @@ -199,17 +226,61 @@ extern void copy_from_user_overflow(void
9288 #endif
9289 ;
9290
9291 -static inline unsigned long __must_check copy_from_user(void *to,
9292 - const void __user *from,
9293 - unsigned long n)
9294 +/**
9295 + * copy_to_user: - Copy a block of data into user space.
9296 + * @to: Destination address, in user space.
9297 + * @from: Source address, in kernel space.
9298 + * @n: Number of bytes to copy.
9299 + *
9300 + * Context: User context only. This function may sleep.
9301 + *
9302 + * Copy data from kernel space to user space.
9303 + *
9304 + * Returns number of bytes that could not be copied.
9305 + * On success, this will be zero.
9306 + */
9307 +static inline unsigned long __must_check
9308 +copy_to_user(void __user *to, const void *from, unsigned long n)
9309 +{
9310 + int sz = __compiletime_object_size(from);
9311 +
9312 + if (unlikely(sz != -1 && sz < n))
9313 + copy_to_user_overflow();
9314 + else if (access_ok(VERIFY_WRITE, to, n))
9315 + n = __copy_to_user(to, from, n);
9316 + return n;
9317 +}
9318 +
9319 +/**
9320 + * copy_from_user: - Copy a block of data from user space.
9321 + * @to: Destination address, in kernel space.
9322 + * @from: Source address, in user space.
9323 + * @n: Number of bytes to copy.
9324 + *
9325 + * Context: User context only. This function may sleep.
9326 + *
9327 + * Copy data from user space to kernel space.
9328 + *
9329 + * Returns number of bytes that could not be copied.
9330 + * On success, this will be zero.
9331 + *
9332 + * If some data could not be copied, this function will pad the copied
9333 + * data to the requested size using zero bytes.
9334 + */
9335 +static inline unsigned long __must_check
9336 +copy_from_user(void *to, const void __user *from, unsigned long n)
9337 {
9338 int sz = __compiletime_object_size(to);
9339
9340 - if (likely(sz == -1 || sz >= n))
9341 - n = _copy_from_user(to, from, n);
9342 - else
9343 + if (unlikely(sz != -1 && sz < n))
9344 copy_from_user_overflow();
9345 -
9346 + else if (access_ok(VERIFY_READ, from, n))
9347 + n = __copy_from_user(to, from, n);
9348 + else if ((long)n > 0) {
9349 + if (!__builtin_constant_p(n))
9350 + check_object_size(to, n, false);
9351 + memset(to, 0, n);
9352 + }
9353 return n;
9354 }
9355
9356 diff -urNp linux-3.0.4/arch/x86/include/asm/uaccess_64.h linux-3.0.4/arch/x86/include/asm/uaccess_64.h
9357 --- linux-3.0.4/arch/x86/include/asm/uaccess_64.h 2011-07-21 22:17:23.000000000 -0400
9358 +++ linux-3.0.4/arch/x86/include/asm/uaccess_64.h 2011-08-23 21:48:14.000000000 -0400
9359 @@ -10,6 +10,9 @@
9360 #include <asm/alternative.h>
9361 #include <asm/cpufeature.h>
9362 #include <asm/page.h>
9363 +#include <asm/pgtable.h>
9364 +
9365 +#define set_fs(x) (current_thread_info()->addr_limit = (x))
9366
9367 /*
9368 * Copy To/From Userspace
9369 @@ -36,26 +39,26 @@ copy_user_generic(void *to, const void *
9370 return ret;
9371 }
9372
9373 -__must_check unsigned long
9374 -_copy_to_user(void __user *to, const void *from, unsigned len);
9375 -__must_check unsigned long
9376 -_copy_from_user(void *to, const void __user *from, unsigned len);
9377 +static __always_inline __must_check unsigned long
9378 +__copy_to_user(void __user *to, const void *from, unsigned len);
9379 +static __always_inline __must_check unsigned long
9380 +__copy_from_user(void *to, const void __user *from, unsigned len);
9381 __must_check unsigned long
9382 copy_in_user(void __user *to, const void __user *from, unsigned len);
9383
9384 static inline unsigned long __must_check copy_from_user(void *to,
9385 const void __user *from,
9386 - unsigned long n)
9387 + unsigned n)
9388 {
9389 - int sz = __compiletime_object_size(to);
9390 -
9391 might_fault();
9392 - if (likely(sz == -1 || sz >= n))
9393 - n = _copy_from_user(to, from, n);
9394 -#ifdef CONFIG_DEBUG_VM
9395 - else
9396 - WARN(1, "Buffer overflow detected!\n");
9397 -#endif
9398 +
9399 + if (access_ok(VERIFY_READ, from, n))
9400 + n = __copy_from_user(to, from, n);
9401 + else if ((int)n > 0) {
9402 + if (!__builtin_constant_p(n))
9403 + check_object_size(to, n, false);
9404 + memset(to, 0, n);
9405 + }
9406 return n;
9407 }
9408
9409 @@ -64,110 +67,198 @@ int copy_to_user(void __user *dst, const
9410 {
9411 might_fault();
9412
9413 - return _copy_to_user(dst, src, size);
9414 + if (access_ok(VERIFY_WRITE, dst, size))
9415 + size = __copy_to_user(dst, src, size);
9416 + return size;
9417 }
9418
9419 static __always_inline __must_check
9420 -int __copy_from_user(void *dst, const void __user *src, unsigned size)
9421 +unsigned long __copy_from_user(void *dst, const void __user *src, unsigned size)
9422 {
9423 - int ret = 0;
9424 + int sz = __compiletime_object_size(dst);
9425 + unsigned ret = 0;
9426
9427 might_fault();
9428 - if (!__builtin_constant_p(size))
9429 - return copy_user_generic(dst, (__force void *)src, size);
9430 +
9431 + pax_track_stack();
9432 +
9433 + if ((int)size < 0)
9434 + return size;
9435 +
9436 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9437 + if (!__access_ok(VERIFY_READ, src, size))
9438 + return size;
9439 +#endif
9440 +
9441 + if (unlikely(sz != -1 && sz < size)) {
9442 +#ifdef CONFIG_DEBUG_VM
9443 + WARN(1, "Buffer overflow detected!\n");
9444 +#endif
9445 + return size;
9446 + }
9447 +
9448 + if (!__builtin_constant_p(size)) {
9449 + check_object_size(dst, size, false);
9450 +
9451 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9452 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9453 + src += PAX_USER_SHADOW_BASE;
9454 +#endif
9455 +
9456 + return copy_user_generic(dst, (__force const void *)src, size);
9457 + }
9458 switch (size) {
9459 - case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
9460 + case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
9461 ret, "b", "b", "=q", 1);
9462 return ret;
9463 - case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
9464 + case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
9465 ret, "w", "w", "=r", 2);
9466 return ret;
9467 - case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
9468 + case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
9469 ret, "l", "k", "=r", 4);
9470 return ret;
9471 - case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
9472 + case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
9473 ret, "q", "", "=r", 8);
9474 return ret;
9475 case 10:
9476 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
9477 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
9478 ret, "q", "", "=r", 10);
9479 if (unlikely(ret))
9480 return ret;
9481 __get_user_asm(*(u16 *)(8 + (char *)dst),
9482 - (u16 __user *)(8 + (char __user *)src),
9483 + (const u16 __user *)(8 + (const char __user *)src),
9484 ret, "w", "w", "=r", 2);
9485 return ret;
9486 case 16:
9487 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
9488 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
9489 ret, "q", "", "=r", 16);
9490 if (unlikely(ret))
9491 return ret;
9492 __get_user_asm(*(u64 *)(8 + (char *)dst),
9493 - (u64 __user *)(8 + (char __user *)src),
9494 + (const u64 __user *)(8 + (const char __user *)src),
9495 ret, "q", "", "=r", 8);
9496 return ret;
9497 default:
9498 - return copy_user_generic(dst, (__force void *)src, size);
9499 +
9500 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9501 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9502 + src += PAX_USER_SHADOW_BASE;
9503 +#endif
9504 +
9505 + return copy_user_generic(dst, (__force const void *)src, size);
9506 }
9507 }
9508
9509 static __always_inline __must_check
9510 -int __copy_to_user(void __user *dst, const void *src, unsigned size)
9511 +unsigned long __copy_to_user(void __user *dst, const void *src, unsigned size)
9512 {
9513 - int ret = 0;
9514 + int sz = __compiletime_object_size(src);
9515 + unsigned ret = 0;
9516
9517 might_fault();
9518 - if (!__builtin_constant_p(size))
9519 +
9520 + pax_track_stack();
9521 +
9522 + if ((int)size < 0)
9523 + return size;
9524 +
9525 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9526 + if (!__access_ok(VERIFY_WRITE, dst, size))
9527 + return size;
9528 +#endif
9529 +
9530 + if (unlikely(sz != -1 && sz < size)) {
9531 +#ifdef CONFIG_DEBUG_VM
9532 + WARN(1, "Buffer overflow detected!\n");
9533 +#endif
9534 + return size;
9535 + }
9536 +
9537 + if (!__builtin_constant_p(size)) {
9538 + check_object_size(src, size, true);
9539 +
9540 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9541 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9542 + dst += PAX_USER_SHADOW_BASE;
9543 +#endif
9544 +
9545 return copy_user_generic((__force void *)dst, src, size);
9546 + }
9547 switch (size) {
9548 - case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
9549 + case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
9550 ret, "b", "b", "iq", 1);
9551 return ret;
9552 - case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
9553 + case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
9554 ret, "w", "w", "ir", 2);
9555 return ret;
9556 - case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
9557 + case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
9558 ret, "l", "k", "ir", 4);
9559 return ret;
9560 - case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
9561 + case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
9562 ret, "q", "", "er", 8);
9563 return ret;
9564 case 10:
9565 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
9566 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
9567 ret, "q", "", "er", 10);
9568 if (unlikely(ret))
9569 return ret;
9570 asm("":::"memory");
9571 - __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
9572 + __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
9573 ret, "w", "w", "ir", 2);
9574 return ret;
9575 case 16:
9576 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
9577 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
9578 ret, "q", "", "er", 16);
9579 if (unlikely(ret))
9580 return ret;
9581 asm("":::"memory");
9582 - __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
9583 + __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
9584 ret, "q", "", "er", 8);
9585 return ret;
9586 default:
9587 +
9588 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9589 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9590 + dst += PAX_USER_SHADOW_BASE;
9591 +#endif
9592 +
9593 return copy_user_generic((__force void *)dst, src, size);
9594 }
9595 }
9596
9597 static __always_inline __must_check
9598 -int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
9599 +unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned size)
9600 {
9601 - int ret = 0;
9602 + unsigned ret = 0;
9603
9604 might_fault();
9605 - if (!__builtin_constant_p(size))
9606 +
9607 + if ((int)size < 0)
9608 + return size;
9609 +
9610 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9611 + if (!__access_ok(VERIFY_READ, src, size))
9612 + return size;
9613 + if (!__access_ok(VERIFY_WRITE, dst, size))
9614 + return size;
9615 +#endif
9616 +
9617 + if (!__builtin_constant_p(size)) {
9618 +
9619 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9620 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9621 + src += PAX_USER_SHADOW_BASE;
9622 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9623 + dst += PAX_USER_SHADOW_BASE;
9624 +#endif
9625 +
9626 return copy_user_generic((__force void *)dst,
9627 - (__force void *)src, size);
9628 + (__force const void *)src, size);
9629 + }
9630 switch (size) {
9631 case 1: {
9632 u8 tmp;
9633 - __get_user_asm(tmp, (u8 __user *)src,
9634 + __get_user_asm(tmp, (const u8 __user *)src,
9635 ret, "b", "b", "=q", 1);
9636 if (likely(!ret))
9637 __put_user_asm(tmp, (u8 __user *)dst,
9638 @@ -176,7 +267,7 @@ int __copy_in_user(void __user *dst, con
9639 }
9640 case 2: {
9641 u16 tmp;
9642 - __get_user_asm(tmp, (u16 __user *)src,
9643 + __get_user_asm(tmp, (const u16 __user *)src,
9644 ret, "w", "w", "=r", 2);
9645 if (likely(!ret))
9646 __put_user_asm(tmp, (u16 __user *)dst,
9647 @@ -186,7 +277,7 @@ int __copy_in_user(void __user *dst, con
9648
9649 case 4: {
9650 u32 tmp;
9651 - __get_user_asm(tmp, (u32 __user *)src,
9652 + __get_user_asm(tmp, (const u32 __user *)src,
9653 ret, "l", "k", "=r", 4);
9654 if (likely(!ret))
9655 __put_user_asm(tmp, (u32 __user *)dst,
9656 @@ -195,7 +286,7 @@ int __copy_in_user(void __user *dst, con
9657 }
9658 case 8: {
9659 u64 tmp;
9660 - __get_user_asm(tmp, (u64 __user *)src,
9661 + __get_user_asm(tmp, (const u64 __user *)src,
9662 ret, "q", "", "=r", 8);
9663 if (likely(!ret))
9664 __put_user_asm(tmp, (u64 __user *)dst,
9665 @@ -203,8 +294,16 @@ int __copy_in_user(void __user *dst, con
9666 return ret;
9667 }
9668 default:
9669 +
9670 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9671 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9672 + src += PAX_USER_SHADOW_BASE;
9673 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9674 + dst += PAX_USER_SHADOW_BASE;
9675 +#endif
9676 +
9677 return copy_user_generic((__force void *)dst,
9678 - (__force void *)src, size);
9679 + (__force const void *)src, size);
9680 }
9681 }
9682
9683 @@ -221,33 +320,72 @@ __must_check unsigned long __clear_user(
9684 static __must_check __always_inline int
9685 __copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
9686 {
9687 + pax_track_stack();
9688 +
9689 + if ((int)size < 0)
9690 + return size;
9691 +
9692 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9693 + if (!__access_ok(VERIFY_READ, src, size))
9694 + return size;
9695 +
9696 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9697 + src += PAX_USER_SHADOW_BASE;
9698 +#endif
9699 +
9700 return copy_user_generic(dst, (__force const void *)src, size);
9701 }
9702
9703 -static __must_check __always_inline int
9704 +static __must_check __always_inline unsigned long
9705 __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
9706 {
9707 + if ((int)size < 0)
9708 + return size;
9709 +
9710 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9711 + if (!__access_ok(VERIFY_WRITE, dst, size))
9712 + return size;
9713 +
9714 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9715 + dst += PAX_USER_SHADOW_BASE;
9716 +#endif
9717 +
9718 return copy_user_generic((__force void *)dst, src, size);
9719 }
9720
9721 -extern long __copy_user_nocache(void *dst, const void __user *src,
9722 +extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
9723 unsigned size, int zerorest);
9724
9725 -static inline int
9726 -__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
9727 +static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
9728 {
9729 might_sleep();
9730 +
9731 + if ((int)size < 0)
9732 + return size;
9733 +
9734 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9735 + if (!__access_ok(VERIFY_READ, src, size))
9736 + return size;
9737 +#endif
9738 +
9739 return __copy_user_nocache(dst, src, size, 1);
9740 }
9741
9742 -static inline int
9743 -__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
9744 +static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
9745 unsigned size)
9746 {
9747 + if ((int)size < 0)
9748 + return size;
9749 +
9750 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9751 + if (!__access_ok(VERIFY_READ, src, size))
9752 + return size;
9753 +#endif
9754 +
9755 return __copy_user_nocache(dst, src, size, 0);
9756 }
9757
9758 -unsigned long
9759 +extern unsigned long
9760 copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
9761
9762 #endif /* _ASM_X86_UACCESS_64_H */
9763 diff -urNp linux-3.0.4/arch/x86/include/asm/uaccess.h linux-3.0.4/arch/x86/include/asm/uaccess.h
9764 --- linux-3.0.4/arch/x86/include/asm/uaccess.h 2011-07-21 22:17:23.000000000 -0400
9765 +++ linux-3.0.4/arch/x86/include/asm/uaccess.h 2011-08-23 21:47:55.000000000 -0400
9766 @@ -7,12 +7,15 @@
9767 #include <linux/compiler.h>
9768 #include <linux/thread_info.h>
9769 #include <linux/string.h>
9770 +#include <linux/sched.h>
9771 #include <asm/asm.h>
9772 #include <asm/page.h>
9773
9774 #define VERIFY_READ 0
9775 #define VERIFY_WRITE 1
9776
9777 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
9778 +
9779 /*
9780 * The fs value determines whether argument validity checking should be
9781 * performed or not. If get_fs() == USER_DS, checking is performed, with
9782 @@ -28,7 +31,12 @@
9783
9784 #define get_ds() (KERNEL_DS)
9785 #define get_fs() (current_thread_info()->addr_limit)
9786 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
9787 +void __set_fs(mm_segment_t x);
9788 +void set_fs(mm_segment_t x);
9789 +#else
9790 #define set_fs(x) (current_thread_info()->addr_limit = (x))
9791 +#endif
9792
9793 #define segment_eq(a, b) ((a).seg == (b).seg)
9794
9795 @@ -76,7 +84,33 @@
9796 * checks that the pointer is in the user space range - after calling
9797 * this function, memory access functions may still return -EFAULT.
9798 */
9799 -#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
9800 +#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
9801 +#define access_ok(type, addr, size) \
9802 +({ \
9803 + long __size = size; \
9804 + unsigned long __addr = (unsigned long)addr; \
9805 + unsigned long __addr_ao = __addr & PAGE_MASK; \
9806 + unsigned long __end_ao = __addr + __size - 1; \
9807 + bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
9808 + if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
9809 + while(__addr_ao <= __end_ao) { \
9810 + char __c_ao; \
9811 + __addr_ao += PAGE_SIZE; \
9812 + if (__size > PAGE_SIZE) \
9813 + cond_resched(); \
9814 + if (__get_user(__c_ao, (char __user *)__addr)) \
9815 + break; \
9816 + if (type != VERIFY_WRITE) { \
9817 + __addr = __addr_ao; \
9818 + continue; \
9819 + } \
9820 + if (__put_user(__c_ao, (char __user *)__addr)) \
9821 + break; \
9822 + __addr = __addr_ao; \
9823 + } \
9824 + } \
9825 + __ret_ao; \
9826 +})
9827
9828 /*
9829 * The exception table consists of pairs of addresses: the first is the
9830 @@ -182,12 +216,20 @@ extern int __get_user_bad(void);
9831 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
9832 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
9833
9834 -
9835 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
9836 +#define __copyuser_seg "gs;"
9837 +#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
9838 +#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
9839 +#else
9840 +#define __copyuser_seg
9841 +#define __COPYUSER_SET_ES
9842 +#define __COPYUSER_RESTORE_ES
9843 +#endif
9844
9845 #ifdef CONFIG_X86_32
9846 #define __put_user_asm_u64(x, addr, err, errret) \
9847 - asm volatile("1: movl %%eax,0(%2)\n" \
9848 - "2: movl %%edx,4(%2)\n" \
9849 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
9850 + "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
9851 "3:\n" \
9852 ".section .fixup,\"ax\"\n" \
9853 "4: movl %3,%0\n" \
9854 @@ -199,8 +241,8 @@ extern int __get_user_bad(void);
9855 : "A" (x), "r" (addr), "i" (errret), "0" (err))
9856
9857 #define __put_user_asm_ex_u64(x, addr) \
9858 - asm volatile("1: movl %%eax,0(%1)\n" \
9859 - "2: movl %%edx,4(%1)\n" \
9860 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
9861 + "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
9862 "3:\n" \
9863 _ASM_EXTABLE(1b, 2b - 1b) \
9864 _ASM_EXTABLE(2b, 3b - 2b) \
9865 @@ -373,7 +415,7 @@ do { \
9866 } while (0)
9867
9868 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
9869 - asm volatile("1: mov"itype" %2,%"rtype"1\n" \
9870 + asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
9871 "2:\n" \
9872 ".section .fixup,\"ax\"\n" \
9873 "3: mov %3,%0\n" \
9874 @@ -381,7 +423,7 @@ do { \
9875 " jmp 2b\n" \
9876 ".previous\n" \
9877 _ASM_EXTABLE(1b, 3b) \
9878 - : "=r" (err), ltype(x) \
9879 + : "=r" (err), ltype (x) \
9880 : "m" (__m(addr)), "i" (errret), "0" (err))
9881
9882 #define __get_user_size_ex(x, ptr, size) \
9883 @@ -406,7 +448,7 @@ do { \
9884 } while (0)
9885
9886 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
9887 - asm volatile("1: mov"itype" %1,%"rtype"0\n" \
9888 + asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
9889 "2:\n" \
9890 _ASM_EXTABLE(1b, 2b - 1b) \
9891 : ltype(x) : "m" (__m(addr)))
9892 @@ -423,13 +465,24 @@ do { \
9893 int __gu_err; \
9894 unsigned long __gu_val; \
9895 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
9896 - (x) = (__force __typeof__(*(ptr)))__gu_val; \
9897 + (x) = (__typeof__(*(ptr)))__gu_val; \
9898 __gu_err; \
9899 })
9900
9901 /* FIXME: this hack is definitely wrong -AK */
9902 struct __large_struct { unsigned long buf[100]; };
9903 -#define __m(x) (*(struct __large_struct __user *)(x))
9904 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9905 +#define ____m(x) \
9906 +({ \
9907 + unsigned long ____x = (unsigned long)(x); \
9908 + if (____x < PAX_USER_SHADOW_BASE) \
9909 + ____x += PAX_USER_SHADOW_BASE; \
9910 + (void __user *)____x; \
9911 +})
9912 +#else
9913 +#define ____m(x) (x)
9914 +#endif
9915 +#define __m(x) (*(struct __large_struct __user *)____m(x))
9916
9917 /*
9918 * Tell gcc we read from memory instead of writing: this is because
9919 @@ -437,7 +490,7 @@ struct __large_struct { unsigned long bu
9920 * aliasing issues.
9921 */
9922 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
9923 - asm volatile("1: mov"itype" %"rtype"1,%2\n" \
9924 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
9925 "2:\n" \
9926 ".section .fixup,\"ax\"\n" \
9927 "3: mov %3,%0\n" \
9928 @@ -445,10 +498,10 @@ struct __large_struct { unsigned long bu
9929 ".previous\n" \
9930 _ASM_EXTABLE(1b, 3b) \
9931 : "=r"(err) \
9932 - : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
9933 + : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
9934
9935 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
9936 - asm volatile("1: mov"itype" %"rtype"0,%1\n" \
9937 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
9938 "2:\n" \
9939 _ASM_EXTABLE(1b, 2b - 1b) \
9940 : : ltype(x), "m" (__m(addr)))
9941 @@ -487,8 +540,12 @@ struct __large_struct { unsigned long bu
9942 * On error, the variable @x is set to zero.
9943 */
9944
9945 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9946 +#define __get_user(x, ptr) get_user((x), (ptr))
9947 +#else
9948 #define __get_user(x, ptr) \
9949 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
9950 +#endif
9951
9952 /**
9953 * __put_user: - Write a simple value into user space, with less checking.
9954 @@ -510,8 +567,12 @@ struct __large_struct { unsigned long bu
9955 * Returns zero on success, or -EFAULT on error.
9956 */
9957
9958 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9959 +#define __put_user(x, ptr) put_user((x), (ptr))
9960 +#else
9961 #define __put_user(x, ptr) \
9962 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
9963 +#endif
9964
9965 #define __get_user_unaligned __get_user
9966 #define __put_user_unaligned __put_user
9967 @@ -529,7 +590,7 @@ struct __large_struct { unsigned long bu
9968 #define get_user_ex(x, ptr) do { \
9969 unsigned long __gue_val; \
9970 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
9971 - (x) = (__force __typeof__(*(ptr)))__gue_val; \
9972 + (x) = (__typeof__(*(ptr)))__gue_val; \
9973 } while (0)
9974
9975 #ifdef CONFIG_X86_WP_WORKS_OK
9976 diff -urNp linux-3.0.4/arch/x86/include/asm/x86_init.h linux-3.0.4/arch/x86/include/asm/x86_init.h
9977 --- linux-3.0.4/arch/x86/include/asm/x86_init.h 2011-07-21 22:17:23.000000000 -0400
9978 +++ linux-3.0.4/arch/x86/include/asm/x86_init.h 2011-08-23 21:47:55.000000000 -0400
9979 @@ -28,7 +28,7 @@ struct x86_init_mpparse {
9980 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
9981 void (*find_smp_config)(void);
9982 void (*get_smp_config)(unsigned int early);
9983 -};
9984 +} __no_const;
9985
9986 /**
9987 * struct x86_init_resources - platform specific resource related ops
9988 @@ -42,7 +42,7 @@ struct x86_init_resources {
9989 void (*probe_roms)(void);
9990 void (*reserve_resources)(void);
9991 char *(*memory_setup)(void);
9992 -};
9993 +} __no_const;
9994
9995 /**
9996 * struct x86_init_irqs - platform specific interrupt setup
9997 @@ -55,7 +55,7 @@ struct x86_init_irqs {
9998 void (*pre_vector_init)(void);
9999 void (*intr_init)(void);
10000 void (*trap_init)(void);
10001 -};
10002 +} __no_const;
10003
10004 /**
10005 * struct x86_init_oem - oem platform specific customizing functions
10006 @@ -65,7 +65,7 @@ struct x86_init_irqs {
10007 struct x86_init_oem {
10008 void (*arch_setup)(void);
10009 void (*banner)(void);
10010 -};
10011 +} __no_const;
10012
10013 /**
10014 * struct x86_init_mapping - platform specific initial kernel pagetable setup
10015 @@ -76,7 +76,7 @@ struct x86_init_oem {
10016 */
10017 struct x86_init_mapping {
10018 void (*pagetable_reserve)(u64 start, u64 end);
10019 -};
10020 +} __no_const;
10021
10022 /**
10023 * struct x86_init_paging - platform specific paging functions
10024 @@ -86,7 +86,7 @@ struct x86_init_mapping {
10025 struct x86_init_paging {
10026 void (*pagetable_setup_start)(pgd_t *base);
10027 void (*pagetable_setup_done)(pgd_t *base);
10028 -};
10029 +} __no_const;
10030
10031 /**
10032 * struct x86_init_timers - platform specific timer setup
10033 @@ -101,7 +101,7 @@ struct x86_init_timers {
10034 void (*tsc_pre_init)(void);
10035 void (*timer_init)(void);
10036 void (*wallclock_init)(void);
10037 -};
10038 +} __no_const;
10039
10040 /**
10041 * struct x86_init_iommu - platform specific iommu setup
10042 @@ -109,7 +109,7 @@ struct x86_init_timers {
10043 */
10044 struct x86_init_iommu {
10045 int (*iommu_init)(void);
10046 -};
10047 +} __no_const;
10048
10049 /**
10050 * struct x86_init_pci - platform specific pci init functions
10051 @@ -123,7 +123,7 @@ struct x86_init_pci {
10052 int (*init)(void);
10053 void (*init_irq)(void);
10054 void (*fixup_irqs)(void);
10055 -};
10056 +} __no_const;
10057
10058 /**
10059 * struct x86_init_ops - functions for platform specific setup
10060 @@ -139,7 +139,7 @@ struct x86_init_ops {
10061 struct x86_init_timers timers;
10062 struct x86_init_iommu iommu;
10063 struct x86_init_pci pci;
10064 -};
10065 +} __no_const;
10066
10067 /**
10068 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
10069 @@ -147,7 +147,7 @@ struct x86_init_ops {
10070 */
10071 struct x86_cpuinit_ops {
10072 void (*setup_percpu_clockev)(void);
10073 -};
10074 +} __no_const;
10075
10076 /**
10077 * struct x86_platform_ops - platform specific runtime functions
10078 @@ -166,7 +166,7 @@ struct x86_platform_ops {
10079 bool (*is_untracked_pat_range)(u64 start, u64 end);
10080 void (*nmi_init)(void);
10081 int (*i8042_detect)(void);
10082 -};
10083 +} __no_const;
10084
10085 struct pci_dev;
10086
10087 @@ -174,7 +174,7 @@ struct x86_msi_ops {
10088 int (*setup_msi_irqs)(struct pci_dev *dev, int nvec, int type);
10089 void (*teardown_msi_irq)(unsigned int irq);
10090 void (*teardown_msi_irqs)(struct pci_dev *dev);
10091 -};
10092 +} __no_const;
10093
10094 extern struct x86_init_ops x86_init;
10095 extern struct x86_cpuinit_ops x86_cpuinit;
10096 diff -urNp linux-3.0.4/arch/x86/include/asm/xsave.h linux-3.0.4/arch/x86/include/asm/xsave.h
10097 --- linux-3.0.4/arch/x86/include/asm/xsave.h 2011-07-21 22:17:23.000000000 -0400
10098 +++ linux-3.0.4/arch/x86/include/asm/xsave.h 2011-08-23 21:47:55.000000000 -0400
10099 @@ -65,6 +65,11 @@ static inline int xsave_user(struct xsav
10100 {
10101 int err;
10102
10103 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10104 + if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
10105 + buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
10106 +#endif
10107 +
10108 /*
10109 * Clear the xsave header first, so that reserved fields are
10110 * initialized to zero.
10111 @@ -100,6 +105,11 @@ static inline int xrestore_user(struct x
10112 u32 lmask = mask;
10113 u32 hmask = mask >> 32;
10114
10115 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10116 + if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
10117 + xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
10118 +#endif
10119 +
10120 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
10121 "2:\n"
10122 ".section .fixup,\"ax\"\n"
10123 diff -urNp linux-3.0.4/arch/x86/Kconfig linux-3.0.4/arch/x86/Kconfig
10124 --- linux-3.0.4/arch/x86/Kconfig 2011-07-21 22:17:23.000000000 -0400
10125 +++ linux-3.0.4/arch/x86/Kconfig 2011-09-17 00:58:36.000000000 -0400
10126 @@ -229,7 +229,7 @@ config X86_HT
10127
10128 config X86_32_LAZY_GS
10129 def_bool y
10130 - depends on X86_32 && !CC_STACKPROTECTOR
10131 + depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
10132
10133 config ARCH_HWEIGHT_CFLAGS
10134 string
10135 @@ -1018,7 +1018,7 @@ choice
10136
10137 config NOHIGHMEM
10138 bool "off"
10139 - depends on !X86_NUMAQ
10140 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
10141 ---help---
10142 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
10143 However, the address space of 32-bit x86 processors is only 4
10144 @@ -1055,7 +1055,7 @@ config NOHIGHMEM
10145
10146 config HIGHMEM4G
10147 bool "4GB"
10148 - depends on !X86_NUMAQ
10149 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
10150 ---help---
10151 Select this if you have a 32-bit processor and between 1 and 4
10152 gigabytes of physical RAM.
10153 @@ -1109,7 +1109,7 @@ config PAGE_OFFSET
10154 hex
10155 default 0xB0000000 if VMSPLIT_3G_OPT
10156 default 0x80000000 if VMSPLIT_2G
10157 - default 0x78000000 if VMSPLIT_2G_OPT
10158 + default 0x70000000 if VMSPLIT_2G_OPT
10159 default 0x40000000 if VMSPLIT_1G
10160 default 0xC0000000
10161 depends on X86_32
10162 @@ -1483,6 +1483,7 @@ config SECCOMP
10163
10164 config CC_STACKPROTECTOR
10165 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
10166 + depends on X86_64 || !PAX_MEMORY_UDEREF
10167 ---help---
10168 This option turns on the -fstack-protector GCC feature. This
10169 feature puts, at the beginning of functions, a canary value on
10170 @@ -1540,6 +1541,7 @@ config KEXEC_JUMP
10171 config PHYSICAL_START
10172 hex "Physical address where the kernel is loaded" if (EXPERT || CRASH_DUMP)
10173 default "0x1000000"
10174 + range 0x400000 0x40000000
10175 ---help---
10176 This gives the physical address where the kernel is loaded.
10177
10178 @@ -1603,6 +1605,7 @@ config X86_NEED_RELOCS
10179 config PHYSICAL_ALIGN
10180 hex "Alignment value to which kernel should be aligned" if X86_32
10181 default "0x1000000"
10182 + range 0x400000 0x1000000 if PAX_KERNEXEC
10183 range 0x2000 0x1000000
10184 ---help---
10185 This value puts the alignment restrictions on physical address
10186 @@ -1634,9 +1637,10 @@ config HOTPLUG_CPU
10187 Say N if you want to disable CPU hotplug.
10188
10189 config COMPAT_VDSO
10190 - def_bool y
10191 + def_bool n
10192 prompt "Compat VDSO support"
10193 depends on X86_32 || IA32_EMULATION
10194 + depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
10195 ---help---
10196 Map the 32-bit VDSO to the predictable old-style address too.
10197
10198 diff -urNp linux-3.0.4/arch/x86/Kconfig.cpu linux-3.0.4/arch/x86/Kconfig.cpu
10199 --- linux-3.0.4/arch/x86/Kconfig.cpu 2011-07-21 22:17:23.000000000 -0400
10200 +++ linux-3.0.4/arch/x86/Kconfig.cpu 2011-08-23 21:47:55.000000000 -0400
10201 @@ -338,7 +338,7 @@ config X86_PPRO_FENCE
10202
10203 config X86_F00F_BUG
10204 def_bool y
10205 - depends on M586MMX || M586TSC || M586 || M486 || M386
10206 + depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
10207
10208 config X86_INVD_BUG
10209 def_bool y
10210 @@ -362,7 +362,7 @@ config X86_POPAD_OK
10211
10212 config X86_ALIGNMENT_16
10213 def_bool y
10214 - depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
10215 + depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
10216
10217 config X86_INTEL_USERCOPY
10218 def_bool y
10219 @@ -408,7 +408,7 @@ config X86_CMPXCHG64
10220 # generates cmov.
10221 config X86_CMOV
10222 def_bool y
10223 - depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
10224 + depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
10225
10226 config X86_MINIMUM_CPU_FAMILY
10227 int
10228 diff -urNp linux-3.0.4/arch/x86/Kconfig.debug linux-3.0.4/arch/x86/Kconfig.debug
10229 --- linux-3.0.4/arch/x86/Kconfig.debug 2011-07-21 22:17:23.000000000 -0400
10230 +++ linux-3.0.4/arch/x86/Kconfig.debug 2011-08-23 21:47:55.000000000 -0400
10231 @@ -81,7 +81,7 @@ config X86_PTDUMP
10232 config DEBUG_RODATA
10233 bool "Write protect kernel read-only data structures"
10234 default y
10235 - depends on DEBUG_KERNEL
10236 + depends on DEBUG_KERNEL && BROKEN
10237 ---help---
10238 Mark the kernel read-only data as write-protected in the pagetables,
10239 in order to catch accidental (and incorrect) writes to such const
10240 @@ -99,7 +99,7 @@ config DEBUG_RODATA_TEST
10241
10242 config DEBUG_SET_MODULE_RONX
10243 bool "Set loadable kernel module data as NX and text as RO"
10244 - depends on MODULES
10245 + depends on MODULES && BROKEN
10246 ---help---
10247 This option helps catch unintended modifications to loadable
10248 kernel module's text and read-only data. It also prevents execution
10249 diff -urNp linux-3.0.4/arch/x86/kernel/acpi/realmode/Makefile linux-3.0.4/arch/x86/kernel/acpi/realmode/Makefile
10250 --- linux-3.0.4/arch/x86/kernel/acpi/realmode/Makefile 2011-07-21 22:17:23.000000000 -0400
10251 +++ linux-3.0.4/arch/x86/kernel/acpi/realmode/Makefile 2011-08-23 21:47:55.000000000 -0400
10252 @@ -41,6 +41,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os
10253 $(call cc-option, -fno-stack-protector) \
10254 $(call cc-option, -mpreferred-stack-boundary=2)
10255 KBUILD_CFLAGS += $(call cc-option, -m32)
10256 +ifdef CONSTIFY_PLUGIN
10257 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
10258 +endif
10259 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
10260 GCOV_PROFILE := n
10261
10262 diff -urNp linux-3.0.4/arch/x86/kernel/acpi/realmode/wakeup.S linux-3.0.4/arch/x86/kernel/acpi/realmode/wakeup.S
10263 --- linux-3.0.4/arch/x86/kernel/acpi/realmode/wakeup.S 2011-07-21 22:17:23.000000000 -0400
10264 +++ linux-3.0.4/arch/x86/kernel/acpi/realmode/wakeup.S 2011-08-23 21:48:14.000000000 -0400
10265 @@ -108,6 +108,9 @@ wakeup_code:
10266 /* Do any other stuff... */
10267
10268 #ifndef CONFIG_64BIT
10269 + /* Recheck NX bit overrides (64bit path does this in trampoline */
10270 + call verify_cpu
10271 +
10272 /* This could also be done in C code... */
10273 movl pmode_cr3, %eax
10274 movl %eax, %cr3
10275 @@ -131,6 +134,7 @@ wakeup_code:
10276 movl pmode_cr0, %eax
10277 movl %eax, %cr0
10278 jmp pmode_return
10279 +# include "../../verify_cpu.S"
10280 #else
10281 pushw $0
10282 pushw trampoline_segment
10283 diff -urNp linux-3.0.4/arch/x86/kernel/acpi/sleep.c linux-3.0.4/arch/x86/kernel/acpi/sleep.c
10284 --- linux-3.0.4/arch/x86/kernel/acpi/sleep.c 2011-07-21 22:17:23.000000000 -0400
10285 +++ linux-3.0.4/arch/x86/kernel/acpi/sleep.c 2011-08-23 21:47:55.000000000 -0400
10286 @@ -94,8 +94,12 @@ int acpi_suspend_lowlevel(void)
10287 header->trampoline_segment = trampoline_address() >> 4;
10288 #ifdef CONFIG_SMP
10289 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
10290 +
10291 + pax_open_kernel();
10292 early_gdt_descr.address =
10293 (unsigned long)get_cpu_gdt_table(smp_processor_id());
10294 + pax_close_kernel();
10295 +
10296 initial_gs = per_cpu_offset(smp_processor_id());
10297 #endif
10298 initial_code = (unsigned long)wakeup_long64;
10299 diff -urNp linux-3.0.4/arch/x86/kernel/acpi/wakeup_32.S linux-3.0.4/arch/x86/kernel/acpi/wakeup_32.S
10300 --- linux-3.0.4/arch/x86/kernel/acpi/wakeup_32.S 2011-07-21 22:17:23.000000000 -0400
10301 +++ linux-3.0.4/arch/x86/kernel/acpi/wakeup_32.S 2011-08-23 21:47:55.000000000 -0400
10302 @@ -30,13 +30,11 @@ wakeup_pmode_return:
10303 # and restore the stack ... but you need gdt for this to work
10304 movl saved_context_esp, %esp
10305
10306 - movl %cs:saved_magic, %eax
10307 - cmpl $0x12345678, %eax
10308 + cmpl $0x12345678, saved_magic
10309 jne bogus_magic
10310
10311 # jump to place where we left off
10312 - movl saved_eip, %eax
10313 - jmp *%eax
10314 + jmp *(saved_eip)
10315
10316 bogus_magic:
10317 jmp bogus_magic
10318 diff -urNp linux-3.0.4/arch/x86/kernel/alternative.c linux-3.0.4/arch/x86/kernel/alternative.c
10319 --- linux-3.0.4/arch/x86/kernel/alternative.c 2011-07-21 22:17:23.000000000 -0400
10320 +++ linux-3.0.4/arch/x86/kernel/alternative.c 2011-08-23 21:47:55.000000000 -0400
10321 @@ -313,7 +313,7 @@ static void alternatives_smp_lock(const
10322 if (!*poff || ptr < text || ptr >= text_end)
10323 continue;
10324 /* turn DS segment override prefix into lock prefix */
10325 - if (*ptr == 0x3e)
10326 + if (*ktla_ktva(ptr) == 0x3e)
10327 text_poke(ptr, ((unsigned char []){0xf0}), 1);
10328 };
10329 mutex_unlock(&text_mutex);
10330 @@ -334,7 +334,7 @@ static void alternatives_smp_unlock(cons
10331 if (!*poff || ptr < text || ptr >= text_end)
10332 continue;
10333 /* turn lock prefix into DS segment override prefix */
10334 - if (*ptr == 0xf0)
10335 + if (*ktla_ktva(ptr) == 0xf0)
10336 text_poke(ptr, ((unsigned char []){0x3E}), 1);
10337 };
10338 mutex_unlock(&text_mutex);
10339 @@ -503,7 +503,7 @@ void __init_or_module apply_paravirt(str
10340
10341 BUG_ON(p->len > MAX_PATCH_LEN);
10342 /* prep the buffer with the original instructions */
10343 - memcpy(insnbuf, p->instr, p->len);
10344 + memcpy(insnbuf, ktla_ktva(p->instr), p->len);
10345 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
10346 (unsigned long)p->instr, p->len);
10347
10348 @@ -571,7 +571,7 @@ void __init alternative_instructions(voi
10349 if (smp_alt_once)
10350 free_init_pages("SMP alternatives",
10351 (unsigned long)__smp_locks,
10352 - (unsigned long)__smp_locks_end);
10353 + PAGE_ALIGN((unsigned long)__smp_locks_end));
10354
10355 restart_nmi();
10356 }
10357 @@ -588,13 +588,17 @@ void __init alternative_instructions(voi
10358 * instructions. And on the local CPU you need to be protected again NMI or MCE
10359 * handlers seeing an inconsistent instruction while you patch.
10360 */
10361 -void *__init_or_module text_poke_early(void *addr, const void *opcode,
10362 +void *__kprobes text_poke_early(void *addr, const void *opcode,
10363 size_t len)
10364 {
10365 unsigned long flags;
10366 local_irq_save(flags);
10367 - memcpy(addr, opcode, len);
10368 +
10369 + pax_open_kernel();
10370 + memcpy(ktla_ktva(addr), opcode, len);
10371 sync_core();
10372 + pax_close_kernel();
10373 +
10374 local_irq_restore(flags);
10375 /* Could also do a CLFLUSH here to speed up CPU recovery; but
10376 that causes hangs on some VIA CPUs. */
10377 @@ -616,36 +620,22 @@ void *__init_or_module text_poke_early(v
10378 */
10379 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
10380 {
10381 - unsigned long flags;
10382 - char *vaddr;
10383 + unsigned char *vaddr = ktla_ktva(addr);
10384 struct page *pages[2];
10385 - int i;
10386 + size_t i;
10387
10388 if (!core_kernel_text((unsigned long)addr)) {
10389 - pages[0] = vmalloc_to_page(addr);
10390 - pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
10391 + pages[0] = vmalloc_to_page(vaddr);
10392 + pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
10393 } else {
10394 - pages[0] = virt_to_page(addr);
10395 + pages[0] = virt_to_page(vaddr);
10396 WARN_ON(!PageReserved(pages[0]));
10397 - pages[1] = virt_to_page(addr + PAGE_SIZE);
10398 + pages[1] = virt_to_page(vaddr + PAGE_SIZE);
10399 }
10400 BUG_ON(!pages[0]);
10401 - local_irq_save(flags);
10402 - set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
10403 - if (pages[1])
10404 - set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
10405 - vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
10406 - memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
10407 - clear_fixmap(FIX_TEXT_POKE0);
10408 - if (pages[1])
10409 - clear_fixmap(FIX_TEXT_POKE1);
10410 - local_flush_tlb();
10411 - sync_core();
10412 - /* Could also do a CLFLUSH here to speed up CPU recovery; but
10413 - that causes hangs on some VIA CPUs. */
10414 + text_poke_early(addr, opcode, len);
10415 for (i = 0; i < len; i++)
10416 - BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
10417 - local_irq_restore(flags);
10418 + BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
10419 return addr;
10420 }
10421
10422 diff -urNp linux-3.0.4/arch/x86/kernel/apic/apic.c linux-3.0.4/arch/x86/kernel/apic/apic.c
10423 --- linux-3.0.4/arch/x86/kernel/apic/apic.c 2011-07-21 22:17:23.000000000 -0400
10424 +++ linux-3.0.4/arch/x86/kernel/apic/apic.c 2011-08-23 21:48:14.000000000 -0400
10425 @@ -173,7 +173,7 @@ int first_system_vector = 0xfe;
10426 /*
10427 * Debug level, exported for io_apic.c
10428 */
10429 -unsigned int apic_verbosity;
10430 +int apic_verbosity;
10431
10432 int pic_mode;
10433
10434 @@ -1834,7 +1834,7 @@ void smp_error_interrupt(struct pt_regs
10435 apic_write(APIC_ESR, 0);
10436 v1 = apic_read(APIC_ESR);
10437 ack_APIC_irq();
10438 - atomic_inc(&irq_err_count);
10439 + atomic_inc_unchecked(&irq_err_count);
10440
10441 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x(%02x)",
10442 smp_processor_id(), v0 , v1);
10443 @@ -2190,6 +2190,8 @@ static int __cpuinit apic_cluster_num(vo
10444 u16 *bios_cpu_apicid;
10445 DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);
10446
10447 + pax_track_stack();
10448 +
10449 bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
10450 bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
10451
10452 diff -urNp linux-3.0.4/arch/x86/kernel/apic/io_apic.c linux-3.0.4/arch/x86/kernel/apic/io_apic.c
10453 --- linux-3.0.4/arch/x86/kernel/apic/io_apic.c 2011-07-21 22:17:23.000000000 -0400
10454 +++ linux-3.0.4/arch/x86/kernel/apic/io_apic.c 2011-08-23 21:47:55.000000000 -0400
10455 @@ -1028,7 +1028,7 @@ int IO_APIC_get_PCI_irq_vector(int bus,
10456 }
10457 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
10458
10459 -void lock_vector_lock(void)
10460 +void lock_vector_lock(void) __acquires(vector_lock)
10461 {
10462 /* Used to the online set of cpus does not change
10463 * during assign_irq_vector.
10464 @@ -1036,7 +1036,7 @@ void lock_vector_lock(void)
10465 raw_spin_lock(&vector_lock);
10466 }
10467
10468 -void unlock_vector_lock(void)
10469 +void unlock_vector_lock(void) __releases(vector_lock)
10470 {
10471 raw_spin_unlock(&vector_lock);
10472 }
10473 @@ -2364,7 +2364,7 @@ static void ack_apic_edge(struct irq_dat
10474 ack_APIC_irq();
10475 }
10476
10477 -atomic_t irq_mis_count;
10478 +atomic_unchecked_t irq_mis_count;
10479
10480 /*
10481 * IO-APIC versions below 0x20 don't support EOI register.
10482 @@ -2472,7 +2472,7 @@ static void ack_apic_level(struct irq_da
10483 * at the cpu.
10484 */
10485 if (!(v & (1 << (i & 0x1f)))) {
10486 - atomic_inc(&irq_mis_count);
10487 + atomic_inc_unchecked(&irq_mis_count);
10488
10489 eoi_ioapic_irq(irq, cfg);
10490 }
10491 diff -urNp linux-3.0.4/arch/x86/kernel/apm_32.c linux-3.0.4/arch/x86/kernel/apm_32.c
10492 --- linux-3.0.4/arch/x86/kernel/apm_32.c 2011-07-21 22:17:23.000000000 -0400
10493 +++ linux-3.0.4/arch/x86/kernel/apm_32.c 2011-08-23 21:47:55.000000000 -0400
10494 @@ -413,7 +413,7 @@ static DEFINE_MUTEX(apm_mutex);
10495 * This is for buggy BIOS's that refer to (real mode) segment 0x40
10496 * even though they are called in protected mode.
10497 */
10498 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
10499 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
10500 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
10501
10502 static const char driver_version[] = "1.16ac"; /* no spaces */
10503 @@ -591,7 +591,10 @@ static long __apm_bios_call(void *_call)
10504 BUG_ON(cpu != 0);
10505 gdt = get_cpu_gdt_table(cpu);
10506 save_desc_40 = gdt[0x40 / 8];
10507 +
10508 + pax_open_kernel();
10509 gdt[0x40 / 8] = bad_bios_desc;
10510 + pax_close_kernel();
10511
10512 apm_irq_save(flags);
10513 APM_DO_SAVE_SEGS;
10514 @@ -600,7 +603,11 @@ static long __apm_bios_call(void *_call)
10515 &call->esi);
10516 APM_DO_RESTORE_SEGS;
10517 apm_irq_restore(flags);
10518 +
10519 + pax_open_kernel();
10520 gdt[0x40 / 8] = save_desc_40;
10521 + pax_close_kernel();
10522 +
10523 put_cpu();
10524
10525 return call->eax & 0xff;
10526 @@ -667,7 +674,10 @@ static long __apm_bios_call_simple(void
10527 BUG_ON(cpu != 0);
10528 gdt = get_cpu_gdt_table(cpu);
10529 save_desc_40 = gdt[0x40 / 8];
10530 +
10531 + pax_open_kernel();
10532 gdt[0x40 / 8] = bad_bios_desc;
10533 + pax_close_kernel();
10534
10535 apm_irq_save(flags);
10536 APM_DO_SAVE_SEGS;
10537 @@ -675,7 +685,11 @@ static long __apm_bios_call_simple(void
10538 &call->eax);
10539 APM_DO_RESTORE_SEGS;
10540 apm_irq_restore(flags);
10541 +
10542 + pax_open_kernel();
10543 gdt[0x40 / 8] = save_desc_40;
10544 + pax_close_kernel();
10545 +
10546 put_cpu();
10547 return error;
10548 }
10549 @@ -2349,12 +2363,15 @@ static int __init apm_init(void)
10550 * code to that CPU.
10551 */
10552 gdt = get_cpu_gdt_table(0);
10553 +
10554 + pax_open_kernel();
10555 set_desc_base(&gdt[APM_CS >> 3],
10556 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
10557 set_desc_base(&gdt[APM_CS_16 >> 3],
10558 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
10559 set_desc_base(&gdt[APM_DS >> 3],
10560 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
10561 + pax_close_kernel();
10562
10563 proc_create("apm", 0, NULL, &apm_file_ops);
10564
10565 diff -urNp linux-3.0.4/arch/x86/kernel/asm-offsets_64.c linux-3.0.4/arch/x86/kernel/asm-offsets_64.c
10566 --- linux-3.0.4/arch/x86/kernel/asm-offsets_64.c 2011-07-21 22:17:23.000000000 -0400
10567 +++ linux-3.0.4/arch/x86/kernel/asm-offsets_64.c 2011-08-23 21:47:55.000000000 -0400
10568 @@ -69,6 +69,7 @@ int main(void)
10569 BLANK();
10570 #undef ENTRY
10571
10572 + DEFINE(TSS_size, sizeof(struct tss_struct));
10573 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
10574 BLANK();
10575
10576 diff -urNp linux-3.0.4/arch/x86/kernel/asm-offsets.c linux-3.0.4/arch/x86/kernel/asm-offsets.c
10577 --- linux-3.0.4/arch/x86/kernel/asm-offsets.c 2011-07-21 22:17:23.000000000 -0400
10578 +++ linux-3.0.4/arch/x86/kernel/asm-offsets.c 2011-08-23 21:47:55.000000000 -0400
10579 @@ -33,6 +33,8 @@ void common(void) {
10580 OFFSET(TI_status, thread_info, status);
10581 OFFSET(TI_addr_limit, thread_info, addr_limit);
10582 OFFSET(TI_preempt_count, thread_info, preempt_count);
10583 + OFFSET(TI_lowest_stack, thread_info, lowest_stack);
10584 + DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
10585
10586 BLANK();
10587 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
10588 @@ -53,8 +55,26 @@ void common(void) {
10589 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
10590 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
10591 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
10592 +
10593 +#ifdef CONFIG_PAX_KERNEXEC
10594 + OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
10595 +#endif
10596 +
10597 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10598 + OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
10599 + OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
10600 +#ifdef CONFIG_X86_64
10601 + OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
10602 +#endif
10603 #endif
10604
10605 +#endif
10606 +
10607 + BLANK();
10608 + DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
10609 + DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
10610 + DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
10611 +
10612 #ifdef CONFIG_XEN
10613 BLANK();
10614 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
10615 diff -urNp linux-3.0.4/arch/x86/kernel/cpu/amd.c linux-3.0.4/arch/x86/kernel/cpu/amd.c
10616 --- linux-3.0.4/arch/x86/kernel/cpu/amd.c 2011-07-21 22:17:23.000000000 -0400
10617 +++ linux-3.0.4/arch/x86/kernel/cpu/amd.c 2011-08-23 21:47:55.000000000 -0400
10618 @@ -647,7 +647,7 @@ static unsigned int __cpuinit amd_size_c
10619 unsigned int size)
10620 {
10621 /* AMD errata T13 (order #21922) */
10622 - if ((c->x86 == 6)) {
10623 + if (c->x86 == 6) {
10624 /* Duron Rev A0 */
10625 if (c->x86_model == 3 && c->x86_mask == 0)
10626 size = 64;
10627 diff -urNp linux-3.0.4/arch/x86/kernel/cpu/common.c linux-3.0.4/arch/x86/kernel/cpu/common.c
10628 --- linux-3.0.4/arch/x86/kernel/cpu/common.c 2011-07-21 22:17:23.000000000 -0400
10629 +++ linux-3.0.4/arch/x86/kernel/cpu/common.c 2011-08-23 21:47:55.000000000 -0400
10630 @@ -83,60 +83,6 @@ static const struct cpu_dev __cpuinitcon
10631
10632 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
10633
10634 -DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
10635 -#ifdef CONFIG_X86_64
10636 - /*
10637 - * We need valid kernel segments for data and code in long mode too
10638 - * IRET will check the segment types kkeil 2000/10/28
10639 - * Also sysret mandates a special GDT layout
10640 - *
10641 - * TLS descriptors are currently at a different place compared to i386.
10642 - * Hopefully nobody expects them at a fixed place (Wine?)
10643 - */
10644 - [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
10645 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
10646 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
10647 - [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
10648 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
10649 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
10650 -#else
10651 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
10652 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
10653 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
10654 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
10655 - /*
10656 - * Segments used for calling PnP BIOS have byte granularity.
10657 - * They code segments and data segments have fixed 64k limits,
10658 - * the transfer segment sizes are set at run time.
10659 - */
10660 - /* 32-bit code */
10661 - [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
10662 - /* 16-bit code */
10663 - [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
10664 - /* 16-bit data */
10665 - [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
10666 - /* 16-bit data */
10667 - [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
10668 - /* 16-bit data */
10669 - [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
10670 - /*
10671 - * The APM segments have byte granularity and their bases
10672 - * are set at run time. All have 64k limits.
10673 - */
10674 - /* 32-bit code */
10675 - [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
10676 - /* 16-bit code */
10677 - [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
10678 - /* data */
10679 - [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
10680 -
10681 - [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
10682 - [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
10683 - GDT_STACK_CANARY_INIT
10684 -#endif
10685 -} };
10686 -EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
10687 -
10688 static int __init x86_xsave_setup(char *s)
10689 {
10690 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
10691 @@ -371,7 +317,7 @@ void switch_to_new_gdt(int cpu)
10692 {
10693 struct desc_ptr gdt_descr;
10694
10695 - gdt_descr.address = (long)get_cpu_gdt_table(cpu);
10696 + gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
10697 gdt_descr.size = GDT_SIZE - 1;
10698 load_gdt(&gdt_descr);
10699 /* Reload the per-cpu base */
10700 @@ -840,6 +786,10 @@ static void __cpuinit identify_cpu(struc
10701 /* Filter out anything that depends on CPUID levels we don't have */
10702 filter_cpuid_features(c, true);
10703
10704 +#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || (defined(CONFIG_PAX_MEMORY_UDEREF) && defined(CONFIG_X86_32))
10705 + setup_clear_cpu_cap(X86_FEATURE_SEP);
10706 +#endif
10707 +
10708 /* If the model name is still unset, do table lookup. */
10709 if (!c->x86_model_id[0]) {
10710 const char *p;
10711 @@ -1019,6 +969,9 @@ static __init int setup_disablecpuid(cha
10712 }
10713 __setup("clearcpuid=", setup_disablecpuid);
10714
10715 +DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
10716 +EXPORT_PER_CPU_SYMBOL(current_tinfo);
10717 +
10718 #ifdef CONFIG_X86_64
10719 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
10720
10721 @@ -1034,7 +987,7 @@ DEFINE_PER_CPU(struct task_struct *, cur
10722 EXPORT_PER_CPU_SYMBOL(current_task);
10723
10724 DEFINE_PER_CPU(unsigned long, kernel_stack) =
10725 - (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
10726 + (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
10727 EXPORT_PER_CPU_SYMBOL(kernel_stack);
10728
10729 DEFINE_PER_CPU(char *, irq_stack_ptr) =
10730 @@ -1099,7 +1052,7 @@ struct pt_regs * __cpuinit idle_regs(str
10731 {
10732 memset(regs, 0, sizeof(struct pt_regs));
10733 regs->fs = __KERNEL_PERCPU;
10734 - regs->gs = __KERNEL_STACK_CANARY;
10735 + savesegment(gs, regs->gs);
10736
10737 return regs;
10738 }
10739 @@ -1154,7 +1107,7 @@ void __cpuinit cpu_init(void)
10740 int i;
10741
10742 cpu = stack_smp_processor_id();
10743 - t = &per_cpu(init_tss, cpu);
10744 + t = init_tss + cpu;
10745 oist = &per_cpu(orig_ist, cpu);
10746
10747 #ifdef CONFIG_NUMA
10748 @@ -1180,7 +1133,7 @@ void __cpuinit cpu_init(void)
10749 switch_to_new_gdt(cpu);
10750 loadsegment(fs, 0);
10751
10752 - load_idt((const struct desc_ptr *)&idt_descr);
10753 + load_idt(&idt_descr);
10754
10755 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
10756 syscall_init();
10757 @@ -1189,7 +1142,6 @@ void __cpuinit cpu_init(void)
10758 wrmsrl(MSR_KERNEL_GS_BASE, 0);
10759 barrier();
10760
10761 - x86_configure_nx();
10762 if (cpu != 0)
10763 enable_x2apic();
10764
10765 @@ -1243,7 +1195,7 @@ void __cpuinit cpu_init(void)
10766 {
10767 int cpu = smp_processor_id();
10768 struct task_struct *curr = current;
10769 - struct tss_struct *t = &per_cpu(init_tss, cpu);
10770 + struct tss_struct *t = init_tss + cpu;
10771 struct thread_struct *thread = &curr->thread;
10772
10773 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
10774 diff -urNp linux-3.0.4/arch/x86/kernel/cpu/intel.c linux-3.0.4/arch/x86/kernel/cpu/intel.c
10775 --- linux-3.0.4/arch/x86/kernel/cpu/intel.c 2011-09-02 18:11:26.000000000 -0400
10776 +++ linux-3.0.4/arch/x86/kernel/cpu/intel.c 2011-08-29 23:30:14.000000000 -0400
10777 @@ -172,7 +172,7 @@ static void __cpuinit trap_init_f00f_bug
10778 * Update the IDT descriptor and reload the IDT so that
10779 * it uses the read-only mapped virtual address.
10780 */
10781 - idt_descr.address = fix_to_virt(FIX_F00F_IDT);
10782 + idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
10783 load_idt(&idt_descr);
10784 }
10785 #endif
10786 diff -urNp linux-3.0.4/arch/x86/kernel/cpu/Makefile linux-3.0.4/arch/x86/kernel/cpu/Makefile
10787 --- linux-3.0.4/arch/x86/kernel/cpu/Makefile 2011-07-21 22:17:23.000000000 -0400
10788 +++ linux-3.0.4/arch/x86/kernel/cpu/Makefile 2011-08-23 21:47:55.000000000 -0400
10789 @@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
10790 CFLAGS_REMOVE_perf_event.o = -pg
10791 endif
10792
10793 -# Make sure load_percpu_segment has no stackprotector
10794 -nostackp := $(call cc-option, -fno-stack-protector)
10795 -CFLAGS_common.o := $(nostackp)
10796 -
10797 obj-y := intel_cacheinfo.o scattered.o topology.o
10798 obj-y += proc.o capflags.o powerflags.o common.o
10799 obj-y += vmware.o hypervisor.o sched.o mshyperv.o
10800 diff -urNp linux-3.0.4/arch/x86/kernel/cpu/mcheck/mce.c linux-3.0.4/arch/x86/kernel/cpu/mcheck/mce.c
10801 --- linux-3.0.4/arch/x86/kernel/cpu/mcheck/mce.c 2011-07-21 22:17:23.000000000 -0400
10802 +++ linux-3.0.4/arch/x86/kernel/cpu/mcheck/mce.c 2011-08-23 21:47:55.000000000 -0400
10803 @@ -46,6 +46,7 @@
10804 #include <asm/ipi.h>
10805 #include <asm/mce.h>
10806 #include <asm/msr.h>
10807 +#include <asm/local.h>
10808
10809 #include "mce-internal.h"
10810
10811 @@ -208,7 +209,7 @@ static void print_mce(struct mce *m)
10812 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
10813 m->cs, m->ip);
10814
10815 - if (m->cs == __KERNEL_CS)
10816 + if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
10817 print_symbol("{%s}", m->ip);
10818 pr_cont("\n");
10819 }
10820 @@ -236,10 +237,10 @@ static void print_mce(struct mce *m)
10821
10822 #define PANIC_TIMEOUT 5 /* 5 seconds */
10823
10824 -static atomic_t mce_paniced;
10825 +static atomic_unchecked_t mce_paniced;
10826
10827 static int fake_panic;
10828 -static atomic_t mce_fake_paniced;
10829 +static atomic_unchecked_t mce_fake_paniced;
10830
10831 /* Panic in progress. Enable interrupts and wait for final IPI */
10832 static void wait_for_panic(void)
10833 @@ -263,7 +264,7 @@ static void mce_panic(char *msg, struct
10834 /*
10835 * Make sure only one CPU runs in machine check panic
10836 */
10837 - if (atomic_inc_return(&mce_paniced) > 1)
10838 + if (atomic_inc_return_unchecked(&mce_paniced) > 1)
10839 wait_for_panic();
10840 barrier();
10841
10842 @@ -271,7 +272,7 @@ static void mce_panic(char *msg, struct
10843 console_verbose();
10844 } else {
10845 /* Don't log too much for fake panic */
10846 - if (atomic_inc_return(&mce_fake_paniced) > 1)
10847 + if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
10848 return;
10849 }
10850 /* First print corrected ones that are still unlogged */
10851 @@ -638,7 +639,7 @@ static int mce_timed_out(u64 *t)
10852 * might have been modified by someone else.
10853 */
10854 rmb();
10855 - if (atomic_read(&mce_paniced))
10856 + if (atomic_read_unchecked(&mce_paniced))
10857 wait_for_panic();
10858 if (!monarch_timeout)
10859 goto out;
10860 @@ -1452,14 +1453,14 @@ void __cpuinit mcheck_cpu_init(struct cp
10861 */
10862
10863 static DEFINE_SPINLOCK(mce_state_lock);
10864 -static int open_count; /* #times opened */
10865 +static local_t open_count; /* #times opened */
10866 static int open_exclu; /* already open exclusive? */
10867
10868 static int mce_open(struct inode *inode, struct file *file)
10869 {
10870 spin_lock(&mce_state_lock);
10871
10872 - if (open_exclu || (open_count && (file->f_flags & O_EXCL))) {
10873 + if (open_exclu || (local_read(&open_count) && (file->f_flags & O_EXCL))) {
10874 spin_unlock(&mce_state_lock);
10875
10876 return -EBUSY;
10877 @@ -1467,7 +1468,7 @@ static int mce_open(struct inode *inode,
10878
10879 if (file->f_flags & O_EXCL)
10880 open_exclu = 1;
10881 - open_count++;
10882 + local_inc(&open_count);
10883
10884 spin_unlock(&mce_state_lock);
10885
10886 @@ -1478,7 +1479,7 @@ static int mce_release(struct inode *ino
10887 {
10888 spin_lock(&mce_state_lock);
10889
10890 - open_count--;
10891 + local_dec(&open_count);
10892 open_exclu = 0;
10893
10894 spin_unlock(&mce_state_lock);
10895 @@ -2163,7 +2164,7 @@ struct dentry *mce_get_debugfs_dir(void)
10896 static void mce_reset(void)
10897 {
10898 cpu_missing = 0;
10899 - atomic_set(&mce_fake_paniced, 0);
10900 + atomic_set_unchecked(&mce_fake_paniced, 0);
10901 atomic_set(&mce_executing, 0);
10902 atomic_set(&mce_callin, 0);
10903 atomic_set(&global_nwo, 0);
10904 diff -urNp linux-3.0.4/arch/x86/kernel/cpu/mcheck/mce-inject.c linux-3.0.4/arch/x86/kernel/cpu/mcheck/mce-inject.c
10905 --- linux-3.0.4/arch/x86/kernel/cpu/mcheck/mce-inject.c 2011-07-21 22:17:23.000000000 -0400
10906 +++ linux-3.0.4/arch/x86/kernel/cpu/mcheck/mce-inject.c 2011-08-23 21:47:55.000000000 -0400
10907 @@ -215,7 +215,9 @@ static int inject_init(void)
10908 if (!alloc_cpumask_var(&mce_inject_cpumask, GFP_KERNEL))
10909 return -ENOMEM;
10910 printk(KERN_INFO "Machine check injector initialized\n");
10911 - mce_chrdev_ops.write = mce_write;
10912 + pax_open_kernel();
10913 + *(void **)&mce_chrdev_ops.write = mce_write;
10914 + pax_close_kernel();
10915 register_die_notifier(&mce_raise_nb);
10916 return 0;
10917 }
10918 diff -urNp linux-3.0.4/arch/x86/kernel/cpu/mtrr/main.c linux-3.0.4/arch/x86/kernel/cpu/mtrr/main.c
10919 --- linux-3.0.4/arch/x86/kernel/cpu/mtrr/main.c 2011-09-02 18:11:26.000000000 -0400
10920 +++ linux-3.0.4/arch/x86/kernel/cpu/mtrr/main.c 2011-08-29 23:26:21.000000000 -0400
10921 @@ -62,7 +62,7 @@ static DEFINE_MUTEX(mtrr_mutex);
10922 u64 size_or_mask, size_and_mask;
10923 static bool mtrr_aps_delayed_init;
10924
10925 -static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
10926 +static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
10927
10928 const struct mtrr_ops *mtrr_if;
10929
10930 diff -urNp linux-3.0.4/arch/x86/kernel/cpu/mtrr/mtrr.h linux-3.0.4/arch/x86/kernel/cpu/mtrr/mtrr.h
10931 --- linux-3.0.4/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-07-21 22:17:23.000000000 -0400
10932 +++ linux-3.0.4/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-08-26 19:49:56.000000000 -0400
10933 @@ -25,7 +25,7 @@ struct mtrr_ops {
10934 int (*validate_add_page)(unsigned long base, unsigned long size,
10935 unsigned int type);
10936 int (*have_wrcomb)(void);
10937 -};
10938 +} __do_const;
10939
10940 extern int generic_get_free_region(unsigned long base, unsigned long size,
10941 int replace_reg);
10942 diff -urNp linux-3.0.4/arch/x86/kernel/cpu/perf_event.c linux-3.0.4/arch/x86/kernel/cpu/perf_event.c
10943 --- linux-3.0.4/arch/x86/kernel/cpu/perf_event.c 2011-07-21 22:17:23.000000000 -0400
10944 +++ linux-3.0.4/arch/x86/kernel/cpu/perf_event.c 2011-08-23 21:48:14.000000000 -0400
10945 @@ -781,6 +781,8 @@ static int x86_schedule_events(struct cp
10946 int i, j, w, wmax, num = 0;
10947 struct hw_perf_event *hwc;
10948
10949 + pax_track_stack();
10950 +
10951 bitmap_zero(used_mask, X86_PMC_IDX_MAX);
10952
10953 for (i = 0; i < n; i++) {
10954 @@ -1872,7 +1874,7 @@ perf_callchain_user(struct perf_callchai
10955 break;
10956
10957 perf_callchain_store(entry, frame.return_address);
10958 - fp = frame.next_frame;
10959 + fp = (__force const void __user *)frame.next_frame;
10960 }
10961 }
10962
10963 diff -urNp linux-3.0.4/arch/x86/kernel/crash.c linux-3.0.4/arch/x86/kernel/crash.c
10964 --- linux-3.0.4/arch/x86/kernel/crash.c 2011-07-21 22:17:23.000000000 -0400
10965 +++ linux-3.0.4/arch/x86/kernel/crash.c 2011-08-23 21:47:55.000000000 -0400
10966 @@ -42,7 +42,7 @@ static void kdump_nmi_callback(int cpu,
10967 regs = args->regs;
10968
10969 #ifdef CONFIG_X86_32
10970 - if (!user_mode_vm(regs)) {
10971 + if (!user_mode(regs)) {
10972 crash_fixup_ss_esp(&fixed_regs, regs);
10973 regs = &fixed_regs;
10974 }
10975 diff -urNp linux-3.0.4/arch/x86/kernel/doublefault_32.c linux-3.0.4/arch/x86/kernel/doublefault_32.c
10976 --- linux-3.0.4/arch/x86/kernel/doublefault_32.c 2011-07-21 22:17:23.000000000 -0400
10977 +++ linux-3.0.4/arch/x86/kernel/doublefault_32.c 2011-08-23 21:47:55.000000000 -0400
10978 @@ -11,7 +11,7 @@
10979
10980 #define DOUBLEFAULT_STACKSIZE (1024)
10981 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
10982 -#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
10983 +#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
10984
10985 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
10986
10987 @@ -21,7 +21,7 @@ static void doublefault_fn(void)
10988 unsigned long gdt, tss;
10989
10990 store_gdt(&gdt_desc);
10991 - gdt = gdt_desc.address;
10992 + gdt = (unsigned long)gdt_desc.address;
10993
10994 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
10995
10996 @@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cach
10997 /* 0x2 bit is always set */
10998 .flags = X86_EFLAGS_SF | 0x2,
10999 .sp = STACK_START,
11000 - .es = __USER_DS,
11001 + .es = __KERNEL_DS,
11002 .cs = __KERNEL_CS,
11003 .ss = __KERNEL_DS,
11004 - .ds = __USER_DS,
11005 + .ds = __KERNEL_DS,
11006 .fs = __KERNEL_PERCPU,
11007
11008 .__cr3 = __pa_nodebug(swapper_pg_dir),
11009 diff -urNp linux-3.0.4/arch/x86/kernel/dumpstack_32.c linux-3.0.4/arch/x86/kernel/dumpstack_32.c
11010 --- linux-3.0.4/arch/x86/kernel/dumpstack_32.c 2011-07-21 22:17:23.000000000 -0400
11011 +++ linux-3.0.4/arch/x86/kernel/dumpstack_32.c 2011-08-23 21:47:55.000000000 -0400
11012 @@ -38,15 +38,13 @@ void dump_trace(struct task_struct *task
11013 bp = stack_frame(task, regs);
11014
11015 for (;;) {
11016 - struct thread_info *context;
11017 + void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
11018
11019 - context = (struct thread_info *)
11020 - ((unsigned long)stack & (~(THREAD_SIZE - 1)));
11021 - bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
11022 + bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
11023
11024 - stack = (unsigned long *)context->previous_esp;
11025 - if (!stack)
11026 + if (stack_start == task_stack_page(task))
11027 break;
11028 + stack = *(unsigned long **)stack_start;
11029 if (ops->stack(data, "IRQ") < 0)
11030 break;
11031 touch_nmi_watchdog();
11032 @@ -96,21 +94,22 @@ void show_registers(struct pt_regs *regs
11033 * When in-kernel, we also print out the stack and code at the
11034 * time of the fault..
11035 */
11036 - if (!user_mode_vm(regs)) {
11037 + if (!user_mode(regs)) {
11038 unsigned int code_prologue = code_bytes * 43 / 64;
11039 unsigned int code_len = code_bytes;
11040 unsigned char c;
11041 u8 *ip;
11042 + unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
11043
11044 printk(KERN_EMERG "Stack:\n");
11045 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
11046
11047 printk(KERN_EMERG "Code: ");
11048
11049 - ip = (u8 *)regs->ip - code_prologue;
11050 + ip = (u8 *)regs->ip - code_prologue + cs_base;
11051 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
11052 /* try starting at IP */
11053 - ip = (u8 *)regs->ip;
11054 + ip = (u8 *)regs->ip + cs_base;
11055 code_len = code_len - code_prologue + 1;
11056 }
11057 for (i = 0; i < code_len; i++, ip++) {
11058 @@ -119,7 +118,7 @@ void show_registers(struct pt_regs *regs
11059 printk(" Bad EIP value.");
11060 break;
11061 }
11062 - if (ip == (u8 *)regs->ip)
11063 + if (ip == (u8 *)regs->ip + cs_base)
11064 printk("<%02x> ", c);
11065 else
11066 printk("%02x ", c);
11067 @@ -132,6 +131,7 @@ int is_valid_bugaddr(unsigned long ip)
11068 {
11069 unsigned short ud2;
11070
11071 + ip = ktla_ktva(ip);
11072 if (ip < PAGE_OFFSET)
11073 return 0;
11074 if (probe_kernel_address((unsigned short *)ip, ud2))
11075 diff -urNp linux-3.0.4/arch/x86/kernel/dumpstack_64.c linux-3.0.4/arch/x86/kernel/dumpstack_64.c
11076 --- linux-3.0.4/arch/x86/kernel/dumpstack_64.c 2011-07-21 22:17:23.000000000 -0400
11077 +++ linux-3.0.4/arch/x86/kernel/dumpstack_64.c 2011-08-23 21:47:55.000000000 -0400
11078 @@ -147,9 +147,9 @@ void dump_trace(struct task_struct *task
11079 unsigned long *irq_stack_end =
11080 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
11081 unsigned used = 0;
11082 - struct thread_info *tinfo;
11083 int graph = 0;
11084 unsigned long dummy;
11085 + void *stack_start;
11086
11087 if (!task)
11088 task = current;
11089 @@ -167,10 +167,10 @@ void dump_trace(struct task_struct *task
11090 * current stack address. If the stacks consist of nested
11091 * exceptions
11092 */
11093 - tinfo = task_thread_info(task);
11094 for (;;) {
11095 char *id;
11096 unsigned long *estack_end;
11097 +
11098 estack_end = in_exception_stack(cpu, (unsigned long)stack,
11099 &used, &id);
11100
11101 @@ -178,7 +178,7 @@ void dump_trace(struct task_struct *task
11102 if (ops->stack(data, id) < 0)
11103 break;
11104
11105 - bp = ops->walk_stack(tinfo, stack, bp, ops,
11106 + bp = ops->walk_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
11107 data, estack_end, &graph);
11108 ops->stack(data, "<EOE>");
11109 /*
11110 @@ -197,7 +197,7 @@ void dump_trace(struct task_struct *task
11111 if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
11112 if (ops->stack(data, "IRQ") < 0)
11113 break;
11114 - bp = ops->walk_stack(tinfo, stack, bp,
11115 + bp = ops->walk_stack(task, irq_stack, stack, bp,
11116 ops, data, irq_stack_end, &graph);
11117 /*
11118 * We link to the next stack (which would be
11119 @@ -218,7 +218,8 @@ void dump_trace(struct task_struct *task
11120 /*
11121 * This handles the process stack:
11122 */
11123 - bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
11124 + stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
11125 + bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
11126 put_cpu();
11127 }
11128 EXPORT_SYMBOL(dump_trace);
11129 diff -urNp linux-3.0.4/arch/x86/kernel/dumpstack.c linux-3.0.4/arch/x86/kernel/dumpstack.c
11130 --- linux-3.0.4/arch/x86/kernel/dumpstack.c 2011-07-21 22:17:23.000000000 -0400
11131 +++ linux-3.0.4/arch/x86/kernel/dumpstack.c 2011-08-23 21:48:14.000000000 -0400
11132 @@ -2,6 +2,9 @@
11133 * Copyright (C) 1991, 1992 Linus Torvalds
11134 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
11135 */
11136 +#ifdef CONFIG_GRKERNSEC_HIDESYM
11137 +#define __INCLUDED_BY_HIDESYM 1
11138 +#endif
11139 #include <linux/kallsyms.h>
11140 #include <linux/kprobes.h>
11141 #include <linux/uaccess.h>
11142 @@ -35,9 +38,8 @@ void printk_address(unsigned long addres
11143 static void
11144 print_ftrace_graph_addr(unsigned long addr, void *data,
11145 const struct stacktrace_ops *ops,
11146 - struct thread_info *tinfo, int *graph)
11147 + struct task_struct *task, int *graph)
11148 {
11149 - struct task_struct *task = tinfo->task;
11150 unsigned long ret_addr;
11151 int index = task->curr_ret_stack;
11152
11153 @@ -58,7 +60,7 @@ print_ftrace_graph_addr(unsigned long ad
11154 static inline void
11155 print_ftrace_graph_addr(unsigned long addr, void *data,
11156 const struct stacktrace_ops *ops,
11157 - struct thread_info *tinfo, int *graph)
11158 + struct task_struct *task, int *graph)
11159 { }
11160 #endif
11161
11162 @@ -69,10 +71,8 @@ print_ftrace_graph_addr(unsigned long ad
11163 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
11164 */
11165
11166 -static inline int valid_stack_ptr(struct thread_info *tinfo,
11167 - void *p, unsigned int size, void *end)
11168 +static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
11169 {
11170 - void *t = tinfo;
11171 if (end) {
11172 if (p < end && p >= (end-THREAD_SIZE))
11173 return 1;
11174 @@ -83,14 +83,14 @@ static inline int valid_stack_ptr(struct
11175 }
11176
11177 unsigned long
11178 -print_context_stack(struct thread_info *tinfo,
11179 +print_context_stack(struct task_struct *task, void *stack_start,
11180 unsigned long *stack, unsigned long bp,
11181 const struct stacktrace_ops *ops, void *data,
11182 unsigned long *end, int *graph)
11183 {
11184 struct stack_frame *frame = (struct stack_frame *)bp;
11185
11186 - while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
11187 + while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
11188 unsigned long addr;
11189
11190 addr = *stack;
11191 @@ -102,7 +102,7 @@ print_context_stack(struct thread_info *
11192 } else {
11193 ops->address(data, addr, 0);
11194 }
11195 - print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
11196 + print_ftrace_graph_addr(addr, data, ops, task, graph);
11197 }
11198 stack++;
11199 }
11200 @@ -111,7 +111,7 @@ print_context_stack(struct thread_info *
11201 EXPORT_SYMBOL_GPL(print_context_stack);
11202
11203 unsigned long
11204 -print_context_stack_bp(struct thread_info *tinfo,
11205 +print_context_stack_bp(struct task_struct *task, void *stack_start,
11206 unsigned long *stack, unsigned long bp,
11207 const struct stacktrace_ops *ops, void *data,
11208 unsigned long *end, int *graph)
11209 @@ -119,7 +119,7 @@ print_context_stack_bp(struct thread_inf
11210 struct stack_frame *frame = (struct stack_frame *)bp;
11211 unsigned long *ret_addr = &frame->return_address;
11212
11213 - while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
11214 + while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
11215 unsigned long addr = *ret_addr;
11216
11217 if (!__kernel_text_address(addr))
11218 @@ -128,7 +128,7 @@ print_context_stack_bp(struct thread_inf
11219 ops->address(data, addr, 1);
11220 frame = frame->next_frame;
11221 ret_addr = &frame->return_address;
11222 - print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
11223 + print_ftrace_graph_addr(addr, data, ops, task, graph);
11224 }
11225
11226 return (unsigned long)frame;
11227 @@ -186,7 +186,7 @@ void dump_stack(void)
11228
11229 bp = stack_frame(current, NULL);
11230 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
11231 - current->pid, current->comm, print_tainted(),
11232 + task_pid_nr(current), current->comm, print_tainted(),
11233 init_utsname()->release,
11234 (int)strcspn(init_utsname()->version, " "),
11235 init_utsname()->version);
11236 @@ -222,6 +222,8 @@ unsigned __kprobes long oops_begin(void)
11237 }
11238 EXPORT_SYMBOL_GPL(oops_begin);
11239
11240 +extern void gr_handle_kernel_exploit(void);
11241 +
11242 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
11243 {
11244 if (regs && kexec_should_crash(current))
11245 @@ -243,7 +245,10 @@ void __kprobes oops_end(unsigned long fl
11246 panic("Fatal exception in interrupt");
11247 if (panic_on_oops)
11248 panic("Fatal exception");
11249 - do_exit(signr);
11250 +
11251 + gr_handle_kernel_exploit();
11252 +
11253 + do_group_exit(signr);
11254 }
11255
11256 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
11257 @@ -269,7 +274,7 @@ int __kprobes __die(const char *str, str
11258
11259 show_registers(regs);
11260 #ifdef CONFIG_X86_32
11261 - if (user_mode_vm(regs)) {
11262 + if (user_mode(regs)) {
11263 sp = regs->sp;
11264 ss = regs->ss & 0xffff;
11265 } else {
11266 @@ -297,7 +302,7 @@ void die(const char *str, struct pt_regs
11267 unsigned long flags = oops_begin();
11268 int sig = SIGSEGV;
11269
11270 - if (!user_mode_vm(regs))
11271 + if (!user_mode(regs))
11272 report_bug(regs->ip, regs);
11273
11274 if (__die(str, regs, err))
11275 diff -urNp linux-3.0.4/arch/x86/kernel/early_printk.c linux-3.0.4/arch/x86/kernel/early_printk.c
11276 --- linux-3.0.4/arch/x86/kernel/early_printk.c 2011-07-21 22:17:23.000000000 -0400
11277 +++ linux-3.0.4/arch/x86/kernel/early_printk.c 2011-08-23 21:48:14.000000000 -0400
11278 @@ -7,6 +7,7 @@
11279 #include <linux/pci_regs.h>
11280 #include <linux/pci_ids.h>
11281 #include <linux/errno.h>
11282 +#include <linux/sched.h>
11283 #include <asm/io.h>
11284 #include <asm/processor.h>
11285 #include <asm/fcntl.h>
11286 @@ -179,6 +180,8 @@ asmlinkage void early_printk(const char
11287 int n;
11288 va_list ap;
11289
11290 + pax_track_stack();
11291 +
11292 va_start(ap, fmt);
11293 n = vscnprintf(buf, sizeof(buf), fmt, ap);
11294 early_console->write(early_console, buf, n);
11295 diff -urNp linux-3.0.4/arch/x86/kernel/entry_32.S linux-3.0.4/arch/x86/kernel/entry_32.S
11296 --- linux-3.0.4/arch/x86/kernel/entry_32.S 2011-07-21 22:17:23.000000000 -0400
11297 +++ linux-3.0.4/arch/x86/kernel/entry_32.S 2011-08-30 18:23:52.000000000 -0400
11298 @@ -185,13 +185,146 @@
11299 /*CFI_REL_OFFSET gs, PT_GS*/
11300 .endm
11301 .macro SET_KERNEL_GS reg
11302 +
11303 +#ifdef CONFIG_CC_STACKPROTECTOR
11304 movl $(__KERNEL_STACK_CANARY), \reg
11305 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
11306 + movl $(__USER_DS), \reg
11307 +#else
11308 + xorl \reg, \reg
11309 +#endif
11310 +
11311 movl \reg, %gs
11312 .endm
11313
11314 #endif /* CONFIG_X86_32_LAZY_GS */
11315
11316 -.macro SAVE_ALL
11317 +.macro pax_enter_kernel
11318 +#ifdef CONFIG_PAX_KERNEXEC
11319 + call pax_enter_kernel
11320 +#endif
11321 +.endm
11322 +
11323 +.macro pax_exit_kernel
11324 +#ifdef CONFIG_PAX_KERNEXEC
11325 + call pax_exit_kernel
11326 +#endif
11327 +.endm
11328 +
11329 +#ifdef CONFIG_PAX_KERNEXEC
11330 +ENTRY(pax_enter_kernel)
11331 +#ifdef CONFIG_PARAVIRT
11332 + pushl %eax
11333 + pushl %ecx
11334 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
11335 + mov %eax, %esi
11336 +#else
11337 + mov %cr0, %esi
11338 +#endif
11339 + bts $16, %esi
11340 + jnc 1f
11341 + mov %cs, %esi
11342 + cmp $__KERNEL_CS, %esi
11343 + jz 3f
11344 + ljmp $__KERNEL_CS, $3f
11345 +1: ljmp $__KERNEXEC_KERNEL_CS, $2f
11346 +2:
11347 +#ifdef CONFIG_PARAVIRT
11348 + mov %esi, %eax
11349 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
11350 +#else
11351 + mov %esi, %cr0
11352 +#endif
11353 +3:
11354 +#ifdef CONFIG_PARAVIRT
11355 + popl %ecx
11356 + popl %eax
11357 +#endif
11358 + ret
11359 +ENDPROC(pax_enter_kernel)
11360 +
11361 +ENTRY(pax_exit_kernel)
11362 +#ifdef CONFIG_PARAVIRT
11363 + pushl %eax
11364 + pushl %ecx
11365 +#endif
11366 + mov %cs, %esi
11367 + cmp $__KERNEXEC_KERNEL_CS, %esi
11368 + jnz 2f
11369 +#ifdef CONFIG_PARAVIRT
11370 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
11371 + mov %eax, %esi
11372 +#else
11373 + mov %cr0, %esi
11374 +#endif
11375 + btr $16, %esi
11376 + ljmp $__KERNEL_CS, $1f
11377 +1:
11378 +#ifdef CONFIG_PARAVIRT
11379 + mov %esi, %eax
11380 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
11381 +#else
11382 + mov %esi, %cr0
11383 +#endif
11384 +2:
11385 +#ifdef CONFIG_PARAVIRT
11386 + popl %ecx
11387 + popl %eax
11388 +#endif
11389 + ret
11390 +ENDPROC(pax_exit_kernel)
11391 +#endif
11392 +
11393 +.macro pax_erase_kstack
11394 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11395 + call pax_erase_kstack
11396 +#endif
11397 +.endm
11398 +
11399 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11400 +/*
11401 + * ebp: thread_info
11402 + * ecx, edx: can be clobbered
11403 + */
11404 +ENTRY(pax_erase_kstack)
11405 + pushl %edi
11406 + pushl %eax
11407 +
11408 + mov TI_lowest_stack(%ebp), %edi
11409 + mov $-0xBEEF, %eax
11410 + std
11411 +
11412 +1: mov %edi, %ecx
11413 + and $THREAD_SIZE_asm - 1, %ecx
11414 + shr $2, %ecx
11415 + repne scasl
11416 + jecxz 2f
11417 +
11418 + cmp $2*16, %ecx
11419 + jc 2f
11420 +
11421 + mov $2*16, %ecx
11422 + repe scasl
11423 + jecxz 2f
11424 + jne 1b
11425 +
11426 +2: cld
11427 + mov %esp, %ecx
11428 + sub %edi, %ecx
11429 + shr $2, %ecx
11430 + rep stosl
11431 +
11432 + mov TI_task_thread_sp0(%ebp), %edi
11433 + sub $128, %edi
11434 + mov %edi, TI_lowest_stack(%ebp)
11435 +
11436 + popl %eax
11437 + popl %edi
11438 + ret
11439 +ENDPROC(pax_erase_kstack)
11440 +#endif
11441 +
11442 +.macro __SAVE_ALL _DS
11443 cld
11444 PUSH_GS
11445 pushl_cfi %fs
11446 @@ -214,7 +347,7 @@
11447 CFI_REL_OFFSET ecx, 0
11448 pushl_cfi %ebx
11449 CFI_REL_OFFSET ebx, 0
11450 - movl $(__USER_DS), %edx
11451 + movl $\_DS, %edx
11452 movl %edx, %ds
11453 movl %edx, %es
11454 movl $(__KERNEL_PERCPU), %edx
11455 @@ -222,6 +355,15 @@
11456 SET_KERNEL_GS %edx
11457 .endm
11458
11459 +.macro SAVE_ALL
11460 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
11461 + __SAVE_ALL __KERNEL_DS
11462 + pax_enter_kernel
11463 +#else
11464 + __SAVE_ALL __USER_DS
11465 +#endif
11466 +.endm
11467 +
11468 .macro RESTORE_INT_REGS
11469 popl_cfi %ebx
11470 CFI_RESTORE ebx
11471 @@ -332,7 +474,15 @@ check_userspace:
11472 movb PT_CS(%esp), %al
11473 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
11474 cmpl $USER_RPL, %eax
11475 +
11476 +#ifdef CONFIG_PAX_KERNEXEC
11477 + jae resume_userspace
11478 +
11479 + PAX_EXIT_KERNEL
11480 + jmp resume_kernel
11481 +#else
11482 jb resume_kernel # not returning to v8086 or userspace
11483 +#endif
11484
11485 ENTRY(resume_userspace)
11486 LOCKDEP_SYS_EXIT
11487 @@ -344,7 +494,7 @@ ENTRY(resume_userspace)
11488 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
11489 # int/exception return?
11490 jne work_pending
11491 - jmp restore_all
11492 + jmp restore_all_pax
11493 END(ret_from_exception)
11494
11495 #ifdef CONFIG_PREEMPT
11496 @@ -394,23 +544,34 @@ sysenter_past_esp:
11497 /*CFI_REL_OFFSET cs, 0*/
11498 /*
11499 * Push current_thread_info()->sysenter_return to the stack.
11500 - * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
11501 - * pushed above; +8 corresponds to copy_thread's esp0 setting.
11502 */
11503 - pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
11504 + pushl_cfi $0
11505 CFI_REL_OFFSET eip, 0
11506
11507 pushl_cfi %eax
11508 SAVE_ALL
11509 + GET_THREAD_INFO(%ebp)
11510 + movl TI_sysenter_return(%ebp),%ebp
11511 + movl %ebp,PT_EIP(%esp)
11512 ENABLE_INTERRUPTS(CLBR_NONE)
11513
11514 /*
11515 * Load the potential sixth argument from user stack.
11516 * Careful about security.
11517 */
11518 + movl PT_OLDESP(%esp),%ebp
11519 +
11520 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11521 + mov PT_OLDSS(%esp),%ds
11522 +1: movl %ds:(%ebp),%ebp
11523 + push %ss
11524 + pop %ds
11525 +#else
11526 cmpl $__PAGE_OFFSET-3,%ebp
11527 jae syscall_fault
11528 1: movl (%ebp),%ebp
11529 +#endif
11530 +
11531 movl %ebp,PT_EBP(%esp)
11532 .section __ex_table,"a"
11533 .align 4
11534 @@ -433,12 +594,24 @@ sysenter_do_call:
11535 testl $_TIF_ALLWORK_MASK, %ecx
11536 jne sysexit_audit
11537 sysenter_exit:
11538 +
11539 +#ifdef CONFIG_PAX_RANDKSTACK
11540 + pushl_cfi %eax
11541 + movl %esp, %eax
11542 + call pax_randomize_kstack
11543 + popl_cfi %eax
11544 +#endif
11545 +
11546 + pax_erase_kstack
11547 +
11548 /* if something modifies registers it must also disable sysexit */
11549 movl PT_EIP(%esp), %edx
11550 movl PT_OLDESP(%esp), %ecx
11551 xorl %ebp,%ebp
11552 TRACE_IRQS_ON
11553 1: mov PT_FS(%esp), %fs
11554 +2: mov PT_DS(%esp), %ds
11555 +3: mov PT_ES(%esp), %es
11556 PTGS_TO_GS
11557 ENABLE_INTERRUPTS_SYSEXIT
11558
11559 @@ -455,6 +628,9 @@ sysenter_audit:
11560 movl %eax,%edx /* 2nd arg: syscall number */
11561 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
11562 call audit_syscall_entry
11563 +
11564 + pax_erase_kstack
11565 +
11566 pushl_cfi %ebx
11567 movl PT_EAX(%esp),%eax /* reload syscall number */
11568 jmp sysenter_do_call
11569 @@ -481,11 +657,17 @@ sysexit_audit:
11570
11571 CFI_ENDPROC
11572 .pushsection .fixup,"ax"
11573 -2: movl $0,PT_FS(%esp)
11574 +4: movl $0,PT_FS(%esp)
11575 + jmp 1b
11576 +5: movl $0,PT_DS(%esp)
11577 + jmp 1b
11578 +6: movl $0,PT_ES(%esp)
11579 jmp 1b
11580 .section __ex_table,"a"
11581 .align 4
11582 - .long 1b,2b
11583 + .long 1b,4b
11584 + .long 2b,5b
11585 + .long 3b,6b
11586 .popsection
11587 PTGS_TO_GS_EX
11588 ENDPROC(ia32_sysenter_target)
11589 @@ -518,6 +700,15 @@ syscall_exit:
11590 testl $_TIF_ALLWORK_MASK, %ecx # current->work
11591 jne syscall_exit_work
11592
11593 +restore_all_pax:
11594 +
11595 +#ifdef CONFIG_PAX_RANDKSTACK
11596 + movl %esp, %eax
11597 + call pax_randomize_kstack
11598 +#endif
11599 +
11600 + pax_erase_kstack
11601 +
11602 restore_all:
11603 TRACE_IRQS_IRET
11604 restore_all_notrace:
11605 @@ -577,14 +768,34 @@ ldt_ss:
11606 * compensating for the offset by changing to the ESPFIX segment with
11607 * a base address that matches for the difference.
11608 */
11609 -#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
11610 +#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
11611 mov %esp, %edx /* load kernel esp */
11612 mov PT_OLDESP(%esp), %eax /* load userspace esp */
11613 mov %dx, %ax /* eax: new kernel esp */
11614 sub %eax, %edx /* offset (low word is 0) */
11615 +#ifdef CONFIG_SMP
11616 + movl PER_CPU_VAR(cpu_number), %ebx
11617 + shll $PAGE_SHIFT_asm, %ebx
11618 + addl $cpu_gdt_table, %ebx
11619 +#else
11620 + movl $cpu_gdt_table, %ebx
11621 +#endif
11622 shr $16, %edx
11623 - mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
11624 - mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
11625 +
11626 +#ifdef CONFIG_PAX_KERNEXEC
11627 + mov %cr0, %esi
11628 + btr $16, %esi
11629 + mov %esi, %cr0
11630 +#endif
11631 +
11632 + mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
11633 + mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
11634 +
11635 +#ifdef CONFIG_PAX_KERNEXEC
11636 + bts $16, %esi
11637 + mov %esi, %cr0
11638 +#endif
11639 +
11640 pushl_cfi $__ESPFIX_SS
11641 pushl_cfi %eax /* new kernel esp */
11642 /* Disable interrupts, but do not irqtrace this section: we
11643 @@ -613,29 +824,23 @@ work_resched:
11644 movl TI_flags(%ebp), %ecx
11645 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
11646 # than syscall tracing?
11647 - jz restore_all
11648 + jz restore_all_pax
11649 testb $_TIF_NEED_RESCHED, %cl
11650 jnz work_resched
11651
11652 work_notifysig: # deal with pending signals and
11653 # notify-resume requests
11654 + movl %esp, %eax
11655 #ifdef CONFIG_VM86
11656 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
11657 - movl %esp, %eax
11658 - jne work_notifysig_v86 # returning to kernel-space or
11659 + jz 1f # returning to kernel-space or
11660 # vm86-space
11661 - xorl %edx, %edx
11662 - call do_notify_resume
11663 - jmp resume_userspace_sig
11664
11665 - ALIGN
11666 -work_notifysig_v86:
11667 pushl_cfi %ecx # save ti_flags for do_notify_resume
11668 call save_v86_state # %eax contains pt_regs pointer
11669 popl_cfi %ecx
11670 movl %eax, %esp
11671 -#else
11672 - movl %esp, %eax
11673 +1:
11674 #endif
11675 xorl %edx, %edx
11676 call do_notify_resume
11677 @@ -648,6 +853,9 @@ syscall_trace_entry:
11678 movl $-ENOSYS,PT_EAX(%esp)
11679 movl %esp, %eax
11680 call syscall_trace_enter
11681 +
11682 + pax_erase_kstack
11683 +
11684 /* What it returned is what we'll actually use. */
11685 cmpl $(nr_syscalls), %eax
11686 jnae syscall_call
11687 @@ -670,6 +878,10 @@ END(syscall_exit_work)
11688
11689 RING0_INT_FRAME # can't unwind into user space anyway
11690 syscall_fault:
11691 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11692 + push %ss
11693 + pop %ds
11694 +#endif
11695 GET_THREAD_INFO(%ebp)
11696 movl $-EFAULT,PT_EAX(%esp)
11697 jmp resume_userspace
11698 @@ -752,6 +964,36 @@ ptregs_clone:
11699 CFI_ENDPROC
11700 ENDPROC(ptregs_clone)
11701
11702 + ALIGN;
11703 +ENTRY(kernel_execve)
11704 + CFI_STARTPROC
11705 + pushl_cfi %ebp
11706 + sub $PT_OLDSS+4,%esp
11707 + pushl_cfi %edi
11708 + pushl_cfi %ecx
11709 + pushl_cfi %eax
11710 + lea 3*4(%esp),%edi
11711 + mov $PT_OLDSS/4+1,%ecx
11712 + xorl %eax,%eax
11713 + rep stosl
11714 + popl_cfi %eax
11715 + popl_cfi %ecx
11716 + popl_cfi %edi
11717 + movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
11718 + pushl_cfi %esp
11719 + call sys_execve
11720 + add $4,%esp
11721 + CFI_ADJUST_CFA_OFFSET -4
11722 + GET_THREAD_INFO(%ebp)
11723 + test %eax,%eax
11724 + jz syscall_exit
11725 + add $PT_OLDSS+4,%esp
11726 + CFI_ADJUST_CFA_OFFSET -PT_OLDSS-4
11727 + popl_cfi %ebp
11728 + ret
11729 + CFI_ENDPROC
11730 +ENDPROC(kernel_execve)
11731 +
11732 .macro FIXUP_ESPFIX_STACK
11733 /*
11734 * Switch back for ESPFIX stack to the normal zerobased stack
11735 @@ -761,8 +1003,15 @@ ENDPROC(ptregs_clone)
11736 * normal stack and adjusts ESP with the matching offset.
11737 */
11738 /* fixup the stack */
11739 - mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
11740 - mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
11741 +#ifdef CONFIG_SMP
11742 + movl PER_CPU_VAR(cpu_number), %ebx
11743 + shll $PAGE_SHIFT_asm, %ebx
11744 + addl $cpu_gdt_table, %ebx
11745 +#else
11746 + movl $cpu_gdt_table, %ebx
11747 +#endif
11748 + mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
11749 + mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
11750 shl $16, %eax
11751 addl %esp, %eax /* the adjusted stack pointer */
11752 pushl_cfi $__KERNEL_DS
11753 @@ -1213,7 +1462,6 @@ return_to_handler:
11754 jmp *%ecx
11755 #endif
11756
11757 -.section .rodata,"a"
11758 #include "syscall_table_32.S"
11759
11760 syscall_table_size=(.-sys_call_table)
11761 @@ -1259,9 +1507,12 @@ error_code:
11762 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
11763 REG_TO_PTGS %ecx
11764 SET_KERNEL_GS %ecx
11765 - movl $(__USER_DS), %ecx
11766 + movl $(__KERNEL_DS), %ecx
11767 movl %ecx, %ds
11768 movl %ecx, %es
11769 +
11770 + pax_enter_kernel
11771 +
11772 TRACE_IRQS_OFF
11773 movl %esp,%eax # pt_regs pointer
11774 call *%edi
11775 @@ -1346,6 +1597,9 @@ nmi_stack_correct:
11776 xorl %edx,%edx # zero error code
11777 movl %esp,%eax # pt_regs pointer
11778 call do_nmi
11779 +
11780 + pax_exit_kernel
11781 +
11782 jmp restore_all_notrace
11783 CFI_ENDPROC
11784
11785 @@ -1382,6 +1636,9 @@ nmi_espfix_stack:
11786 FIXUP_ESPFIX_STACK # %eax == %esp
11787 xorl %edx,%edx # zero error code
11788 call do_nmi
11789 +
11790 + pax_exit_kernel
11791 +
11792 RESTORE_REGS
11793 lss 12+4(%esp), %esp # back to espfix stack
11794 CFI_ADJUST_CFA_OFFSET -24
11795 diff -urNp linux-3.0.4/arch/x86/kernel/entry_64.S linux-3.0.4/arch/x86/kernel/entry_64.S
11796 --- linux-3.0.4/arch/x86/kernel/entry_64.S 2011-07-21 22:17:23.000000000 -0400
11797 +++ linux-3.0.4/arch/x86/kernel/entry_64.S 2011-09-17 18:31:51.000000000 -0400
11798 @@ -53,6 +53,7 @@
11799 #include <asm/paravirt.h>
11800 #include <asm/ftrace.h>
11801 #include <asm/percpu.h>
11802 +#include <asm/pgtable.h>
11803
11804 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
11805 #include <linux/elf-em.h>
11806 @@ -176,6 +177,264 @@ ENTRY(native_usergs_sysret64)
11807 ENDPROC(native_usergs_sysret64)
11808 #endif /* CONFIG_PARAVIRT */
11809
11810 + .macro ljmpq sel, off
11811 +#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
11812 + .byte 0x48; ljmp *1234f(%rip)
11813 + .pushsection .rodata
11814 + .align 16
11815 + 1234: .quad \off; .word \sel
11816 + .popsection
11817 +#else
11818 + pushq $\sel
11819 + pushq $\off
11820 + lretq
11821 +#endif
11822 + .endm
11823 +
11824 + .macro pax_enter_kernel
11825 +#ifdef CONFIG_PAX_KERNEXEC
11826 + call pax_enter_kernel
11827 +#endif
11828 + .endm
11829 +
11830 + .macro pax_exit_kernel
11831 +#ifdef CONFIG_PAX_KERNEXEC
11832 + call pax_exit_kernel
11833 +#endif
11834 + .endm
11835 +
11836 +#ifdef CONFIG_PAX_KERNEXEC
11837 +ENTRY(pax_enter_kernel)
11838 + pushq %rdi
11839 +
11840 +#ifdef CONFIG_PARAVIRT
11841 + PV_SAVE_REGS(CLBR_RDI)
11842 +#endif
11843 +
11844 + GET_CR0_INTO_RDI
11845 + bts $16,%rdi
11846 + jnc 1f
11847 + mov %cs,%edi
11848 + cmp $__KERNEL_CS,%edi
11849 + jz 3f
11850 + ljmpq __KERNEL_CS,3f
11851 +1: ljmpq __KERNEXEC_KERNEL_CS,2f
11852 +2: SET_RDI_INTO_CR0
11853 +3:
11854 +
11855 +#ifdef CONFIG_PARAVIRT
11856 + PV_RESTORE_REGS(CLBR_RDI)
11857 +#endif
11858 +
11859 + popq %rdi
11860 + retq
11861 +ENDPROC(pax_enter_kernel)
11862 +
11863 +ENTRY(pax_exit_kernel)
11864 + pushq %rdi
11865 +
11866 +#ifdef CONFIG_PARAVIRT
11867 + PV_SAVE_REGS(CLBR_RDI)
11868 +#endif
11869 +
11870 + mov %cs,%rdi
11871 + cmp $__KERNEXEC_KERNEL_CS,%edi
11872 + jnz 2f
11873 + GET_CR0_INTO_RDI
11874 + btr $16,%rdi
11875 + ljmpq __KERNEL_CS,1f
11876 +1: SET_RDI_INTO_CR0
11877 +2:
11878 +
11879 +#ifdef CONFIG_PARAVIRT
11880 + PV_RESTORE_REGS(CLBR_RDI);
11881 +#endif
11882 +
11883 + popq %rdi
11884 + retq
11885 +ENDPROC(pax_exit_kernel)
11886 +#endif
11887 +
11888 + .macro pax_enter_kernel_user
11889 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11890 + call pax_enter_kernel_user
11891 +#endif
11892 + .endm
11893 +
11894 + .macro pax_exit_kernel_user
11895 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11896 + call pax_exit_kernel_user
11897 +#endif
11898 +#ifdef CONFIG_PAX_RANDKSTACK
11899 + push %rax
11900 + call pax_randomize_kstack
11901 + pop %rax
11902 +#endif
11903 + .endm
11904 +
11905 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11906 +ENTRY(pax_enter_kernel_user)
11907 + pushq %rdi
11908 + pushq %rbx
11909 +
11910 +#ifdef CONFIG_PARAVIRT
11911 + PV_SAVE_REGS(CLBR_RDI)
11912 +#endif
11913 +
11914 + GET_CR3_INTO_RDI
11915 + mov %rdi,%rbx
11916 + add $__START_KERNEL_map,%rbx
11917 + sub phys_base(%rip),%rbx
11918 +
11919 +#ifdef CONFIG_PARAVIRT
11920 + pushq %rdi
11921 + cmpl $0, pv_info+PARAVIRT_enabled
11922 + jz 1f
11923 + i = 0
11924 + .rept USER_PGD_PTRS
11925 + mov i*8(%rbx),%rsi
11926 + mov $0,%sil
11927 + lea i*8(%rbx),%rdi
11928 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
11929 + i = i + 1
11930 + .endr
11931 + jmp 2f
11932 +1:
11933 +#endif
11934 +
11935 + i = 0
11936 + .rept USER_PGD_PTRS
11937 + movb $0,i*8(%rbx)
11938 + i = i + 1
11939 + .endr
11940 +
11941 +#ifdef CONFIG_PARAVIRT
11942 +2: popq %rdi
11943 +#endif
11944 + SET_RDI_INTO_CR3
11945 +
11946 +#ifdef CONFIG_PAX_KERNEXEC
11947 + GET_CR0_INTO_RDI
11948 + bts $16,%rdi
11949 + SET_RDI_INTO_CR0
11950 +#endif
11951 +
11952 +#ifdef CONFIG_PARAVIRT
11953 + PV_RESTORE_REGS(CLBR_RDI)
11954 +#endif
11955 +
11956 + popq %rbx
11957 + popq %rdi
11958 + retq
11959 +ENDPROC(pax_enter_kernel_user)
11960 +
11961 +ENTRY(pax_exit_kernel_user)
11962 + push %rdi
11963 +
11964 +#ifdef CONFIG_PARAVIRT
11965 + pushq %rbx
11966 + PV_SAVE_REGS(CLBR_RDI)
11967 +#endif
11968 +
11969 +#ifdef CONFIG_PAX_KERNEXEC
11970 + GET_CR0_INTO_RDI
11971 + btr $16,%rdi
11972 + SET_RDI_INTO_CR0
11973 +#endif
11974 +
11975 + GET_CR3_INTO_RDI
11976 + add $__START_KERNEL_map,%rdi
11977 + sub phys_base(%rip),%rdi
11978 +
11979 +#ifdef CONFIG_PARAVIRT
11980 + cmpl $0, pv_info+PARAVIRT_enabled
11981 + jz 1f
11982 + mov %rdi,%rbx
11983 + i = 0
11984 + .rept USER_PGD_PTRS
11985 + mov i*8(%rbx),%rsi
11986 + mov $0x67,%sil
11987 + lea i*8(%rbx),%rdi
11988 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
11989 + i = i + 1
11990 + .endr
11991 + jmp 2f
11992 +1:
11993 +#endif
11994 +
11995 + i = 0
11996 + .rept USER_PGD_PTRS
11997 + movb $0x67,i*8(%rdi)
11998 + i = i + 1
11999 + .endr
12000 +
12001 +#ifdef CONFIG_PARAVIRT
12002 +2: PV_RESTORE_REGS(CLBR_RDI)
12003 + popq %rbx
12004 +#endif
12005 +
12006 + popq %rdi
12007 + retq
12008 +ENDPROC(pax_exit_kernel_user)
12009 +#endif
12010 +
12011 + .macro pax_erase_kstack
12012 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12013 + call pax_erase_kstack
12014 +#endif
12015 + .endm
12016 +
12017 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12018 +/*
12019 + * r10: thread_info
12020 + * rcx, rdx: can be clobbered
12021 + */
12022 +ENTRY(pax_erase_kstack)
12023 + pushq %rdi
12024 + pushq %rax
12025 + pushq %r10
12026 +
12027 + GET_THREAD_INFO(%r10)
12028 + mov TI_lowest_stack(%r10), %rdi
12029 + mov $-0xBEEF, %rax
12030 + std
12031 +
12032 +1: mov %edi, %ecx
12033 + and $THREAD_SIZE_asm - 1, %ecx
12034 + shr $3, %ecx
12035 + repne scasq
12036 + jecxz 2f
12037 +
12038 + cmp $2*8, %ecx
12039 + jc 2f
12040 +
12041 + mov $2*8, %ecx
12042 + repe scasq
12043 + jecxz 2f
12044 + jne 1b
12045 +
12046 +2: cld
12047 + mov %esp, %ecx
12048 + sub %edi, %ecx
12049 +
12050 + cmp $THREAD_SIZE_asm, %rcx
12051 + jb 3f
12052 + ud2
12053 +3:
12054 +
12055 + shr $3, %ecx
12056 + rep stosq
12057 +
12058 + mov TI_task_thread_sp0(%r10), %rdi
12059 + sub $256, %rdi
12060 + mov %rdi, TI_lowest_stack(%r10)
12061 +
12062 + popq %r10
12063 + popq %rax
12064 + popq %rdi
12065 + ret
12066 +ENDPROC(pax_erase_kstack)
12067 +#endif
12068
12069 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
12070 #ifdef CONFIG_TRACE_IRQFLAGS
12071 @@ -318,7 +577,7 @@ ENTRY(save_args)
12072 leaq -RBP+8(%rsp),%rdi /* arg1 for handler */
12073 movq_cfi rbp, 8 /* push %rbp */
12074 leaq 8(%rsp), %rbp /* mov %rsp, %ebp */
12075 - testl $3, CS(%rdi)
12076 + testb $3, CS(%rdi)
12077 je 1f
12078 SWAPGS
12079 /*
12080 @@ -409,7 +668,7 @@ ENTRY(ret_from_fork)
12081
12082 RESTORE_REST
12083
12084 - testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
12085 + testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
12086 je int_ret_from_sys_call
12087
12088 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
12089 @@ -455,7 +714,7 @@ END(ret_from_fork)
12090 ENTRY(system_call)
12091 CFI_STARTPROC simple
12092 CFI_SIGNAL_FRAME
12093 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
12094 + CFI_DEF_CFA rsp,0
12095 CFI_REGISTER rip,rcx
12096 /*CFI_REGISTER rflags,r11*/
12097 SWAPGS_UNSAFE_STACK
12098 @@ -468,12 +727,13 @@ ENTRY(system_call_after_swapgs)
12099
12100 movq %rsp,PER_CPU_VAR(old_rsp)
12101 movq PER_CPU_VAR(kernel_stack),%rsp
12102 + pax_enter_kernel_user
12103 /*
12104 * No need to follow this irqs off/on section - it's straight
12105 * and short:
12106 */
12107 ENABLE_INTERRUPTS(CLBR_NONE)
12108 - SAVE_ARGS 8,1
12109 + SAVE_ARGS 8*6,1
12110 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
12111 movq %rcx,RIP-ARGOFFSET(%rsp)
12112 CFI_REL_OFFSET rip,RIP-ARGOFFSET
12113 @@ -502,6 +762,8 @@ sysret_check:
12114 andl %edi,%edx
12115 jnz sysret_careful
12116 CFI_REMEMBER_STATE
12117 + pax_exit_kernel_user
12118 + pax_erase_kstack
12119 /*
12120 * sysretq will re-enable interrupts:
12121 */
12122 @@ -560,6 +822,9 @@ auditsys:
12123 movq %rax,%rsi /* 2nd arg: syscall number */
12124 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
12125 call audit_syscall_entry
12126 +
12127 + pax_erase_kstack
12128 +
12129 LOAD_ARGS 0 /* reload call-clobbered registers */
12130 jmp system_call_fastpath
12131
12132 @@ -590,6 +855,9 @@ tracesys:
12133 FIXUP_TOP_OF_STACK %rdi
12134 movq %rsp,%rdi
12135 call syscall_trace_enter
12136 +
12137 + pax_erase_kstack
12138 +
12139 /*
12140 * Reload arg registers from stack in case ptrace changed them.
12141 * We don't reload %rax because syscall_trace_enter() returned
12142 @@ -611,7 +879,7 @@ tracesys:
12143 GLOBAL(int_ret_from_sys_call)
12144 DISABLE_INTERRUPTS(CLBR_NONE)
12145 TRACE_IRQS_OFF
12146 - testl $3,CS-ARGOFFSET(%rsp)
12147 + testb $3,CS-ARGOFFSET(%rsp)
12148 je retint_restore_args
12149 movl $_TIF_ALLWORK_MASK,%edi
12150 /* edi: mask to check */
12151 @@ -793,6 +1061,16 @@ END(interrupt)
12152 CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
12153 call save_args
12154 PARTIAL_FRAME 0
12155 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12156 + testb $3, CS(%rdi)
12157 + jnz 1f
12158 + pax_enter_kernel
12159 + jmp 2f
12160 +1: pax_enter_kernel_user
12161 +2:
12162 +#else
12163 + pax_enter_kernel
12164 +#endif
12165 call \func
12166 .endm
12167
12168 @@ -825,7 +1103,7 @@ ret_from_intr:
12169 CFI_ADJUST_CFA_OFFSET -8
12170 exit_intr:
12171 GET_THREAD_INFO(%rcx)
12172 - testl $3,CS-ARGOFFSET(%rsp)
12173 + testb $3,CS-ARGOFFSET(%rsp)
12174 je retint_kernel
12175
12176 /* Interrupt came from user space */
12177 @@ -847,12 +1125,18 @@ retint_swapgs: /* return to user-space
12178 * The iretq could re-enable interrupts:
12179 */
12180 DISABLE_INTERRUPTS(CLBR_ANY)
12181 + pax_exit_kernel_user
12182 + pax_erase_kstack
12183 TRACE_IRQS_IRETQ
12184 SWAPGS
12185 jmp restore_args
12186
12187 retint_restore_args: /* return to kernel space */
12188 DISABLE_INTERRUPTS(CLBR_ANY)
12189 + pax_exit_kernel
12190 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
12191 + orb $0x80,0x7+RIP-ARGOFFSET(%rsp)
12192 +#endif
12193 /*
12194 * The iretq could re-enable interrupts:
12195 */
12196 @@ -1027,6 +1311,16 @@ ENTRY(\sym)
12197 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12198 call error_entry
12199 DEFAULT_FRAME 0
12200 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12201 + testb $3, CS(%rsp)
12202 + jnz 1f
12203 + pax_enter_kernel
12204 + jmp 2f
12205 +1: pax_enter_kernel_user
12206 +2:
12207 +#else
12208 + pax_enter_kernel
12209 +#endif
12210 movq %rsp,%rdi /* pt_regs pointer */
12211 xorl %esi,%esi /* no error code */
12212 call \do_sym
12213 @@ -1044,6 +1338,16 @@ ENTRY(\sym)
12214 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12215 call save_paranoid
12216 TRACE_IRQS_OFF
12217 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12218 + testb $3, CS(%rsp)
12219 + jnz 1f
12220 + pax_enter_kernel
12221 + jmp 2f
12222 +1: pax_enter_kernel_user
12223 +2:
12224 +#else
12225 + pax_enter_kernel
12226 +#endif
12227 movq %rsp,%rdi /* pt_regs pointer */
12228 xorl %esi,%esi /* no error code */
12229 call \do_sym
12230 @@ -1052,7 +1356,7 @@ ENTRY(\sym)
12231 END(\sym)
12232 .endm
12233
12234 -#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
12235 +#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r12)
12236 .macro paranoidzeroentry_ist sym do_sym ist
12237 ENTRY(\sym)
12238 INTR_FRAME
12239 @@ -1062,8 +1366,24 @@ ENTRY(\sym)
12240 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12241 call save_paranoid
12242 TRACE_IRQS_OFF
12243 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12244 + testb $3, CS(%rsp)
12245 + jnz 1f
12246 + pax_enter_kernel
12247 + jmp 2f
12248 +1: pax_enter_kernel_user
12249 +2:
12250 +#else
12251 + pax_enter_kernel
12252 +#endif
12253 movq %rsp,%rdi /* pt_regs pointer */
12254 xorl %esi,%esi /* no error code */
12255 +#ifdef CONFIG_SMP
12256 + imul $TSS_size, PER_CPU_VAR(cpu_number), %r12d
12257 + lea init_tss(%r12), %r12
12258 +#else
12259 + lea init_tss(%rip), %r12
12260 +#endif
12261 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
12262 call \do_sym
12263 addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
12264 @@ -1080,6 +1400,16 @@ ENTRY(\sym)
12265 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12266 call error_entry
12267 DEFAULT_FRAME 0
12268 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12269 + testb $3, CS(%rsp)
12270 + jnz 1f
12271 + pax_enter_kernel
12272 + jmp 2f
12273 +1: pax_enter_kernel_user
12274 +2:
12275 +#else
12276 + pax_enter_kernel
12277 +#endif
12278 movq %rsp,%rdi /* pt_regs pointer */
12279 movq ORIG_RAX(%rsp),%rsi /* get error code */
12280 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
12281 @@ -1099,6 +1429,16 @@ ENTRY(\sym)
12282 call save_paranoid
12283 DEFAULT_FRAME 0
12284 TRACE_IRQS_OFF
12285 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12286 + testb $3, CS(%rsp)
12287 + jnz 1f
12288 + pax_enter_kernel
12289 + jmp 2f
12290 +1: pax_enter_kernel_user
12291 +2:
12292 +#else
12293 + pax_enter_kernel
12294 +#endif
12295 movq %rsp,%rdi /* pt_regs pointer */
12296 movq ORIG_RAX(%rsp),%rsi /* get error code */
12297 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
12298 @@ -1361,16 +1701,35 @@ ENTRY(paranoid_exit)
12299 TRACE_IRQS_OFF
12300 testl %ebx,%ebx /* swapgs needed? */
12301 jnz paranoid_restore
12302 - testl $3,CS(%rsp)
12303 + testb $3,CS(%rsp)
12304 jnz paranoid_userspace
12305 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12306 + pax_exit_kernel
12307 + TRACE_IRQS_IRETQ 0
12308 + SWAPGS_UNSAFE_STACK
12309 + RESTORE_ALL 8
12310 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
12311 + orb $0x80,0x7(%rsp)
12312 +#endif
12313 + jmp irq_return
12314 +#endif
12315 paranoid_swapgs:
12316 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12317 + pax_exit_kernel_user
12318 +#else
12319 + pax_exit_kernel
12320 +#endif
12321 TRACE_IRQS_IRETQ 0
12322 SWAPGS_UNSAFE_STACK
12323 RESTORE_ALL 8
12324 jmp irq_return
12325 paranoid_restore:
12326 + pax_exit_kernel
12327 TRACE_IRQS_IRETQ 0
12328 RESTORE_ALL 8
12329 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
12330 + orb $0x80,0x7(%rsp)
12331 +#endif
12332 jmp irq_return
12333 paranoid_userspace:
12334 GET_THREAD_INFO(%rcx)
12335 @@ -1426,7 +1785,7 @@ ENTRY(error_entry)
12336 movq_cfi r14, R14+8
12337 movq_cfi r15, R15+8
12338 xorl %ebx,%ebx
12339 - testl $3,CS+8(%rsp)
12340 + testb $3,CS+8(%rsp)
12341 je error_kernelspace
12342 error_swapgs:
12343 SWAPGS
12344 @@ -1490,6 +1849,16 @@ ENTRY(nmi)
12345 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12346 call save_paranoid
12347 DEFAULT_FRAME 0
12348 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12349 + testb $3, CS(%rsp)
12350 + jnz 1f
12351 + pax_enter_kernel
12352 + jmp 2f
12353 +1: pax_enter_kernel_user
12354 +2:
12355 +#else
12356 + pax_enter_kernel
12357 +#endif
12358 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
12359 movq %rsp,%rdi
12360 movq $-1,%rsi
12361 @@ -1500,12 +1869,32 @@ ENTRY(nmi)
12362 DISABLE_INTERRUPTS(CLBR_NONE)
12363 testl %ebx,%ebx /* swapgs needed? */
12364 jnz nmi_restore
12365 - testl $3,CS(%rsp)
12366 + testb $3,CS(%rsp)
12367 jnz nmi_userspace
12368 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12369 + pax_exit_kernel
12370 + SWAPGS_UNSAFE_STACK
12371 + RESTORE_ALL 8
12372 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
12373 + orb $0x80,0x7(%rsp)
12374 +#endif
12375 + jmp irq_return
12376 +#endif
12377 nmi_swapgs:
12378 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12379 + pax_exit_kernel_user
12380 +#else
12381 + pax_exit_kernel
12382 +#endif
12383 SWAPGS_UNSAFE_STACK
12384 + RESTORE_ALL 8
12385 + jmp irq_return
12386 nmi_restore:
12387 + pax_exit_kernel
12388 RESTORE_ALL 8
12389 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
12390 + orb $0x80,0x7(%rsp)
12391 +#endif
12392 jmp irq_return
12393 nmi_userspace:
12394 GET_THREAD_INFO(%rcx)
12395 diff -urNp linux-3.0.4/arch/x86/kernel/ftrace.c linux-3.0.4/arch/x86/kernel/ftrace.c
12396 --- linux-3.0.4/arch/x86/kernel/ftrace.c 2011-07-21 22:17:23.000000000 -0400
12397 +++ linux-3.0.4/arch/x86/kernel/ftrace.c 2011-08-23 21:47:55.000000000 -0400
12398 @@ -126,7 +126,7 @@ static void *mod_code_ip; /* holds the
12399 static const void *mod_code_newcode; /* holds the text to write to the IP */
12400
12401 static unsigned nmi_wait_count;
12402 -static atomic_t nmi_update_count = ATOMIC_INIT(0);
12403 +static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
12404
12405 int ftrace_arch_read_dyn_info(char *buf, int size)
12406 {
12407 @@ -134,7 +134,7 @@ int ftrace_arch_read_dyn_info(char *buf,
12408
12409 r = snprintf(buf, size, "%u %u",
12410 nmi_wait_count,
12411 - atomic_read(&nmi_update_count));
12412 + atomic_read_unchecked(&nmi_update_count));
12413 return r;
12414 }
12415
12416 @@ -177,8 +177,10 @@ void ftrace_nmi_enter(void)
12417
12418 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
12419 smp_rmb();
12420 + pax_open_kernel();
12421 ftrace_mod_code();
12422 - atomic_inc(&nmi_update_count);
12423 + pax_close_kernel();
12424 + atomic_inc_unchecked(&nmi_update_count);
12425 }
12426 /* Must have previous changes seen before executions */
12427 smp_mb();
12428 @@ -271,6 +273,8 @@ ftrace_modify_code(unsigned long ip, uns
12429 {
12430 unsigned char replaced[MCOUNT_INSN_SIZE];
12431
12432 + ip = ktla_ktva(ip);
12433 +
12434 /*
12435 * Note: Due to modules and __init, code can
12436 * disappear and change, we need to protect against faulting
12437 @@ -327,7 +331,7 @@ int ftrace_update_ftrace_func(ftrace_fun
12438 unsigned char old[MCOUNT_INSN_SIZE], *new;
12439 int ret;
12440
12441 - memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
12442 + memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
12443 new = ftrace_call_replace(ip, (unsigned long)func);
12444 ret = ftrace_modify_code(ip, old, new);
12445
12446 @@ -353,6 +357,8 @@ static int ftrace_mod_jmp(unsigned long
12447 {
12448 unsigned char code[MCOUNT_INSN_SIZE];
12449
12450 + ip = ktla_ktva(ip);
12451 +
12452 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
12453 return -EFAULT;
12454
12455 diff -urNp linux-3.0.4/arch/x86/kernel/head32.c linux-3.0.4/arch/x86/kernel/head32.c
12456 --- linux-3.0.4/arch/x86/kernel/head32.c 2011-07-21 22:17:23.000000000 -0400
12457 +++ linux-3.0.4/arch/x86/kernel/head32.c 2011-08-23 21:47:55.000000000 -0400
12458 @@ -19,6 +19,7 @@
12459 #include <asm/io_apic.h>
12460 #include <asm/bios_ebda.h>
12461 #include <asm/tlbflush.h>
12462 +#include <asm/boot.h>
12463
12464 static void __init i386_default_early_setup(void)
12465 {
12466 @@ -33,7 +34,7 @@ void __init i386_start_kernel(void)
12467 {
12468 memblock_init();
12469
12470 - memblock_x86_reserve_range(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
12471 + memblock_x86_reserve_range(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop), "TEXT DATA BSS");
12472
12473 #ifdef CONFIG_BLK_DEV_INITRD
12474 /* Reserve INITRD */
12475 diff -urNp linux-3.0.4/arch/x86/kernel/head_32.S linux-3.0.4/arch/x86/kernel/head_32.S
12476 --- linux-3.0.4/arch/x86/kernel/head_32.S 2011-07-21 22:17:23.000000000 -0400
12477 +++ linux-3.0.4/arch/x86/kernel/head_32.S 2011-08-23 21:47:55.000000000 -0400
12478 @@ -25,6 +25,12 @@
12479 /* Physical address */
12480 #define pa(X) ((X) - __PAGE_OFFSET)
12481
12482 +#ifdef CONFIG_PAX_KERNEXEC
12483 +#define ta(X) (X)
12484 +#else
12485 +#define ta(X) ((X) - __PAGE_OFFSET)
12486 +#endif
12487 +
12488 /*
12489 * References to members of the new_cpu_data structure.
12490 */
12491 @@ -54,11 +60,7 @@
12492 * and small than max_low_pfn, otherwise will waste some page table entries
12493 */
12494
12495 -#if PTRS_PER_PMD > 1
12496 -#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
12497 -#else
12498 -#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
12499 -#endif
12500 +#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
12501
12502 /* Number of possible pages in the lowmem region */
12503 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
12504 @@ -77,6 +79,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_P
12505 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
12506
12507 /*
12508 + * Real beginning of normal "text" segment
12509 + */
12510 +ENTRY(stext)
12511 +ENTRY(_stext)
12512 +
12513 +/*
12514 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
12515 * %esi points to the real-mode code as a 32-bit pointer.
12516 * CS and DS must be 4 GB flat segments, but we don't depend on
12517 @@ -84,6 +92,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
12518 * can.
12519 */
12520 __HEAD
12521 +
12522 +#ifdef CONFIG_PAX_KERNEXEC
12523 + jmp startup_32
12524 +/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
12525 +.fill PAGE_SIZE-5,1,0xcc
12526 +#endif
12527 +
12528 ENTRY(startup_32)
12529 movl pa(stack_start),%ecx
12530
12531 @@ -105,6 +120,57 @@ ENTRY(startup_32)
12532 2:
12533 leal -__PAGE_OFFSET(%ecx),%esp
12534
12535 +#ifdef CONFIG_SMP
12536 + movl $pa(cpu_gdt_table),%edi
12537 + movl $__per_cpu_load,%eax
12538 + movw %ax,__KERNEL_PERCPU + 2(%edi)
12539 + rorl $16,%eax
12540 + movb %al,__KERNEL_PERCPU + 4(%edi)
12541 + movb %ah,__KERNEL_PERCPU + 7(%edi)
12542 + movl $__per_cpu_end - 1,%eax
12543 + subl $__per_cpu_start,%eax
12544 + movw %ax,__KERNEL_PERCPU + 0(%edi)
12545 +#endif
12546 +
12547 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12548 + movl $NR_CPUS,%ecx
12549 + movl $pa(cpu_gdt_table),%edi
12550 +1:
12551 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
12552 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
12553 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
12554 + addl $PAGE_SIZE_asm,%edi
12555 + loop 1b
12556 +#endif
12557 +
12558 +#ifdef CONFIG_PAX_KERNEXEC
12559 + movl $pa(boot_gdt),%edi
12560 + movl $__LOAD_PHYSICAL_ADDR,%eax
12561 + movw %ax,__BOOT_CS + 2(%edi)
12562 + rorl $16,%eax
12563 + movb %al,__BOOT_CS + 4(%edi)
12564 + movb %ah,__BOOT_CS + 7(%edi)
12565 + rorl $16,%eax
12566 +
12567 + ljmp $(__BOOT_CS),$1f
12568 +1:
12569 +
12570 + movl $NR_CPUS,%ecx
12571 + movl $pa(cpu_gdt_table),%edi
12572 + addl $__PAGE_OFFSET,%eax
12573 +1:
12574 + movw %ax,__KERNEL_CS + 2(%edi)
12575 + movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
12576 + rorl $16,%eax
12577 + movb %al,__KERNEL_CS + 4(%edi)
12578 + movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
12579 + movb %ah,__KERNEL_CS + 7(%edi)
12580 + movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
12581 + rorl $16,%eax
12582 + addl $PAGE_SIZE_asm,%edi
12583 + loop 1b
12584 +#endif
12585 +
12586 /*
12587 * Clear BSS first so that there are no surprises...
12588 */
12589 @@ -195,8 +261,11 @@ ENTRY(startup_32)
12590 movl %eax, pa(max_pfn_mapped)
12591
12592 /* Do early initialization of the fixmap area */
12593 - movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
12594 - movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
12595 +#ifdef CONFIG_COMPAT_VDSO
12596 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
12597 +#else
12598 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
12599 +#endif
12600 #else /* Not PAE */
12601
12602 page_pde_offset = (__PAGE_OFFSET >> 20);
12603 @@ -226,8 +295,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
12604 movl %eax, pa(max_pfn_mapped)
12605
12606 /* Do early initialization of the fixmap area */
12607 - movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
12608 - movl %eax,pa(initial_page_table+0xffc)
12609 +#ifdef CONFIG_COMPAT_VDSO
12610 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
12611 +#else
12612 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
12613 +#endif
12614 #endif
12615
12616 #ifdef CONFIG_PARAVIRT
12617 @@ -241,9 +313,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
12618 cmpl $num_subarch_entries, %eax
12619 jae bad_subarch
12620
12621 - movl pa(subarch_entries)(,%eax,4), %eax
12622 - subl $__PAGE_OFFSET, %eax
12623 - jmp *%eax
12624 + jmp *pa(subarch_entries)(,%eax,4)
12625
12626 bad_subarch:
12627 WEAK(lguest_entry)
12628 @@ -255,10 +325,10 @@ WEAK(xen_entry)
12629 __INITDATA
12630
12631 subarch_entries:
12632 - .long default_entry /* normal x86/PC */
12633 - .long lguest_entry /* lguest hypervisor */
12634 - .long xen_entry /* Xen hypervisor */
12635 - .long default_entry /* Moorestown MID */
12636 + .long ta(default_entry) /* normal x86/PC */
12637 + .long ta(lguest_entry) /* lguest hypervisor */
12638 + .long ta(xen_entry) /* Xen hypervisor */
12639 + .long ta(default_entry) /* Moorestown MID */
12640 num_subarch_entries = (. - subarch_entries) / 4
12641 .previous
12642 #else
12643 @@ -312,6 +382,7 @@ default_entry:
12644 orl %edx,%eax
12645 movl %eax,%cr4
12646
12647 +#ifdef CONFIG_X86_PAE
12648 testb $X86_CR4_PAE, %al # check if PAE is enabled
12649 jz 6f
12650
12651 @@ -340,6 +411,9 @@ default_entry:
12652 /* Make changes effective */
12653 wrmsr
12654
12655 + btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
12656 +#endif
12657 +
12658 6:
12659
12660 /*
12661 @@ -443,7 +517,7 @@ is386: movl $2,%ecx # set MP
12662 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
12663 movl %eax,%ss # after changing gdt.
12664
12665 - movl $(__USER_DS),%eax # DS/ES contains default USER segment
12666 +# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
12667 movl %eax,%ds
12668 movl %eax,%es
12669
12670 @@ -457,15 +531,22 @@ is386: movl $2,%ecx # set MP
12671 */
12672 cmpb $0,ready
12673 jne 1f
12674 - movl $gdt_page,%eax
12675 + movl $cpu_gdt_table,%eax
12676 movl $stack_canary,%ecx
12677 +#ifdef CONFIG_SMP
12678 + addl $__per_cpu_load,%ecx
12679 +#endif
12680 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
12681 shrl $16, %ecx
12682 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
12683 movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
12684 1:
12685 -#endif
12686 movl $(__KERNEL_STACK_CANARY),%eax
12687 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
12688 + movl $(__USER_DS),%eax
12689 +#else
12690 + xorl %eax,%eax
12691 +#endif
12692 movl %eax,%gs
12693
12694 xorl %eax,%eax # Clear LDT
12695 @@ -558,22 +639,22 @@ early_page_fault:
12696 jmp early_fault
12697
12698 early_fault:
12699 - cld
12700 #ifdef CONFIG_PRINTK
12701 + cmpl $1,%ss:early_recursion_flag
12702 + je hlt_loop
12703 + incl %ss:early_recursion_flag
12704 + cld
12705 pusha
12706 movl $(__KERNEL_DS),%eax
12707 movl %eax,%ds
12708 movl %eax,%es
12709 - cmpl $2,early_recursion_flag
12710 - je hlt_loop
12711 - incl early_recursion_flag
12712 movl %cr2,%eax
12713 pushl %eax
12714 pushl %edx /* trapno */
12715 pushl $fault_msg
12716 call printk
12717 +; call dump_stack
12718 #endif
12719 - call dump_stack
12720 hlt_loop:
12721 hlt
12722 jmp hlt_loop
12723 @@ -581,8 +662,11 @@ hlt_loop:
12724 /* This is the default interrupt "handler" :-) */
12725 ALIGN
12726 ignore_int:
12727 - cld
12728 #ifdef CONFIG_PRINTK
12729 + cmpl $2,%ss:early_recursion_flag
12730 + je hlt_loop
12731 + incl %ss:early_recursion_flag
12732 + cld
12733 pushl %eax
12734 pushl %ecx
12735 pushl %edx
12736 @@ -591,9 +675,6 @@ ignore_int:
12737 movl $(__KERNEL_DS),%eax
12738 movl %eax,%ds
12739 movl %eax,%es
12740 - cmpl $2,early_recursion_flag
12741 - je hlt_loop
12742 - incl early_recursion_flag
12743 pushl 16(%esp)
12744 pushl 24(%esp)
12745 pushl 32(%esp)
12746 @@ -622,29 +703,43 @@ ENTRY(initial_code)
12747 /*
12748 * BSS section
12749 */
12750 -__PAGE_ALIGNED_BSS
12751 - .align PAGE_SIZE
12752 #ifdef CONFIG_X86_PAE
12753 +.section .initial_pg_pmd,"a",@progbits
12754 initial_pg_pmd:
12755 .fill 1024*KPMDS,4,0
12756 #else
12757 +.section .initial_page_table,"a",@progbits
12758 ENTRY(initial_page_table)
12759 .fill 1024,4,0
12760 #endif
12761 +.section .initial_pg_fixmap,"a",@progbits
12762 initial_pg_fixmap:
12763 .fill 1024,4,0
12764 +.section .empty_zero_page,"a",@progbits
12765 ENTRY(empty_zero_page)
12766 .fill 4096,1,0
12767 +.section .swapper_pg_dir,"a",@progbits
12768 ENTRY(swapper_pg_dir)
12769 +#ifdef CONFIG_X86_PAE
12770 + .fill 4,8,0
12771 +#else
12772 .fill 1024,4,0
12773 +#endif
12774 +
12775 +/*
12776 + * The IDT has to be page-aligned to simplify the Pentium
12777 + * F0 0F bug workaround.. We have a special link segment
12778 + * for this.
12779 + */
12780 +.section .idt,"a",@progbits
12781 +ENTRY(idt_table)
12782 + .fill 256,8,0
12783
12784 /*
12785 * This starts the data section.
12786 */
12787 #ifdef CONFIG_X86_PAE
12788 -__PAGE_ALIGNED_DATA
12789 - /* Page-aligned for the benefit of paravirt? */
12790 - .align PAGE_SIZE
12791 +.section .initial_page_table,"a",@progbits
12792 ENTRY(initial_page_table)
12793 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
12794 # if KPMDS == 3
12795 @@ -663,18 +758,27 @@ ENTRY(initial_page_table)
12796 # error "Kernel PMDs should be 1, 2 or 3"
12797 # endif
12798 .align PAGE_SIZE /* needs to be page-sized too */
12799 +
12800 +#ifdef CONFIG_PAX_PER_CPU_PGD
12801 +ENTRY(cpu_pgd)
12802 + .rept NR_CPUS
12803 + .fill 4,8,0
12804 + .endr
12805 +#endif
12806 +
12807 #endif
12808
12809 .data
12810 .balign 4
12811 ENTRY(stack_start)
12812 - .long init_thread_union+THREAD_SIZE
12813 + .long init_thread_union+THREAD_SIZE-8
12814 +
12815 +ready: .byte 0
12816
12817 +.section .rodata,"a",@progbits
12818 early_recursion_flag:
12819 .long 0
12820
12821 -ready: .byte 0
12822 -
12823 int_msg:
12824 .asciz "Unknown interrupt or fault at: %p %p %p\n"
12825
12826 @@ -707,7 +811,7 @@ fault_msg:
12827 .word 0 # 32 bit align gdt_desc.address
12828 boot_gdt_descr:
12829 .word __BOOT_DS+7
12830 - .long boot_gdt - __PAGE_OFFSET
12831 + .long pa(boot_gdt)
12832
12833 .word 0 # 32-bit align idt_desc.address
12834 idt_descr:
12835 @@ -718,7 +822,7 @@ idt_descr:
12836 .word 0 # 32 bit align gdt_desc.address
12837 ENTRY(early_gdt_descr)
12838 .word GDT_ENTRIES*8-1
12839 - .long gdt_page /* Overwritten for secondary CPUs */
12840 + .long cpu_gdt_table /* Overwritten for secondary CPUs */
12841
12842 /*
12843 * The boot_gdt must mirror the equivalent in setup.S and is
12844 @@ -727,5 +831,65 @@ ENTRY(early_gdt_descr)
12845 .align L1_CACHE_BYTES
12846 ENTRY(boot_gdt)
12847 .fill GDT_ENTRY_BOOT_CS,8,0
12848 - .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
12849 - .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
12850 + .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
12851 + .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
12852 +
12853 + .align PAGE_SIZE_asm
12854 +ENTRY(cpu_gdt_table)
12855 + .rept NR_CPUS
12856 + .quad 0x0000000000000000 /* NULL descriptor */
12857 + .quad 0x0000000000000000 /* 0x0b reserved */
12858 + .quad 0x0000000000000000 /* 0x13 reserved */
12859 + .quad 0x0000000000000000 /* 0x1b reserved */
12860 +
12861 +#ifdef CONFIG_PAX_KERNEXEC
12862 + .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
12863 +#else
12864 + .quad 0x0000000000000000 /* 0x20 unused */
12865 +#endif
12866 +
12867 + .quad 0x0000000000000000 /* 0x28 unused */
12868 + .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
12869 + .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
12870 + .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
12871 + .quad 0x0000000000000000 /* 0x4b reserved */
12872 + .quad 0x0000000000000000 /* 0x53 reserved */
12873 + .quad 0x0000000000000000 /* 0x5b reserved */
12874 +
12875 + .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
12876 + .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
12877 + .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
12878 + .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
12879 +
12880 + .quad 0x0000000000000000 /* 0x80 TSS descriptor */
12881 + .quad 0x0000000000000000 /* 0x88 LDT descriptor */
12882 +
12883 + /*
12884 + * Segments used for calling PnP BIOS have byte granularity.
12885 + * The code segments and data segments have fixed 64k limits,
12886 + * the transfer segment sizes are set at run time.
12887 + */
12888 + .quad 0x00409b000000ffff /* 0x90 32-bit code */
12889 + .quad 0x00009b000000ffff /* 0x98 16-bit code */
12890 + .quad 0x000093000000ffff /* 0xa0 16-bit data */
12891 + .quad 0x0000930000000000 /* 0xa8 16-bit data */
12892 + .quad 0x0000930000000000 /* 0xb0 16-bit data */
12893 +
12894 + /*
12895 + * The APM segments have byte granularity and their bases
12896 + * are set at run time. All have 64k limits.
12897 + */
12898 + .quad 0x00409b000000ffff /* 0xb8 APM CS code */
12899 + .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
12900 + .quad 0x004093000000ffff /* 0xc8 APM DS data */
12901 +
12902 + .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
12903 + .quad 0x0040930000000000 /* 0xd8 - PERCPU */
12904 + .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
12905 + .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
12906 + .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
12907 + .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
12908 +
12909 + /* Be sure this is zeroed to avoid false validations in Xen */
12910 + .fill PAGE_SIZE_asm - GDT_SIZE,1,0
12911 + .endr
12912 diff -urNp linux-3.0.4/arch/x86/kernel/head_64.S linux-3.0.4/arch/x86/kernel/head_64.S
12913 --- linux-3.0.4/arch/x86/kernel/head_64.S 2011-07-21 22:17:23.000000000 -0400
12914 +++ linux-3.0.4/arch/x86/kernel/head_64.S 2011-08-23 21:47:55.000000000 -0400
12915 @@ -19,6 +19,7 @@
12916 #include <asm/cache.h>
12917 #include <asm/processor-flags.h>
12918 #include <asm/percpu.h>
12919 +#include <asm/cpufeature.h>
12920
12921 #ifdef CONFIG_PARAVIRT
12922 #include <asm/asm-offsets.h>
12923 @@ -38,6 +39,10 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET
12924 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
12925 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
12926 L3_START_KERNEL = pud_index(__START_KERNEL_map)
12927 +L4_VMALLOC_START = pgd_index(VMALLOC_START)
12928 +L3_VMALLOC_START = pud_index(VMALLOC_START)
12929 +L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
12930 +L3_VMEMMAP_START = pud_index(VMEMMAP_START)
12931
12932 .text
12933 __HEAD
12934 @@ -85,35 +90,22 @@ startup_64:
12935 */
12936 addq %rbp, init_level4_pgt + 0(%rip)
12937 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
12938 + addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
12939 + addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
12940 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
12941
12942 addq %rbp, level3_ident_pgt + 0(%rip)
12943 +#ifndef CONFIG_XEN
12944 + addq %rbp, level3_ident_pgt + 8(%rip)
12945 +#endif
12946
12947 - addq %rbp, level3_kernel_pgt + (510*8)(%rip)
12948 - addq %rbp, level3_kernel_pgt + (511*8)(%rip)
12949 + addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
12950
12951 - addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
12952 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
12953 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
12954
12955 - /* Add an Identity mapping if I am above 1G */
12956 - leaq _text(%rip), %rdi
12957 - andq $PMD_PAGE_MASK, %rdi
12958 -
12959 - movq %rdi, %rax
12960 - shrq $PUD_SHIFT, %rax
12961 - andq $(PTRS_PER_PUD - 1), %rax
12962 - jz ident_complete
12963 -
12964 - leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
12965 - leaq level3_ident_pgt(%rip), %rbx
12966 - movq %rdx, 0(%rbx, %rax, 8)
12967 -
12968 - movq %rdi, %rax
12969 - shrq $PMD_SHIFT, %rax
12970 - andq $(PTRS_PER_PMD - 1), %rax
12971 - leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
12972 - leaq level2_spare_pgt(%rip), %rbx
12973 - movq %rdx, 0(%rbx, %rax, 8)
12974 -ident_complete:
12975 + addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
12976 + addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
12977
12978 /*
12979 * Fixup the kernel text+data virtual addresses. Note that
12980 @@ -160,8 +152,8 @@ ENTRY(secondary_startup_64)
12981 * after the boot processor executes this code.
12982 */
12983
12984 - /* Enable PAE mode and PGE */
12985 - movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
12986 + /* Enable PAE mode and PSE/PGE */
12987 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
12988 movq %rax, %cr4
12989
12990 /* Setup early boot stage 4 level pagetables. */
12991 @@ -183,9 +175,14 @@ ENTRY(secondary_startup_64)
12992 movl $MSR_EFER, %ecx
12993 rdmsr
12994 btsl $_EFER_SCE, %eax /* Enable System Call */
12995 - btl $20,%edi /* No Execute supported? */
12996 + btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
12997 jnc 1f
12998 btsl $_EFER_NX, %eax
12999 + leaq init_level4_pgt(%rip), %rdi
13000 + btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
13001 + btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
13002 + btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
13003 + btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
13004 1: wrmsr /* Make changes effective */
13005
13006 /* Setup cr0 */
13007 @@ -269,7 +266,7 @@ ENTRY(secondary_startup_64)
13008 bad_address:
13009 jmp bad_address
13010
13011 - .section ".init.text","ax"
13012 + __INIT
13013 #ifdef CONFIG_EARLY_PRINTK
13014 .globl early_idt_handlers
13015 early_idt_handlers:
13016 @@ -314,18 +311,23 @@ ENTRY(early_idt_handler)
13017 #endif /* EARLY_PRINTK */
13018 1: hlt
13019 jmp 1b
13020 + .previous
13021
13022 #ifdef CONFIG_EARLY_PRINTK
13023 + __INITDATA
13024 early_recursion_flag:
13025 .long 0
13026 + .previous
13027
13028 + .section .rodata,"a",@progbits
13029 early_idt_msg:
13030 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
13031 early_idt_ripmsg:
13032 .asciz "RIP %s\n"
13033 -#endif /* CONFIG_EARLY_PRINTK */
13034 .previous
13035 +#endif /* CONFIG_EARLY_PRINTK */
13036
13037 + .section .rodata,"a",@progbits
13038 #define NEXT_PAGE(name) \
13039 .balign PAGE_SIZE; \
13040 ENTRY(name)
13041 @@ -338,7 +340,6 @@ ENTRY(name)
13042 i = i + 1 ; \
13043 .endr
13044
13045 - .data
13046 /*
13047 * This default setting generates an ident mapping at address 0x100000
13048 * and a mapping for the kernel that precisely maps virtual address
13049 @@ -349,13 +350,36 @@ NEXT_PAGE(init_level4_pgt)
13050 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
13051 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
13052 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
13053 + .org init_level4_pgt + L4_VMALLOC_START*8, 0
13054 + .quad level3_vmalloc_pgt - __START_KERNEL_map + _KERNPG_TABLE
13055 + .org init_level4_pgt + L4_VMEMMAP_START*8, 0
13056 + .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
13057 .org init_level4_pgt + L4_START_KERNEL*8, 0
13058 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
13059 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
13060
13061 +#ifdef CONFIG_PAX_PER_CPU_PGD
13062 +NEXT_PAGE(cpu_pgd)
13063 + .rept NR_CPUS
13064 + .fill 512,8,0
13065 + .endr
13066 +#endif
13067 +
13068 NEXT_PAGE(level3_ident_pgt)
13069 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
13070 +#ifdef CONFIG_XEN
13071 .fill 511,8,0
13072 +#else
13073 + .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
13074 + .fill 510,8,0
13075 +#endif
13076 +
13077 +NEXT_PAGE(level3_vmalloc_pgt)
13078 + .fill 512,8,0
13079 +
13080 +NEXT_PAGE(level3_vmemmap_pgt)
13081 + .fill L3_VMEMMAP_START,8,0
13082 + .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
13083
13084 NEXT_PAGE(level3_kernel_pgt)
13085 .fill L3_START_KERNEL,8,0
13086 @@ -363,20 +387,23 @@ NEXT_PAGE(level3_kernel_pgt)
13087 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
13088 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
13089
13090 +NEXT_PAGE(level2_vmemmap_pgt)
13091 + .fill 512,8,0
13092 +
13093 NEXT_PAGE(level2_fixmap_pgt)
13094 - .fill 506,8,0
13095 - .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
13096 - /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
13097 - .fill 5,8,0
13098 + .fill 507,8,0
13099 + .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
13100 + /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
13101 + .fill 4,8,0
13102
13103 -NEXT_PAGE(level1_fixmap_pgt)
13104 +NEXT_PAGE(level1_vsyscall_pgt)
13105 .fill 512,8,0
13106
13107 -NEXT_PAGE(level2_ident_pgt)
13108 - /* Since I easily can, map the first 1G.
13109 + /* Since I easily can, map the first 2G.
13110 * Don't set NX because code runs from these pages.
13111 */
13112 - PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
13113 +NEXT_PAGE(level2_ident_pgt)
13114 + PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
13115
13116 NEXT_PAGE(level2_kernel_pgt)
13117 /*
13118 @@ -389,33 +416,55 @@ NEXT_PAGE(level2_kernel_pgt)
13119 * If you want to increase this then increase MODULES_VADDR
13120 * too.)
13121 */
13122 - PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
13123 - KERNEL_IMAGE_SIZE/PMD_SIZE)
13124 -
13125 -NEXT_PAGE(level2_spare_pgt)
13126 - .fill 512, 8, 0
13127 + PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
13128
13129 #undef PMDS
13130 #undef NEXT_PAGE
13131
13132 - .data
13133 + .align PAGE_SIZE
13134 +ENTRY(cpu_gdt_table)
13135 + .rept NR_CPUS
13136 + .quad 0x0000000000000000 /* NULL descriptor */
13137 + .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
13138 + .quad 0x00af9b000000ffff /* __KERNEL_CS */
13139 + .quad 0x00cf93000000ffff /* __KERNEL_DS */
13140 + .quad 0x00cffb000000ffff /* __USER32_CS */
13141 + .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
13142 + .quad 0x00affb000000ffff /* __USER_CS */
13143 +
13144 +#ifdef CONFIG_PAX_KERNEXEC
13145 + .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
13146 +#else
13147 + .quad 0x0 /* unused */
13148 +#endif
13149 +
13150 + .quad 0,0 /* TSS */
13151 + .quad 0,0 /* LDT */
13152 + .quad 0,0,0 /* three TLS descriptors */
13153 + .quad 0x0000f40000000000 /* node/CPU stored in limit */
13154 + /* asm/segment.h:GDT_ENTRIES must match this */
13155 +
13156 + /* zero the remaining page */
13157 + .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
13158 + .endr
13159 +
13160 .align 16
13161 .globl early_gdt_descr
13162 early_gdt_descr:
13163 .word GDT_ENTRIES*8-1
13164 early_gdt_descr_base:
13165 - .quad INIT_PER_CPU_VAR(gdt_page)
13166 + .quad cpu_gdt_table
13167
13168 ENTRY(phys_base)
13169 /* This must match the first entry in level2_kernel_pgt */
13170 .quad 0x0000000000000000
13171
13172 #include "../../x86/xen/xen-head.S"
13173 -
13174 - .section .bss, "aw", @nobits
13175 +
13176 + .section .rodata,"a",@progbits
13177 .align L1_CACHE_BYTES
13178 ENTRY(idt_table)
13179 - .skip IDT_ENTRIES * 16
13180 + .fill 512,8,0
13181
13182 __PAGE_ALIGNED_BSS
13183 .align PAGE_SIZE
13184 diff -urNp linux-3.0.4/arch/x86/kernel/i386_ksyms_32.c linux-3.0.4/arch/x86/kernel/i386_ksyms_32.c
13185 --- linux-3.0.4/arch/x86/kernel/i386_ksyms_32.c 2011-07-21 22:17:23.000000000 -0400
13186 +++ linux-3.0.4/arch/x86/kernel/i386_ksyms_32.c 2011-08-23 21:47:55.000000000 -0400
13187 @@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
13188 EXPORT_SYMBOL(cmpxchg8b_emu);
13189 #endif
13190
13191 +EXPORT_SYMBOL_GPL(cpu_gdt_table);
13192 +
13193 /* Networking helper routines. */
13194 EXPORT_SYMBOL(csum_partial_copy_generic);
13195 +EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
13196 +EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
13197
13198 EXPORT_SYMBOL(__get_user_1);
13199 EXPORT_SYMBOL(__get_user_2);
13200 @@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
13201
13202 EXPORT_SYMBOL(csum_partial);
13203 EXPORT_SYMBOL(empty_zero_page);
13204 +
13205 +#ifdef CONFIG_PAX_KERNEXEC
13206 +EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
13207 +#endif
13208 diff -urNp linux-3.0.4/arch/x86/kernel/i8259.c linux-3.0.4/arch/x86/kernel/i8259.c
13209 --- linux-3.0.4/arch/x86/kernel/i8259.c 2011-07-21 22:17:23.000000000 -0400
13210 +++ linux-3.0.4/arch/x86/kernel/i8259.c 2011-08-23 21:47:55.000000000 -0400
13211 @@ -210,7 +210,7 @@ spurious_8259A_irq:
13212 "spurious 8259A interrupt: IRQ%d.\n", irq);
13213 spurious_irq_mask |= irqmask;
13214 }
13215 - atomic_inc(&irq_err_count);
13216 + atomic_inc_unchecked(&irq_err_count);
13217 /*
13218 * Theoretically we do not have to handle this IRQ,
13219 * but in Linux this does not cause problems and is
13220 diff -urNp linux-3.0.4/arch/x86/kernel/init_task.c linux-3.0.4/arch/x86/kernel/init_task.c
13221 --- linux-3.0.4/arch/x86/kernel/init_task.c 2011-07-21 22:17:23.000000000 -0400
13222 +++ linux-3.0.4/arch/x86/kernel/init_task.c 2011-08-23 21:47:55.000000000 -0400
13223 @@ -20,8 +20,7 @@ static struct sighand_struct init_sighan
13224 * way process stacks are handled. This is done by having a special
13225 * "init_task" linker map entry..
13226 */
13227 -union thread_union init_thread_union __init_task_data =
13228 - { INIT_THREAD_INFO(init_task) };
13229 +union thread_union init_thread_union __init_task_data;
13230
13231 /*
13232 * Initial task structure.
13233 @@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
13234 * section. Since TSS's are completely CPU-local, we want them
13235 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
13236 */
13237 -DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
13238 -
13239 +struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
13240 +EXPORT_SYMBOL(init_tss);
13241 diff -urNp linux-3.0.4/arch/x86/kernel/ioport.c linux-3.0.4/arch/x86/kernel/ioport.c
13242 --- linux-3.0.4/arch/x86/kernel/ioport.c 2011-07-21 22:17:23.000000000 -0400
13243 +++ linux-3.0.4/arch/x86/kernel/ioport.c 2011-08-23 21:48:14.000000000 -0400
13244 @@ -6,6 +6,7 @@
13245 #include <linux/sched.h>
13246 #include <linux/kernel.h>
13247 #include <linux/capability.h>
13248 +#include <linux/security.h>
13249 #include <linux/errno.h>
13250 #include <linux/types.h>
13251 #include <linux/ioport.h>
13252 @@ -28,6 +29,12 @@ asmlinkage long sys_ioperm(unsigned long
13253
13254 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
13255 return -EINVAL;
13256 +#ifdef CONFIG_GRKERNSEC_IO
13257 + if (turn_on && grsec_disable_privio) {
13258 + gr_handle_ioperm();
13259 + return -EPERM;
13260 + }
13261 +#endif
13262 if (turn_on && !capable(CAP_SYS_RAWIO))
13263 return -EPERM;
13264
13265 @@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long
13266 * because the ->io_bitmap_max value must match the bitmap
13267 * contents:
13268 */
13269 - tss = &per_cpu(init_tss, get_cpu());
13270 + tss = init_tss + get_cpu();
13271
13272 if (turn_on)
13273 bitmap_clear(t->io_bitmap_ptr, from, num);
13274 @@ -102,6 +109,12 @@ long sys_iopl(unsigned int level, struct
13275 return -EINVAL;
13276 /* Trying to gain more privileges? */
13277 if (level > old) {
13278 +#ifdef CONFIG_GRKERNSEC_IO
13279 + if (grsec_disable_privio) {
13280 + gr_handle_iopl();
13281 + return -EPERM;
13282 + }
13283 +#endif
13284 if (!capable(CAP_SYS_RAWIO))
13285 return -EPERM;
13286 }
13287 diff -urNp linux-3.0.4/arch/x86/kernel/irq_32.c linux-3.0.4/arch/x86/kernel/irq_32.c
13288 --- linux-3.0.4/arch/x86/kernel/irq_32.c 2011-07-21 22:17:23.000000000 -0400
13289 +++ linux-3.0.4/arch/x86/kernel/irq_32.c 2011-08-23 21:47:55.000000000 -0400
13290 @@ -36,7 +36,7 @@ static int check_stack_overflow(void)
13291 __asm__ __volatile__("andl %%esp,%0" :
13292 "=r" (sp) : "0" (THREAD_SIZE - 1));
13293
13294 - return sp < (sizeof(struct thread_info) + STACK_WARN);
13295 + return sp < STACK_WARN;
13296 }
13297
13298 static void print_stack_overflow(void)
13299 @@ -54,8 +54,8 @@ static inline void print_stack_overflow(
13300 * per-CPU IRQ handling contexts (thread information and stack)
13301 */
13302 union irq_ctx {
13303 - struct thread_info tinfo;
13304 - u32 stack[THREAD_SIZE/sizeof(u32)];
13305 + unsigned long previous_esp;
13306 + u32 stack[THREAD_SIZE/sizeof(u32)];
13307 } __attribute__((aligned(THREAD_SIZE)));
13308
13309 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
13310 @@ -75,10 +75,9 @@ static void call_on_stack(void *func, vo
13311 static inline int
13312 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
13313 {
13314 - union irq_ctx *curctx, *irqctx;
13315 + union irq_ctx *irqctx;
13316 u32 *isp, arg1, arg2;
13317
13318 - curctx = (union irq_ctx *) current_thread_info();
13319 irqctx = __this_cpu_read(hardirq_ctx);
13320
13321 /*
13322 @@ -87,21 +86,16 @@ execute_on_irq_stack(int overflow, struc
13323 * handler) we can't do that and just have to keep using the
13324 * current stack (which is the irq stack already after all)
13325 */
13326 - if (unlikely(curctx == irqctx))
13327 + if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
13328 return 0;
13329
13330 /* build the stack frame on the IRQ stack */
13331 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
13332 - irqctx->tinfo.task = curctx->tinfo.task;
13333 - irqctx->tinfo.previous_esp = current_stack_pointer;
13334 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
13335 + irqctx->previous_esp = current_stack_pointer;
13336
13337 - /*
13338 - * Copy the softirq bits in preempt_count so that the
13339 - * softirq checks work in the hardirq context.
13340 - */
13341 - irqctx->tinfo.preempt_count =
13342 - (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
13343 - (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
13344 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13345 + __set_fs(MAKE_MM_SEG(0));
13346 +#endif
13347
13348 if (unlikely(overflow))
13349 call_on_stack(print_stack_overflow, isp);
13350 @@ -113,6 +107,11 @@ execute_on_irq_stack(int overflow, struc
13351 : "0" (irq), "1" (desc), "2" (isp),
13352 "D" (desc->handle_irq)
13353 : "memory", "cc", "ecx");
13354 +
13355 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13356 + __set_fs(current_thread_info()->addr_limit);
13357 +#endif
13358 +
13359 return 1;
13360 }
13361
13362 @@ -121,29 +120,11 @@ execute_on_irq_stack(int overflow, struc
13363 */
13364 void __cpuinit irq_ctx_init(int cpu)
13365 {
13366 - union irq_ctx *irqctx;
13367 -
13368 if (per_cpu(hardirq_ctx, cpu))
13369 return;
13370
13371 - irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
13372 - THREAD_FLAGS,
13373 - THREAD_ORDER));
13374 - memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
13375 - irqctx->tinfo.cpu = cpu;
13376 - irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
13377 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
13378 -
13379 - per_cpu(hardirq_ctx, cpu) = irqctx;
13380 -
13381 - irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
13382 - THREAD_FLAGS,
13383 - THREAD_ORDER));
13384 - memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
13385 - irqctx->tinfo.cpu = cpu;
13386 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
13387 -
13388 - per_cpu(softirq_ctx, cpu) = irqctx;
13389 + per_cpu(hardirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
13390 + per_cpu(softirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
13391
13392 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
13393 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
13394 @@ -152,7 +133,6 @@ void __cpuinit irq_ctx_init(int cpu)
13395 asmlinkage void do_softirq(void)
13396 {
13397 unsigned long flags;
13398 - struct thread_info *curctx;
13399 union irq_ctx *irqctx;
13400 u32 *isp;
13401
13402 @@ -162,15 +142,22 @@ asmlinkage void do_softirq(void)
13403 local_irq_save(flags);
13404
13405 if (local_softirq_pending()) {
13406 - curctx = current_thread_info();
13407 irqctx = __this_cpu_read(softirq_ctx);
13408 - irqctx->tinfo.task = curctx->task;
13409 - irqctx->tinfo.previous_esp = current_stack_pointer;
13410 + irqctx->previous_esp = current_stack_pointer;
13411
13412 /* build the stack frame on the softirq stack */
13413 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
13414 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
13415 +
13416 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13417 + __set_fs(MAKE_MM_SEG(0));
13418 +#endif
13419
13420 call_on_stack(__do_softirq, isp);
13421 +
13422 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13423 + __set_fs(current_thread_info()->addr_limit);
13424 +#endif
13425 +
13426 /*
13427 * Shouldn't happen, we returned above if in_interrupt():
13428 */
13429 diff -urNp linux-3.0.4/arch/x86/kernel/irq.c linux-3.0.4/arch/x86/kernel/irq.c
13430 --- linux-3.0.4/arch/x86/kernel/irq.c 2011-07-21 22:17:23.000000000 -0400
13431 +++ linux-3.0.4/arch/x86/kernel/irq.c 2011-08-23 21:47:55.000000000 -0400
13432 @@ -17,7 +17,7 @@
13433 #include <asm/mce.h>
13434 #include <asm/hw_irq.h>
13435
13436 -atomic_t irq_err_count;
13437 +atomic_unchecked_t irq_err_count;
13438
13439 /* Function pointer for generic interrupt vector handling */
13440 void (*x86_platform_ipi_callback)(void) = NULL;
13441 @@ -116,9 +116,9 @@ int arch_show_interrupts(struct seq_file
13442 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
13443 seq_printf(p, " Machine check polls\n");
13444 #endif
13445 - seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
13446 + seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
13447 #if defined(CONFIG_X86_IO_APIC)
13448 - seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
13449 + seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
13450 #endif
13451 return 0;
13452 }
13453 @@ -158,10 +158,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
13454
13455 u64 arch_irq_stat(void)
13456 {
13457 - u64 sum = atomic_read(&irq_err_count);
13458 + u64 sum = atomic_read_unchecked(&irq_err_count);
13459
13460 #ifdef CONFIG_X86_IO_APIC
13461 - sum += atomic_read(&irq_mis_count);
13462 + sum += atomic_read_unchecked(&irq_mis_count);
13463 #endif
13464 return sum;
13465 }
13466 diff -urNp linux-3.0.4/arch/x86/kernel/kgdb.c linux-3.0.4/arch/x86/kernel/kgdb.c
13467 --- linux-3.0.4/arch/x86/kernel/kgdb.c 2011-07-21 22:17:23.000000000 -0400
13468 +++ linux-3.0.4/arch/x86/kernel/kgdb.c 2011-08-23 21:47:55.000000000 -0400
13469 @@ -124,11 +124,11 @@ char *dbg_get_reg(int regno, void *mem,
13470 #ifdef CONFIG_X86_32
13471 switch (regno) {
13472 case GDB_SS:
13473 - if (!user_mode_vm(regs))
13474 + if (!user_mode(regs))
13475 *(unsigned long *)mem = __KERNEL_DS;
13476 break;
13477 case GDB_SP:
13478 - if (!user_mode_vm(regs))
13479 + if (!user_mode(regs))
13480 *(unsigned long *)mem = kernel_stack_pointer(regs);
13481 break;
13482 case GDB_GS:
13483 @@ -473,12 +473,12 @@ int kgdb_arch_handle_exception(int e_vec
13484 case 'k':
13485 /* clear the trace bit */
13486 linux_regs->flags &= ~X86_EFLAGS_TF;
13487 - atomic_set(&kgdb_cpu_doing_single_step, -1);
13488 + atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
13489
13490 /* set the trace bit if we're stepping */
13491 if (remcomInBuffer[0] == 's') {
13492 linux_regs->flags |= X86_EFLAGS_TF;
13493 - atomic_set(&kgdb_cpu_doing_single_step,
13494 + atomic_set_unchecked(&kgdb_cpu_doing_single_step,
13495 raw_smp_processor_id());
13496 }
13497
13498 @@ -534,7 +534,7 @@ static int __kgdb_notify(struct die_args
13499 return NOTIFY_DONE;
13500
13501 case DIE_DEBUG:
13502 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
13503 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
13504 if (user_mode(regs))
13505 return single_step_cont(regs, args);
13506 break;
13507 diff -urNp linux-3.0.4/arch/x86/kernel/kprobes.c linux-3.0.4/arch/x86/kernel/kprobes.c
13508 --- linux-3.0.4/arch/x86/kernel/kprobes.c 2011-07-21 22:17:23.000000000 -0400
13509 +++ linux-3.0.4/arch/x86/kernel/kprobes.c 2011-08-23 21:47:55.000000000 -0400
13510 @@ -115,8 +115,11 @@ static void __kprobes __synthesize_relat
13511 } __attribute__((packed)) *insn;
13512
13513 insn = (struct __arch_relative_insn *)from;
13514 +
13515 + pax_open_kernel();
13516 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
13517 insn->op = op;
13518 + pax_close_kernel();
13519 }
13520
13521 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
13522 @@ -153,7 +156,7 @@ static int __kprobes can_boost(kprobe_op
13523 kprobe_opcode_t opcode;
13524 kprobe_opcode_t *orig_opcodes = opcodes;
13525
13526 - if (search_exception_tables((unsigned long)opcodes))
13527 + if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
13528 return 0; /* Page fault may occur on this address. */
13529
13530 retry:
13531 @@ -314,7 +317,9 @@ static int __kprobes __copy_instruction(
13532 }
13533 }
13534 insn_get_length(&insn);
13535 + pax_open_kernel();
13536 memcpy(dest, insn.kaddr, insn.length);
13537 + pax_close_kernel();
13538
13539 #ifdef CONFIG_X86_64
13540 if (insn_rip_relative(&insn)) {
13541 @@ -338,7 +343,9 @@ static int __kprobes __copy_instruction(
13542 (u8 *) dest;
13543 BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */
13544 disp = (u8 *) dest + insn_offset_displacement(&insn);
13545 + pax_open_kernel();
13546 *(s32 *) disp = (s32) newdisp;
13547 + pax_close_kernel();
13548 }
13549 #endif
13550 return insn.length;
13551 @@ -352,12 +359,12 @@ static void __kprobes arch_copy_kprobe(s
13552 */
13553 __copy_instruction(p->ainsn.insn, p->addr, 0);
13554
13555 - if (can_boost(p->addr))
13556 + if (can_boost(ktla_ktva(p->addr)))
13557 p->ainsn.boostable = 0;
13558 else
13559 p->ainsn.boostable = -1;
13560
13561 - p->opcode = *p->addr;
13562 + p->opcode = *(ktla_ktva(p->addr));
13563 }
13564
13565 int __kprobes arch_prepare_kprobe(struct kprobe *p)
13566 @@ -474,7 +481,7 @@ static void __kprobes setup_singlestep(s
13567 * nor set current_kprobe, because it doesn't use single
13568 * stepping.
13569 */
13570 - regs->ip = (unsigned long)p->ainsn.insn;
13571 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
13572 preempt_enable_no_resched();
13573 return;
13574 }
13575 @@ -493,7 +500,7 @@ static void __kprobes setup_singlestep(s
13576 if (p->opcode == BREAKPOINT_INSTRUCTION)
13577 regs->ip = (unsigned long)p->addr;
13578 else
13579 - regs->ip = (unsigned long)p->ainsn.insn;
13580 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
13581 }
13582
13583 /*
13584 @@ -572,7 +579,7 @@ static int __kprobes kprobe_handler(stru
13585 setup_singlestep(p, regs, kcb, 0);
13586 return 1;
13587 }
13588 - } else if (*addr != BREAKPOINT_INSTRUCTION) {
13589 + } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
13590 /*
13591 * The breakpoint instruction was removed right
13592 * after we hit it. Another cpu has removed
13593 @@ -817,7 +824,7 @@ static void __kprobes resume_execution(s
13594 struct pt_regs *regs, struct kprobe_ctlblk *kcb)
13595 {
13596 unsigned long *tos = stack_addr(regs);
13597 - unsigned long copy_ip = (unsigned long)p->ainsn.insn;
13598 + unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
13599 unsigned long orig_ip = (unsigned long)p->addr;
13600 kprobe_opcode_t *insn = p->ainsn.insn;
13601
13602 @@ -999,7 +1006,7 @@ int __kprobes kprobe_exceptions_notify(s
13603 struct die_args *args = data;
13604 int ret = NOTIFY_DONE;
13605
13606 - if (args->regs && user_mode_vm(args->regs))
13607 + if (args->regs && user_mode(args->regs))
13608 return ret;
13609
13610 switch (val) {
13611 @@ -1381,7 +1388,7 @@ int __kprobes arch_prepare_optimized_kpr
13612 * Verify if the address gap is in 2GB range, because this uses
13613 * a relative jump.
13614 */
13615 - rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
13616 + rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
13617 if (abs(rel) > 0x7fffffff)
13618 return -ERANGE;
13619
13620 @@ -1402,11 +1409,11 @@ int __kprobes arch_prepare_optimized_kpr
13621 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
13622
13623 /* Set probe function call */
13624 - synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
13625 + synthesize_relcall(buf + TMPL_CALL_IDX, ktla_ktva(optimized_callback));
13626
13627 /* Set returning jmp instruction at the tail of out-of-line buffer */
13628 synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
13629 - (u8 *)op->kp.addr + op->optinsn.size);
13630 + (u8 *)ktla_ktva(op->kp.addr) + op->optinsn.size);
13631
13632 flush_icache_range((unsigned long) buf,
13633 (unsigned long) buf + TMPL_END_IDX +
13634 @@ -1428,7 +1435,7 @@ static void __kprobes setup_optimize_kpr
13635 ((long)op->kp.addr + RELATIVEJUMP_SIZE));
13636
13637 /* Backup instructions which will be replaced by jump address */
13638 - memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
13639 + memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
13640 RELATIVE_ADDR_SIZE);
13641
13642 insn_buf[0] = RELATIVEJUMP_OPCODE;
13643 diff -urNp linux-3.0.4/arch/x86/kernel/kvm.c linux-3.0.4/arch/x86/kernel/kvm.c
13644 --- linux-3.0.4/arch/x86/kernel/kvm.c 2011-07-21 22:17:23.000000000 -0400
13645 +++ linux-3.0.4/arch/x86/kernel/kvm.c 2011-08-24 18:10:12.000000000 -0400
13646 @@ -426,6 +426,7 @@ static void __init paravirt_ops_setup(vo
13647 pv_mmu_ops.set_pud = kvm_set_pud;
13648 #if PAGETABLE_LEVELS == 4
13649 pv_mmu_ops.set_pgd = kvm_set_pgd;
13650 + pv_mmu_ops.set_pgd_batched = kvm_set_pgd;
13651 #endif
13652 #endif
13653 pv_mmu_ops.flush_tlb_user = kvm_flush_tlb;
13654 diff -urNp linux-3.0.4/arch/x86/kernel/ldt.c linux-3.0.4/arch/x86/kernel/ldt.c
13655 --- linux-3.0.4/arch/x86/kernel/ldt.c 2011-07-21 22:17:23.000000000 -0400
13656 +++ linux-3.0.4/arch/x86/kernel/ldt.c 2011-08-23 21:47:55.000000000 -0400
13657 @@ -67,13 +67,13 @@ static int alloc_ldt(mm_context_t *pc, i
13658 if (reload) {
13659 #ifdef CONFIG_SMP
13660 preempt_disable();
13661 - load_LDT(pc);
13662 + load_LDT_nolock(pc);
13663 if (!cpumask_equal(mm_cpumask(current->mm),
13664 cpumask_of(smp_processor_id())))
13665 smp_call_function(flush_ldt, current->mm, 1);
13666 preempt_enable();
13667 #else
13668 - load_LDT(pc);
13669 + load_LDT_nolock(pc);
13670 #endif
13671 }
13672 if (oldsize) {
13673 @@ -95,7 +95,7 @@ static inline int copy_ldt(mm_context_t
13674 return err;
13675
13676 for (i = 0; i < old->size; i++)
13677 - write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
13678 + write_ldt_entry(new->ldt, i, old->ldt + i);
13679 return 0;
13680 }
13681
13682 @@ -116,6 +116,24 @@ int init_new_context(struct task_struct
13683 retval = copy_ldt(&mm->context, &old_mm->context);
13684 mutex_unlock(&old_mm->context.lock);
13685 }
13686 +
13687 + if (tsk == current) {
13688 + mm->context.vdso = 0;
13689 +
13690 +#ifdef CONFIG_X86_32
13691 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
13692 + mm->context.user_cs_base = 0UL;
13693 + mm->context.user_cs_limit = ~0UL;
13694 +
13695 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
13696 + cpus_clear(mm->context.cpu_user_cs_mask);
13697 +#endif
13698 +
13699 +#endif
13700 +#endif
13701 +
13702 + }
13703 +
13704 return retval;
13705 }
13706
13707 @@ -230,6 +248,13 @@ static int write_ldt(void __user *ptr, u
13708 }
13709 }
13710
13711 +#ifdef CONFIG_PAX_SEGMEXEC
13712 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
13713 + error = -EINVAL;
13714 + goto out_unlock;
13715 + }
13716 +#endif
13717 +
13718 fill_ldt(&ldt, &ldt_info);
13719 if (oldmode)
13720 ldt.avl = 0;
13721 diff -urNp linux-3.0.4/arch/x86/kernel/machine_kexec_32.c linux-3.0.4/arch/x86/kernel/machine_kexec_32.c
13722 --- linux-3.0.4/arch/x86/kernel/machine_kexec_32.c 2011-07-21 22:17:23.000000000 -0400
13723 +++ linux-3.0.4/arch/x86/kernel/machine_kexec_32.c 2011-08-23 21:47:55.000000000 -0400
13724 @@ -27,7 +27,7 @@
13725 #include <asm/cacheflush.h>
13726 #include <asm/debugreg.h>
13727
13728 -static void set_idt(void *newidt, __u16 limit)
13729 +static void set_idt(struct desc_struct *newidt, __u16 limit)
13730 {
13731 struct desc_ptr curidt;
13732
13733 @@ -39,7 +39,7 @@ static void set_idt(void *newidt, __u16
13734 }
13735
13736
13737 -static void set_gdt(void *newgdt, __u16 limit)
13738 +static void set_gdt(struct desc_struct *newgdt, __u16 limit)
13739 {
13740 struct desc_ptr curgdt;
13741
13742 @@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image)
13743 }
13744
13745 control_page = page_address(image->control_code_page);
13746 - memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
13747 + memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
13748
13749 relocate_kernel_ptr = control_page;
13750 page_list[PA_CONTROL_PAGE] = __pa(control_page);
13751 diff -urNp linux-3.0.4/arch/x86/kernel/microcode_intel.c linux-3.0.4/arch/x86/kernel/microcode_intel.c
13752 --- linux-3.0.4/arch/x86/kernel/microcode_intel.c 2011-07-21 22:17:23.000000000 -0400
13753 +++ linux-3.0.4/arch/x86/kernel/microcode_intel.c 2011-08-23 21:47:55.000000000 -0400
13754 @@ -440,13 +440,13 @@ static enum ucode_state request_microcod
13755
13756 static int get_ucode_user(void *to, const void *from, size_t n)
13757 {
13758 - return copy_from_user(to, from, n);
13759 + return copy_from_user(to, (__force const void __user *)from, n);
13760 }
13761
13762 static enum ucode_state
13763 request_microcode_user(int cpu, const void __user *buf, size_t size)
13764 {
13765 - return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
13766 + return generic_load_microcode(cpu, (__force void *)buf, size, &get_ucode_user);
13767 }
13768
13769 static void microcode_fini_cpu(int cpu)
13770 diff -urNp linux-3.0.4/arch/x86/kernel/module.c linux-3.0.4/arch/x86/kernel/module.c
13771 --- linux-3.0.4/arch/x86/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
13772 +++ linux-3.0.4/arch/x86/kernel/module.c 2011-08-23 21:47:55.000000000 -0400
13773 @@ -36,21 +36,66 @@
13774 #define DEBUGP(fmt...)
13775 #endif
13776
13777 -void *module_alloc(unsigned long size)
13778 +static inline void *__module_alloc(unsigned long size, pgprot_t prot)
13779 {
13780 if (PAGE_ALIGN(size) > MODULES_LEN)
13781 return NULL;
13782 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
13783 - GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
13784 + GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot,
13785 -1, __builtin_return_address(0));
13786 }
13787
13788 +void *module_alloc(unsigned long size)
13789 +{
13790 +
13791 +#ifdef CONFIG_PAX_KERNEXEC
13792 + return __module_alloc(size, PAGE_KERNEL);
13793 +#else
13794 + return __module_alloc(size, PAGE_KERNEL_EXEC);
13795 +#endif
13796 +
13797 +}
13798 +
13799 /* Free memory returned from module_alloc */
13800 void module_free(struct module *mod, void *module_region)
13801 {
13802 vfree(module_region);
13803 }
13804
13805 +#ifdef CONFIG_PAX_KERNEXEC
13806 +#ifdef CONFIG_X86_32
13807 +void *module_alloc_exec(unsigned long size)
13808 +{
13809 + struct vm_struct *area;
13810 +
13811 + if (size == 0)
13812 + return NULL;
13813 +
13814 + area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
13815 + return area ? area->addr : NULL;
13816 +}
13817 +EXPORT_SYMBOL(module_alloc_exec);
13818 +
13819 +void module_free_exec(struct module *mod, void *module_region)
13820 +{
13821 + vunmap(module_region);
13822 +}
13823 +EXPORT_SYMBOL(module_free_exec);
13824 +#else
13825 +void module_free_exec(struct module *mod, void *module_region)
13826 +{
13827 + module_free(mod, module_region);
13828 +}
13829 +EXPORT_SYMBOL(module_free_exec);
13830 +
13831 +void *module_alloc_exec(unsigned long size)
13832 +{
13833 + return __module_alloc(size, PAGE_KERNEL_RX);
13834 +}
13835 +EXPORT_SYMBOL(module_alloc_exec);
13836 +#endif
13837 +#endif
13838 +
13839 /* We don't need anything special. */
13840 int module_frob_arch_sections(Elf_Ehdr *hdr,
13841 Elf_Shdr *sechdrs,
13842 @@ -70,14 +115,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
13843 unsigned int i;
13844 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
13845 Elf32_Sym *sym;
13846 - uint32_t *location;
13847 + uint32_t *plocation, location;
13848
13849 DEBUGP("Applying relocate section %u to %u\n", relsec,
13850 sechdrs[relsec].sh_info);
13851 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
13852 /* This is where to make the change */
13853 - location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
13854 - + rel[i].r_offset;
13855 + plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
13856 + location = (uint32_t)plocation;
13857 + if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
13858 + plocation = ktla_ktva((void *)plocation);
13859 /* This is the symbol it is referring to. Note that all
13860 undefined symbols have been resolved. */
13861 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
13862 @@ -86,11 +133,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
13863 switch (ELF32_R_TYPE(rel[i].r_info)) {
13864 case R_386_32:
13865 /* We add the value into the location given */
13866 - *location += sym->st_value;
13867 + pax_open_kernel();
13868 + *plocation += sym->st_value;
13869 + pax_close_kernel();
13870 break;
13871 case R_386_PC32:
13872 /* Add the value, subtract its postition */
13873 - *location += sym->st_value - (uint32_t)location;
13874 + pax_open_kernel();
13875 + *plocation += sym->st_value - location;
13876 + pax_close_kernel();
13877 break;
13878 default:
13879 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
13880 @@ -146,21 +197,30 @@ int apply_relocate_add(Elf64_Shdr *sechd
13881 case R_X86_64_NONE:
13882 break;
13883 case R_X86_64_64:
13884 + pax_open_kernel();
13885 *(u64 *)loc = val;
13886 + pax_close_kernel();
13887 break;
13888 case R_X86_64_32:
13889 + pax_open_kernel();
13890 *(u32 *)loc = val;
13891 + pax_close_kernel();
13892 if (val != *(u32 *)loc)
13893 goto overflow;
13894 break;
13895 case R_X86_64_32S:
13896 + pax_open_kernel();
13897 *(s32 *)loc = val;
13898 + pax_close_kernel();
13899 if ((s64)val != *(s32 *)loc)
13900 goto overflow;
13901 break;
13902 case R_X86_64_PC32:
13903 val -= (u64)loc;
13904 + pax_open_kernel();
13905 *(u32 *)loc = val;
13906 + pax_close_kernel();
13907 +
13908 #if 0
13909 if ((s64)val != *(s32 *)loc)
13910 goto overflow;
13911 diff -urNp linux-3.0.4/arch/x86/kernel/paravirt.c linux-3.0.4/arch/x86/kernel/paravirt.c
13912 --- linux-3.0.4/arch/x86/kernel/paravirt.c 2011-07-21 22:17:23.000000000 -0400
13913 +++ linux-3.0.4/arch/x86/kernel/paravirt.c 2011-08-23 21:48:14.000000000 -0400
13914 @@ -53,6 +53,9 @@ u64 _paravirt_ident_64(u64 x)
13915 {
13916 return x;
13917 }
13918 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
13919 +PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
13920 +#endif
13921
13922 void __init default_banner(void)
13923 {
13924 @@ -122,7 +125,7 @@ unsigned paravirt_patch_jmp(void *insnbu
13925 * corresponding structure. */
13926 static void *get_call_destination(u8 type)
13927 {
13928 - struct paravirt_patch_template tmpl = {
13929 + const struct paravirt_patch_template tmpl = {
13930 .pv_init_ops = pv_init_ops,
13931 .pv_time_ops = pv_time_ops,
13932 .pv_cpu_ops = pv_cpu_ops,
13933 @@ -133,6 +136,9 @@ static void *get_call_destination(u8 typ
13934 .pv_lock_ops = pv_lock_ops,
13935 #endif
13936 };
13937 +
13938 + pax_track_stack();
13939 +
13940 return *((void **)&tmpl + type);
13941 }
13942
13943 @@ -145,15 +151,19 @@ unsigned paravirt_patch_default(u8 type,
13944 if (opfunc == NULL)
13945 /* If there's no function, patch it with a ud2a (BUG) */
13946 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
13947 - else if (opfunc == _paravirt_nop)
13948 + else if (opfunc == (void *)_paravirt_nop)
13949 /* If the operation is a nop, then nop the callsite */
13950 ret = paravirt_patch_nop();
13951
13952 /* identity functions just return their single argument */
13953 - else if (opfunc == _paravirt_ident_32)
13954 + else if (opfunc == (void *)_paravirt_ident_32)
13955 ret = paravirt_patch_ident_32(insnbuf, len);
13956 - else if (opfunc == _paravirt_ident_64)
13957 + else if (opfunc == (void *)_paravirt_ident_64)
13958 ret = paravirt_patch_ident_64(insnbuf, len);
13959 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
13960 + else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
13961 + ret = paravirt_patch_ident_64(insnbuf, len);
13962 +#endif
13963
13964 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
13965 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
13966 @@ -178,7 +188,7 @@ unsigned paravirt_patch_insns(void *insn
13967 if (insn_len > len || start == NULL)
13968 insn_len = len;
13969 else
13970 - memcpy(insnbuf, start, insn_len);
13971 + memcpy(insnbuf, ktla_ktva(start), insn_len);
13972
13973 return insn_len;
13974 }
13975 @@ -294,22 +304,22 @@ void arch_flush_lazy_mmu_mode(void)
13976 preempt_enable();
13977 }
13978
13979 -struct pv_info pv_info = {
13980 +struct pv_info pv_info __read_only = {
13981 .name = "bare hardware",
13982 .paravirt_enabled = 0,
13983 .kernel_rpl = 0,
13984 .shared_kernel_pmd = 1, /* Only used when CONFIG_X86_PAE is set */
13985 };
13986
13987 -struct pv_init_ops pv_init_ops = {
13988 +struct pv_init_ops pv_init_ops __read_only = {
13989 .patch = native_patch,
13990 };
13991
13992 -struct pv_time_ops pv_time_ops = {
13993 +struct pv_time_ops pv_time_ops __read_only = {
13994 .sched_clock = native_sched_clock,
13995 };
13996
13997 -struct pv_irq_ops pv_irq_ops = {
13998 +struct pv_irq_ops pv_irq_ops __read_only = {
13999 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
14000 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
14001 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
14002 @@ -321,7 +331,7 @@ struct pv_irq_ops pv_irq_ops = {
14003 #endif
14004 };
14005
14006 -struct pv_cpu_ops pv_cpu_ops = {
14007 +struct pv_cpu_ops pv_cpu_ops __read_only = {
14008 .cpuid = native_cpuid,
14009 .get_debugreg = native_get_debugreg,
14010 .set_debugreg = native_set_debugreg,
14011 @@ -382,21 +392,26 @@ struct pv_cpu_ops pv_cpu_ops = {
14012 .end_context_switch = paravirt_nop,
14013 };
14014
14015 -struct pv_apic_ops pv_apic_ops = {
14016 +struct pv_apic_ops pv_apic_ops __read_only = {
14017 #ifdef CONFIG_X86_LOCAL_APIC
14018 .startup_ipi_hook = paravirt_nop,
14019 #endif
14020 };
14021
14022 -#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
14023 +#ifdef CONFIG_X86_32
14024 +#ifdef CONFIG_X86_PAE
14025 +/* 64-bit pagetable entries */
14026 +#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
14027 +#else
14028 /* 32-bit pagetable entries */
14029 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
14030 +#endif
14031 #else
14032 /* 64-bit pagetable entries */
14033 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
14034 #endif
14035
14036 -struct pv_mmu_ops pv_mmu_ops = {
14037 +struct pv_mmu_ops pv_mmu_ops __read_only = {
14038
14039 .read_cr2 = native_read_cr2,
14040 .write_cr2 = native_write_cr2,
14041 @@ -446,6 +461,7 @@ struct pv_mmu_ops pv_mmu_ops = {
14042 .make_pud = PTE_IDENT,
14043
14044 .set_pgd = native_set_pgd,
14045 + .set_pgd_batched = native_set_pgd_batched,
14046 #endif
14047 #endif /* PAGETABLE_LEVELS >= 3 */
14048
14049 @@ -465,6 +481,12 @@ struct pv_mmu_ops pv_mmu_ops = {
14050 },
14051
14052 .set_fixmap = native_set_fixmap,
14053 +
14054 +#ifdef CONFIG_PAX_KERNEXEC
14055 + .pax_open_kernel = native_pax_open_kernel,
14056 + .pax_close_kernel = native_pax_close_kernel,
14057 +#endif
14058 +
14059 };
14060
14061 EXPORT_SYMBOL_GPL(pv_time_ops);
14062 diff -urNp linux-3.0.4/arch/x86/kernel/paravirt-spinlocks.c linux-3.0.4/arch/x86/kernel/paravirt-spinlocks.c
14063 --- linux-3.0.4/arch/x86/kernel/paravirt-spinlocks.c 2011-07-21 22:17:23.000000000 -0400
14064 +++ linux-3.0.4/arch/x86/kernel/paravirt-spinlocks.c 2011-08-23 21:47:55.000000000 -0400
14065 @@ -13,7 +13,7 @@ default_spin_lock_flags(arch_spinlock_t
14066 arch_spin_lock(lock);
14067 }
14068
14069 -struct pv_lock_ops pv_lock_ops = {
14070 +struct pv_lock_ops pv_lock_ops __read_only = {
14071 #ifdef CONFIG_SMP
14072 .spin_is_locked = __ticket_spin_is_locked,
14073 .spin_is_contended = __ticket_spin_is_contended,
14074 diff -urNp linux-3.0.4/arch/x86/kernel/pci-iommu_table.c linux-3.0.4/arch/x86/kernel/pci-iommu_table.c
14075 --- linux-3.0.4/arch/x86/kernel/pci-iommu_table.c 2011-07-21 22:17:23.000000000 -0400
14076 +++ linux-3.0.4/arch/x86/kernel/pci-iommu_table.c 2011-08-23 21:48:14.000000000 -0400
14077 @@ -2,7 +2,7 @@
14078 #include <asm/iommu_table.h>
14079 #include <linux/string.h>
14080 #include <linux/kallsyms.h>
14081 -
14082 +#include <linux/sched.h>
14083
14084 #define DEBUG 1
14085
14086 @@ -51,6 +51,8 @@ void __init check_iommu_entries(struct i
14087 {
14088 struct iommu_table_entry *p, *q, *x;
14089
14090 + pax_track_stack();
14091 +
14092 /* Simple cyclic dependency checker. */
14093 for (p = start; p < finish; p++) {
14094 q = find_dependents_of(start, finish, p);
14095 diff -urNp linux-3.0.4/arch/x86/kernel/process_32.c linux-3.0.4/arch/x86/kernel/process_32.c
14096 --- linux-3.0.4/arch/x86/kernel/process_32.c 2011-07-21 22:17:23.000000000 -0400
14097 +++ linux-3.0.4/arch/x86/kernel/process_32.c 2011-08-23 21:47:55.000000000 -0400
14098 @@ -65,6 +65,7 @@ asmlinkage void ret_from_fork(void) __as
14099 unsigned long thread_saved_pc(struct task_struct *tsk)
14100 {
14101 return ((unsigned long *)tsk->thread.sp)[3];
14102 +//XXX return tsk->thread.eip;
14103 }
14104
14105 #ifndef CONFIG_SMP
14106 @@ -126,15 +127,14 @@ void __show_regs(struct pt_regs *regs, i
14107 unsigned long sp;
14108 unsigned short ss, gs;
14109
14110 - if (user_mode_vm(regs)) {
14111 + if (user_mode(regs)) {
14112 sp = regs->sp;
14113 ss = regs->ss & 0xffff;
14114 - gs = get_user_gs(regs);
14115 } else {
14116 sp = kernel_stack_pointer(regs);
14117 savesegment(ss, ss);
14118 - savesegment(gs, gs);
14119 }
14120 + gs = get_user_gs(regs);
14121
14122 show_regs_common();
14123
14124 @@ -196,13 +196,14 @@ int copy_thread(unsigned long clone_flag
14125 struct task_struct *tsk;
14126 int err;
14127
14128 - childregs = task_pt_regs(p);
14129 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
14130 *childregs = *regs;
14131 childregs->ax = 0;
14132 childregs->sp = sp;
14133
14134 p->thread.sp = (unsigned long) childregs;
14135 p->thread.sp0 = (unsigned long) (childregs+1);
14136 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
14137
14138 p->thread.ip = (unsigned long) ret_from_fork;
14139
14140 @@ -292,7 +293,7 @@ __switch_to(struct task_struct *prev_p,
14141 struct thread_struct *prev = &prev_p->thread,
14142 *next = &next_p->thread;
14143 int cpu = smp_processor_id();
14144 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
14145 + struct tss_struct *tss = init_tss + cpu;
14146 bool preload_fpu;
14147
14148 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
14149 @@ -327,6 +328,10 @@ __switch_to(struct task_struct *prev_p,
14150 */
14151 lazy_save_gs(prev->gs);
14152
14153 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14154 + __set_fs(task_thread_info(next_p)->addr_limit);
14155 +#endif
14156 +
14157 /*
14158 * Load the per-thread Thread-Local Storage descriptor.
14159 */
14160 @@ -362,6 +367,9 @@ __switch_to(struct task_struct *prev_p,
14161 */
14162 arch_end_context_switch(next_p);
14163
14164 + percpu_write(current_task, next_p);
14165 + percpu_write(current_tinfo, &next_p->tinfo);
14166 +
14167 if (preload_fpu)
14168 __math_state_restore();
14169
14170 @@ -371,8 +379,6 @@ __switch_to(struct task_struct *prev_p,
14171 if (prev->gs | next->gs)
14172 lazy_load_gs(next->gs);
14173
14174 - percpu_write(current_task, next_p);
14175 -
14176 return prev_p;
14177 }
14178
14179 @@ -402,4 +408,3 @@ unsigned long get_wchan(struct task_stru
14180 } while (count++ < 16);
14181 return 0;
14182 }
14183 -
14184 diff -urNp linux-3.0.4/arch/x86/kernel/process_64.c linux-3.0.4/arch/x86/kernel/process_64.c
14185 --- linux-3.0.4/arch/x86/kernel/process_64.c 2011-07-21 22:17:23.000000000 -0400
14186 +++ linux-3.0.4/arch/x86/kernel/process_64.c 2011-08-23 21:47:55.000000000 -0400
14187 @@ -87,7 +87,7 @@ static void __exit_idle(void)
14188 void exit_idle(void)
14189 {
14190 /* idle loop has pid 0 */
14191 - if (current->pid)
14192 + if (task_pid_nr(current))
14193 return;
14194 __exit_idle();
14195 }
14196 @@ -260,8 +260,7 @@ int copy_thread(unsigned long clone_flag
14197 struct pt_regs *childregs;
14198 struct task_struct *me = current;
14199
14200 - childregs = ((struct pt_regs *)
14201 - (THREAD_SIZE + task_stack_page(p))) - 1;
14202 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
14203 *childregs = *regs;
14204
14205 childregs->ax = 0;
14206 @@ -273,6 +272,7 @@ int copy_thread(unsigned long clone_flag
14207 p->thread.sp = (unsigned long) childregs;
14208 p->thread.sp0 = (unsigned long) (childregs+1);
14209 p->thread.usersp = me->thread.usersp;
14210 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
14211
14212 set_tsk_thread_flag(p, TIF_FORK);
14213
14214 @@ -375,7 +375,7 @@ __switch_to(struct task_struct *prev_p,
14215 struct thread_struct *prev = &prev_p->thread;
14216 struct thread_struct *next = &next_p->thread;
14217 int cpu = smp_processor_id();
14218 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
14219 + struct tss_struct *tss = init_tss + cpu;
14220 unsigned fsindex, gsindex;
14221 bool preload_fpu;
14222
14223 @@ -471,10 +471,9 @@ __switch_to(struct task_struct *prev_p,
14224 prev->usersp = percpu_read(old_rsp);
14225 percpu_write(old_rsp, next->usersp);
14226 percpu_write(current_task, next_p);
14227 + percpu_write(current_tinfo, &next_p->tinfo);
14228
14229 - percpu_write(kernel_stack,
14230 - (unsigned long)task_stack_page(next_p) +
14231 - THREAD_SIZE - KERNEL_STACK_OFFSET);
14232 + percpu_write(kernel_stack, next->sp0);
14233
14234 /*
14235 * Now maybe reload the debug registers and handle I/O bitmaps
14236 @@ -536,12 +535,11 @@ unsigned long get_wchan(struct task_stru
14237 if (!p || p == current || p->state == TASK_RUNNING)
14238 return 0;
14239 stack = (unsigned long)task_stack_page(p);
14240 - if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
14241 + if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
14242 return 0;
14243 fp = *(u64 *)(p->thread.sp);
14244 do {
14245 - if (fp < (unsigned long)stack ||
14246 - fp >= (unsigned long)stack+THREAD_SIZE)
14247 + if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
14248 return 0;
14249 ip = *(u64 *)(fp+8);
14250 if (!in_sched_functions(ip))
14251 diff -urNp linux-3.0.4/arch/x86/kernel/process.c linux-3.0.4/arch/x86/kernel/process.c
14252 --- linux-3.0.4/arch/x86/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
14253 +++ linux-3.0.4/arch/x86/kernel/process.c 2011-08-30 18:23:52.000000000 -0400
14254 @@ -48,16 +48,33 @@ void free_thread_xstate(struct task_stru
14255
14256 void free_thread_info(struct thread_info *ti)
14257 {
14258 - free_thread_xstate(ti->task);
14259 free_pages((unsigned long)ti, get_order(THREAD_SIZE));
14260 }
14261
14262 +static struct kmem_cache *task_struct_cachep;
14263 +
14264 void arch_task_cache_init(void)
14265 {
14266 - task_xstate_cachep =
14267 - kmem_cache_create("task_xstate", xstate_size,
14268 + /* create a slab on which task_structs can be allocated */
14269 + task_struct_cachep =
14270 + kmem_cache_create("task_struct", sizeof(struct task_struct),
14271 + ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
14272 +
14273 + task_xstate_cachep =
14274 + kmem_cache_create("task_xstate", xstate_size,
14275 __alignof__(union thread_xstate),
14276 - SLAB_PANIC | SLAB_NOTRACK, NULL);
14277 + SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
14278 +}
14279 +
14280 +struct task_struct *alloc_task_struct_node(int node)
14281 +{
14282 + return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
14283 +}
14284 +
14285 +void free_task_struct(struct task_struct *task)
14286 +{
14287 + free_thread_xstate(task);
14288 + kmem_cache_free(task_struct_cachep, task);
14289 }
14290
14291 /*
14292 @@ -70,7 +87,7 @@ void exit_thread(void)
14293 unsigned long *bp = t->io_bitmap_ptr;
14294
14295 if (bp) {
14296 - struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
14297 + struct tss_struct *tss = init_tss + get_cpu();
14298
14299 t->io_bitmap_ptr = NULL;
14300 clear_thread_flag(TIF_IO_BITMAP);
14301 @@ -106,7 +123,7 @@ void show_regs_common(void)
14302
14303 printk(KERN_CONT "\n");
14304 printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s",
14305 - current->pid, current->comm, print_tainted(),
14306 + task_pid_nr(current), current->comm, print_tainted(),
14307 init_utsname()->release,
14308 (int)strcspn(init_utsname()->version, " "),
14309 init_utsname()->version);
14310 @@ -120,6 +137,9 @@ void flush_thread(void)
14311 {
14312 struct task_struct *tsk = current;
14313
14314 +#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
14315 + loadsegment(gs, 0);
14316 +#endif
14317 flush_ptrace_hw_breakpoint(tsk);
14318 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
14319 /*
14320 @@ -282,10 +302,10 @@ int kernel_thread(int (*fn)(void *), voi
14321 regs.di = (unsigned long) arg;
14322
14323 #ifdef CONFIG_X86_32
14324 - regs.ds = __USER_DS;
14325 - regs.es = __USER_DS;
14326 + regs.ds = __KERNEL_DS;
14327 + regs.es = __KERNEL_DS;
14328 regs.fs = __KERNEL_PERCPU;
14329 - regs.gs = __KERNEL_STACK_CANARY;
14330 + savesegment(gs, regs.gs);
14331 #else
14332 regs.ss = __KERNEL_DS;
14333 #endif
14334 @@ -403,7 +423,7 @@ void default_idle(void)
14335 EXPORT_SYMBOL(default_idle);
14336 #endif
14337
14338 -void stop_this_cpu(void *dummy)
14339 +__noreturn void stop_this_cpu(void *dummy)
14340 {
14341 local_irq_disable();
14342 /*
14343 @@ -668,16 +688,37 @@ static int __init idle_setup(char *str)
14344 }
14345 early_param("idle", idle_setup);
14346
14347 -unsigned long arch_align_stack(unsigned long sp)
14348 +#ifdef CONFIG_PAX_RANDKSTACK
14349 +void pax_randomize_kstack(struct pt_regs *regs)
14350 {
14351 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
14352 - sp -= get_random_int() % 8192;
14353 - return sp & ~0xf;
14354 -}
14355 + struct thread_struct *thread = &current->thread;
14356 + unsigned long time;
14357
14358 -unsigned long arch_randomize_brk(struct mm_struct *mm)
14359 -{
14360 - unsigned long range_end = mm->brk + 0x02000000;
14361 - return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
14362 -}
14363 + if (!randomize_va_space)
14364 + return;
14365 +
14366 + if (v8086_mode(regs))
14367 + return;
14368
14369 + rdtscl(time);
14370 +
14371 + /* P4 seems to return a 0 LSB, ignore it */
14372 +#ifdef CONFIG_MPENTIUM4
14373 + time &= 0x3EUL;
14374 + time <<= 2;
14375 +#elif defined(CONFIG_X86_64)
14376 + time &= 0xFUL;
14377 + time <<= 4;
14378 +#else
14379 + time &= 0x1FUL;
14380 + time <<= 3;
14381 +#endif
14382 +
14383 + thread->sp0 ^= time;
14384 + load_sp0(init_tss + smp_processor_id(), thread);
14385 +
14386 +#ifdef CONFIG_X86_64
14387 + percpu_write(kernel_stack, thread->sp0);
14388 +#endif
14389 +}
14390 +#endif
14391 diff -urNp linux-3.0.4/arch/x86/kernel/ptrace.c linux-3.0.4/arch/x86/kernel/ptrace.c
14392 --- linux-3.0.4/arch/x86/kernel/ptrace.c 2011-07-21 22:17:23.000000000 -0400
14393 +++ linux-3.0.4/arch/x86/kernel/ptrace.c 2011-08-23 21:47:55.000000000 -0400
14394 @@ -821,7 +821,7 @@ long arch_ptrace(struct task_struct *chi
14395 unsigned long addr, unsigned long data)
14396 {
14397 int ret;
14398 - unsigned long __user *datap = (unsigned long __user *)data;
14399 + unsigned long __user *datap = (__force unsigned long __user *)data;
14400
14401 switch (request) {
14402 /* read the word at location addr in the USER area. */
14403 @@ -906,14 +906,14 @@ long arch_ptrace(struct task_struct *chi
14404 if ((int) addr < 0)
14405 return -EIO;
14406 ret = do_get_thread_area(child, addr,
14407 - (struct user_desc __user *)data);
14408 + (__force struct user_desc __user *) data);
14409 break;
14410
14411 case PTRACE_SET_THREAD_AREA:
14412 if ((int) addr < 0)
14413 return -EIO;
14414 ret = do_set_thread_area(child, addr,
14415 - (struct user_desc __user *)data, 0);
14416 + (__force struct user_desc __user *) data, 0);
14417 break;
14418 #endif
14419
14420 @@ -1330,7 +1330,7 @@ static void fill_sigtrap_info(struct tas
14421 memset(info, 0, sizeof(*info));
14422 info->si_signo = SIGTRAP;
14423 info->si_code = si_code;
14424 - info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
14425 + info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
14426 }
14427
14428 void user_single_step_siginfo(struct task_struct *tsk,
14429 diff -urNp linux-3.0.4/arch/x86/kernel/pvclock.c linux-3.0.4/arch/x86/kernel/pvclock.c
14430 --- linux-3.0.4/arch/x86/kernel/pvclock.c 2011-07-21 22:17:23.000000000 -0400
14431 +++ linux-3.0.4/arch/x86/kernel/pvclock.c 2011-08-23 21:47:55.000000000 -0400
14432 @@ -81,11 +81,11 @@ unsigned long pvclock_tsc_khz(struct pvc
14433 return pv_tsc_khz;
14434 }
14435
14436 -static atomic64_t last_value = ATOMIC64_INIT(0);
14437 +static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
14438
14439 void pvclock_resume(void)
14440 {
14441 - atomic64_set(&last_value, 0);
14442 + atomic64_set_unchecked(&last_value, 0);
14443 }
14444
14445 cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
14446 @@ -121,11 +121,11 @@ cycle_t pvclock_clocksource_read(struct
14447 * updating at the same time, and one of them could be slightly behind,
14448 * making the assumption that last_value always go forward fail to hold.
14449 */
14450 - last = atomic64_read(&last_value);
14451 + last = atomic64_read_unchecked(&last_value);
14452 do {
14453 if (ret < last)
14454 return last;
14455 - last = atomic64_cmpxchg(&last_value, last, ret);
14456 + last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
14457 } while (unlikely(last != ret));
14458
14459 return ret;
14460 diff -urNp linux-3.0.4/arch/x86/kernel/reboot.c linux-3.0.4/arch/x86/kernel/reboot.c
14461 --- linux-3.0.4/arch/x86/kernel/reboot.c 2011-07-21 22:17:23.000000000 -0400
14462 +++ linux-3.0.4/arch/x86/kernel/reboot.c 2011-08-23 21:47:55.000000000 -0400
14463 @@ -35,7 +35,7 @@ void (*pm_power_off)(void);
14464 EXPORT_SYMBOL(pm_power_off);
14465
14466 static const struct desc_ptr no_idt = {};
14467 -static int reboot_mode;
14468 +static unsigned short reboot_mode;
14469 enum reboot_type reboot_type = BOOT_ACPI;
14470 int reboot_force;
14471
14472 @@ -315,13 +315,17 @@ core_initcall(reboot_init);
14473 extern const unsigned char machine_real_restart_asm[];
14474 extern const u64 machine_real_restart_gdt[3];
14475
14476 -void machine_real_restart(unsigned int type)
14477 +__noreturn void machine_real_restart(unsigned int type)
14478 {
14479 void *restart_va;
14480 unsigned long restart_pa;
14481 - void (*restart_lowmem)(unsigned int);
14482 + void (* __noreturn restart_lowmem)(unsigned int);
14483 u64 *lowmem_gdt;
14484
14485 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
14486 + struct desc_struct *gdt;
14487 +#endif
14488 +
14489 local_irq_disable();
14490
14491 /* Write zero to CMOS register number 0x0f, which the BIOS POST
14492 @@ -347,14 +351,14 @@ void machine_real_restart(unsigned int t
14493 boot)". This seems like a fairly standard thing that gets set by
14494 REBOOT.COM programs, and the previous reset routine did this
14495 too. */
14496 - *((unsigned short *)0x472) = reboot_mode;
14497 + *(unsigned short *)(__va(0x472)) = reboot_mode;
14498
14499 /* Patch the GDT in the low memory trampoline */
14500 lowmem_gdt = TRAMPOLINE_SYM(machine_real_restart_gdt);
14501
14502 restart_va = TRAMPOLINE_SYM(machine_real_restart_asm);
14503 restart_pa = virt_to_phys(restart_va);
14504 - restart_lowmem = (void (*)(unsigned int))restart_pa;
14505 + restart_lowmem = (void *)restart_pa;
14506
14507 /* GDT[0]: GDT self-pointer */
14508 lowmem_gdt[0] =
14509 @@ -365,7 +369,33 @@ void machine_real_restart(unsigned int t
14510 GDT_ENTRY(0x009b, restart_pa, 0xffff);
14511
14512 /* Jump to the identity-mapped low memory code */
14513 +
14514 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
14515 + gdt = get_cpu_gdt_table(smp_processor_id());
14516 + pax_open_kernel();
14517 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14518 + gdt[GDT_ENTRY_KERNEL_DS].type = 3;
14519 + gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
14520 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
14521 +#endif
14522 +#ifdef CONFIG_PAX_KERNEXEC
14523 + gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
14524 + gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
14525 + gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
14526 + gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
14527 + gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
14528 + gdt[GDT_ENTRY_KERNEL_CS].g = 1;
14529 +#endif
14530 + pax_close_kernel();
14531 +#endif
14532 +
14533 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
14534 + asm volatile("push %0; push %1; lret\n" : : "i" (__KERNEL_CS), "rm" (restart_lowmem), "a" (type));
14535 + unreachable();
14536 +#else
14537 restart_lowmem(type);
14538 +#endif
14539 +
14540 }
14541 #ifdef CONFIG_APM_MODULE
14542 EXPORT_SYMBOL(machine_real_restart);
14543 @@ -523,7 +553,7 @@ void __attribute__((weak)) mach_reboot_f
14544 * try to force a triple fault and then cycle between hitting the keyboard
14545 * controller and doing that
14546 */
14547 -static void native_machine_emergency_restart(void)
14548 +__noreturn static void native_machine_emergency_restart(void)
14549 {
14550 int i;
14551 int attempt = 0;
14552 @@ -647,13 +677,13 @@ void native_machine_shutdown(void)
14553 #endif
14554 }
14555
14556 -static void __machine_emergency_restart(int emergency)
14557 +static __noreturn void __machine_emergency_restart(int emergency)
14558 {
14559 reboot_emergency = emergency;
14560 machine_ops.emergency_restart();
14561 }
14562
14563 -static void native_machine_restart(char *__unused)
14564 +static __noreturn void native_machine_restart(char *__unused)
14565 {
14566 printk("machine restart\n");
14567
14568 @@ -662,7 +692,7 @@ static void native_machine_restart(char
14569 __machine_emergency_restart(0);
14570 }
14571
14572 -static void native_machine_halt(void)
14573 +static __noreturn void native_machine_halt(void)
14574 {
14575 /* stop other cpus and apics */
14576 machine_shutdown();
14577 @@ -673,7 +703,7 @@ static void native_machine_halt(void)
14578 stop_this_cpu(NULL);
14579 }
14580
14581 -static void native_machine_power_off(void)
14582 +__noreturn static void native_machine_power_off(void)
14583 {
14584 if (pm_power_off) {
14585 if (!reboot_force)
14586 @@ -682,6 +712,7 @@ static void native_machine_power_off(voi
14587 }
14588 /* a fallback in case there is no PM info available */
14589 tboot_shutdown(TB_SHUTDOWN_HALT);
14590 + unreachable();
14591 }
14592
14593 struct machine_ops machine_ops = {
14594 diff -urNp linux-3.0.4/arch/x86/kernel/setup.c linux-3.0.4/arch/x86/kernel/setup.c
14595 --- linux-3.0.4/arch/x86/kernel/setup.c 2011-07-21 22:17:23.000000000 -0400
14596 +++ linux-3.0.4/arch/x86/kernel/setup.c 2011-08-23 21:47:55.000000000 -0400
14597 @@ -650,7 +650,7 @@ static void __init trim_bios_range(void)
14598 * area (640->1Mb) as ram even though it is not.
14599 * take them out.
14600 */
14601 - e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
14602 + e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
14603 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
14604 }
14605
14606 @@ -773,14 +773,14 @@ void __init setup_arch(char **cmdline_p)
14607
14608 if (!boot_params.hdr.root_flags)
14609 root_mountflags &= ~MS_RDONLY;
14610 - init_mm.start_code = (unsigned long) _text;
14611 - init_mm.end_code = (unsigned long) _etext;
14612 + init_mm.start_code = ktla_ktva((unsigned long) _text);
14613 + init_mm.end_code = ktla_ktva((unsigned long) _etext);
14614 init_mm.end_data = (unsigned long) _edata;
14615 init_mm.brk = _brk_end;
14616
14617 - code_resource.start = virt_to_phys(_text);
14618 - code_resource.end = virt_to_phys(_etext)-1;
14619 - data_resource.start = virt_to_phys(_etext);
14620 + code_resource.start = virt_to_phys(ktla_ktva(_text));
14621 + code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
14622 + data_resource.start = virt_to_phys(_sdata);
14623 data_resource.end = virt_to_phys(_edata)-1;
14624 bss_resource.start = virt_to_phys(&__bss_start);
14625 bss_resource.end = virt_to_phys(&__bss_stop)-1;
14626 diff -urNp linux-3.0.4/arch/x86/kernel/setup_percpu.c linux-3.0.4/arch/x86/kernel/setup_percpu.c
14627 --- linux-3.0.4/arch/x86/kernel/setup_percpu.c 2011-07-21 22:17:23.000000000 -0400
14628 +++ linux-3.0.4/arch/x86/kernel/setup_percpu.c 2011-08-23 21:47:55.000000000 -0400
14629 @@ -21,19 +21,17 @@
14630 #include <asm/cpu.h>
14631 #include <asm/stackprotector.h>
14632
14633 -DEFINE_PER_CPU(int, cpu_number);
14634 +#ifdef CONFIG_SMP
14635 +DEFINE_PER_CPU(unsigned int, cpu_number);
14636 EXPORT_PER_CPU_SYMBOL(cpu_number);
14637 +#endif
14638
14639 -#ifdef CONFIG_X86_64
14640 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
14641 -#else
14642 -#define BOOT_PERCPU_OFFSET 0
14643 -#endif
14644
14645 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
14646 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
14647
14648 -unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
14649 +unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
14650 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
14651 };
14652 EXPORT_SYMBOL(__per_cpu_offset);
14653 @@ -155,10 +153,10 @@ static inline void setup_percpu_segment(
14654 {
14655 #ifdef CONFIG_X86_32
14656 struct desc_struct gdt;
14657 + unsigned long base = per_cpu_offset(cpu);
14658
14659 - pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
14660 - 0x2 | DESCTYPE_S, 0x8);
14661 - gdt.s = 1;
14662 + pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
14663 + 0x83 | DESCTYPE_S, 0xC);
14664 write_gdt_entry(get_cpu_gdt_table(cpu),
14665 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
14666 #endif
14667 @@ -207,6 +205,11 @@ void __init setup_per_cpu_areas(void)
14668 /* alrighty, percpu areas up and running */
14669 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
14670 for_each_possible_cpu(cpu) {
14671 +#ifdef CONFIG_CC_STACKPROTECTOR
14672 +#ifdef CONFIG_X86_32
14673 + unsigned long canary = per_cpu(stack_canary.canary, cpu);
14674 +#endif
14675 +#endif
14676 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
14677 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
14678 per_cpu(cpu_number, cpu) = cpu;
14679 @@ -247,6 +250,12 @@ void __init setup_per_cpu_areas(void)
14680 */
14681 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
14682 #endif
14683 +#ifdef CONFIG_CC_STACKPROTECTOR
14684 +#ifdef CONFIG_X86_32
14685 + if (!cpu)
14686 + per_cpu(stack_canary.canary, cpu) = canary;
14687 +#endif
14688 +#endif
14689 /*
14690 * Up to this point, the boot CPU has been using .init.data
14691 * area. Reload any changed state for the boot CPU.
14692 diff -urNp linux-3.0.4/arch/x86/kernel/signal.c linux-3.0.4/arch/x86/kernel/signal.c
14693 --- linux-3.0.4/arch/x86/kernel/signal.c 2011-07-21 22:17:23.000000000 -0400
14694 +++ linux-3.0.4/arch/x86/kernel/signal.c 2011-08-23 21:48:14.000000000 -0400
14695 @@ -198,7 +198,7 @@ static unsigned long align_sigframe(unsi
14696 * Align the stack pointer according to the i386 ABI,
14697 * i.e. so that on function entry ((sp + 4) & 15) == 0.
14698 */
14699 - sp = ((sp + 4) & -16ul) - 4;
14700 + sp = ((sp - 12) & -16ul) - 4;
14701 #else /* !CONFIG_X86_32 */
14702 sp = round_down(sp, 16) - 8;
14703 #endif
14704 @@ -249,11 +249,11 @@ get_sigframe(struct k_sigaction *ka, str
14705 * Return an always-bogus address instead so we will die with SIGSEGV.
14706 */
14707 if (onsigstack && !likely(on_sig_stack(sp)))
14708 - return (void __user *)-1L;
14709 + return (__force void __user *)-1L;
14710
14711 /* save i387 state */
14712 if (used_math() && save_i387_xstate(*fpstate) < 0)
14713 - return (void __user *)-1L;
14714 + return (__force void __user *)-1L;
14715
14716 return (void __user *)sp;
14717 }
14718 @@ -308,9 +308,9 @@ __setup_frame(int sig, struct k_sigactio
14719 }
14720
14721 if (current->mm->context.vdso)
14722 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
14723 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
14724 else
14725 - restorer = &frame->retcode;
14726 + restorer = (void __user *)&frame->retcode;
14727 if (ka->sa.sa_flags & SA_RESTORER)
14728 restorer = ka->sa.sa_restorer;
14729
14730 @@ -324,7 +324,7 @@ __setup_frame(int sig, struct k_sigactio
14731 * reasons and because gdb uses it as a signature to notice
14732 * signal handler stack frames.
14733 */
14734 - err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
14735 + err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
14736
14737 if (err)
14738 return -EFAULT;
14739 @@ -378,7 +378,10 @@ static int __setup_rt_frame(int sig, str
14740 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
14741
14742 /* Set up to return from userspace. */
14743 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
14744 + if (current->mm->context.vdso)
14745 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
14746 + else
14747 + restorer = (void __user *)&frame->retcode;
14748 if (ka->sa.sa_flags & SA_RESTORER)
14749 restorer = ka->sa.sa_restorer;
14750 put_user_ex(restorer, &frame->pretcode);
14751 @@ -390,7 +393,7 @@ static int __setup_rt_frame(int sig, str
14752 * reasons and because gdb uses it as a signature to notice
14753 * signal handler stack frames.
14754 */
14755 - put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
14756 + put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
14757 } put_user_catch(err);
14758
14759 if (err)
14760 @@ -769,6 +772,8 @@ static void do_signal(struct pt_regs *re
14761 int signr;
14762 sigset_t *oldset;
14763
14764 + pax_track_stack();
14765 +
14766 /*
14767 * We want the common case to go fast, which is why we may in certain
14768 * cases get here from kernel mode. Just return without doing anything
14769 @@ -776,7 +781,7 @@ static void do_signal(struct pt_regs *re
14770 * X86_32: vm86 regs switched out by assembly code before reaching
14771 * here, so testing against kernel CS suffices.
14772 */
14773 - if (!user_mode(regs))
14774 + if (!user_mode_novm(regs))
14775 return;
14776
14777 if (current_thread_info()->status & TS_RESTORE_SIGMASK)
14778 diff -urNp linux-3.0.4/arch/x86/kernel/smpboot.c linux-3.0.4/arch/x86/kernel/smpboot.c
14779 --- linux-3.0.4/arch/x86/kernel/smpboot.c 2011-07-21 22:17:23.000000000 -0400
14780 +++ linux-3.0.4/arch/x86/kernel/smpboot.c 2011-08-23 21:47:55.000000000 -0400
14781 @@ -709,17 +709,20 @@ static int __cpuinit do_boot_cpu(int api
14782 set_idle_for_cpu(cpu, c_idle.idle);
14783 do_rest:
14784 per_cpu(current_task, cpu) = c_idle.idle;
14785 + per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
14786 #ifdef CONFIG_X86_32
14787 /* Stack for startup_32 can be just as for start_secondary onwards */
14788 irq_ctx_init(cpu);
14789 #else
14790 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
14791 initial_gs = per_cpu_offset(cpu);
14792 - per_cpu(kernel_stack, cpu) =
14793 - (unsigned long)task_stack_page(c_idle.idle) -
14794 - KERNEL_STACK_OFFSET + THREAD_SIZE;
14795 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
14796 #endif
14797 +
14798 + pax_open_kernel();
14799 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
14800 + pax_close_kernel();
14801 +
14802 initial_code = (unsigned long)start_secondary;
14803 stack_start = c_idle.idle->thread.sp;
14804
14805 @@ -861,6 +864,12 @@ int __cpuinit native_cpu_up(unsigned int
14806
14807 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
14808
14809 +#ifdef CONFIG_PAX_PER_CPU_PGD
14810 + clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
14811 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
14812 + KERNEL_PGD_PTRS);
14813 +#endif
14814 +
14815 err = do_boot_cpu(apicid, cpu);
14816 if (err) {
14817 pr_debug("do_boot_cpu failed %d\n", err);
14818 diff -urNp linux-3.0.4/arch/x86/kernel/step.c linux-3.0.4/arch/x86/kernel/step.c
14819 --- linux-3.0.4/arch/x86/kernel/step.c 2011-07-21 22:17:23.000000000 -0400
14820 +++ linux-3.0.4/arch/x86/kernel/step.c 2011-08-23 21:47:55.000000000 -0400
14821 @@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struc
14822 struct desc_struct *desc;
14823 unsigned long base;
14824
14825 - seg &= ~7UL;
14826 + seg >>= 3;
14827
14828 mutex_lock(&child->mm->context.lock);
14829 - if (unlikely((seg >> 3) >= child->mm->context.size))
14830 + if (unlikely(seg >= child->mm->context.size))
14831 addr = -1L; /* bogus selector, access would fault */
14832 else {
14833 desc = child->mm->context.ldt + seg;
14834 @@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struc
14835 addr += base;
14836 }
14837 mutex_unlock(&child->mm->context.lock);
14838 - }
14839 + } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
14840 + addr = ktla_ktva(addr);
14841
14842 return addr;
14843 }
14844 @@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct t
14845 unsigned char opcode[15];
14846 unsigned long addr = convert_ip_to_linear(child, regs);
14847
14848 + if (addr == -EINVAL)
14849 + return 0;
14850 +
14851 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
14852 for (i = 0; i < copied; i++) {
14853 switch (opcode[i]) {
14854 @@ -74,7 +78,7 @@ static int is_setting_trap_flag(struct t
14855
14856 #ifdef CONFIG_X86_64
14857 case 0x40 ... 0x4f:
14858 - if (regs->cs != __USER_CS)
14859 + if ((regs->cs & 0xffff) != __USER_CS)
14860 /* 32-bit mode: register increment */
14861 return 0;
14862 /* 64-bit mode: REX prefix */
14863 diff -urNp linux-3.0.4/arch/x86/kernel/syscall_table_32.S linux-3.0.4/arch/x86/kernel/syscall_table_32.S
14864 --- linux-3.0.4/arch/x86/kernel/syscall_table_32.S 2011-07-21 22:17:23.000000000 -0400
14865 +++ linux-3.0.4/arch/x86/kernel/syscall_table_32.S 2011-08-23 21:47:55.000000000 -0400
14866 @@ -1,3 +1,4 @@
14867 +.section .rodata,"a",@progbits
14868 ENTRY(sys_call_table)
14869 .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
14870 .long sys_exit
14871 diff -urNp linux-3.0.4/arch/x86/kernel/sys_i386_32.c linux-3.0.4/arch/x86/kernel/sys_i386_32.c
14872 --- linux-3.0.4/arch/x86/kernel/sys_i386_32.c 2011-07-21 22:17:23.000000000 -0400
14873 +++ linux-3.0.4/arch/x86/kernel/sys_i386_32.c 2011-08-23 21:47:55.000000000 -0400
14874 @@ -24,17 +24,224 @@
14875
14876 #include <asm/syscalls.h>
14877
14878 -/*
14879 - * Do a system call from kernel instead of calling sys_execve so we
14880 - * end up with proper pt_regs.
14881 - */
14882 -int kernel_execve(const char *filename,
14883 - const char *const argv[],
14884 - const char *const envp[])
14885 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
14886 {
14887 - long __res;
14888 - asm volatile ("int $0x80"
14889 - : "=a" (__res)
14890 - : "0" (__NR_execve), "b" (filename), "c" (argv), "d" (envp) : "memory");
14891 - return __res;
14892 + unsigned long pax_task_size = TASK_SIZE;
14893 +
14894 +#ifdef CONFIG_PAX_SEGMEXEC
14895 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
14896 + pax_task_size = SEGMEXEC_TASK_SIZE;
14897 +#endif
14898 +
14899 + if (len > pax_task_size || addr > pax_task_size - len)
14900 + return -EINVAL;
14901 +
14902 + return 0;
14903 +}
14904 +
14905 +unsigned long
14906 +arch_get_unmapped_area(struct file *filp, unsigned long addr,
14907 + unsigned long len, unsigned long pgoff, unsigned long flags)
14908 +{
14909 + struct mm_struct *mm = current->mm;
14910 + struct vm_area_struct *vma;
14911 + unsigned long start_addr, pax_task_size = TASK_SIZE;
14912 +
14913 +#ifdef CONFIG_PAX_SEGMEXEC
14914 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
14915 + pax_task_size = SEGMEXEC_TASK_SIZE;
14916 +#endif
14917 +
14918 + pax_task_size -= PAGE_SIZE;
14919 +
14920 + if (len > pax_task_size)
14921 + return -ENOMEM;
14922 +
14923 + if (flags & MAP_FIXED)
14924 + return addr;
14925 +
14926 +#ifdef CONFIG_PAX_RANDMMAP
14927 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
14928 +#endif
14929 +
14930 + if (addr) {
14931 + addr = PAGE_ALIGN(addr);
14932 + if (pax_task_size - len >= addr) {
14933 + vma = find_vma(mm, addr);
14934 + if (check_heap_stack_gap(vma, addr, len))
14935 + return addr;
14936 + }
14937 + }
14938 + if (len > mm->cached_hole_size) {
14939 + start_addr = addr = mm->free_area_cache;
14940 + } else {
14941 + start_addr = addr = mm->mmap_base;
14942 + mm->cached_hole_size = 0;
14943 + }
14944 +
14945 +#ifdef CONFIG_PAX_PAGEEXEC
14946 + if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
14947 + start_addr = 0x00110000UL;
14948 +
14949 +#ifdef CONFIG_PAX_RANDMMAP
14950 + if (mm->pax_flags & MF_PAX_RANDMMAP)
14951 + start_addr += mm->delta_mmap & 0x03FFF000UL;
14952 +#endif
14953 +
14954 + if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
14955 + start_addr = addr = mm->mmap_base;
14956 + else
14957 + addr = start_addr;
14958 + }
14959 +#endif
14960 +
14961 +full_search:
14962 + for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
14963 + /* At this point: (!vma || addr < vma->vm_end). */
14964 + if (pax_task_size - len < addr) {
14965 + /*
14966 + * Start a new search - just in case we missed
14967 + * some holes.
14968 + */
14969 + if (start_addr != mm->mmap_base) {
14970 + start_addr = addr = mm->mmap_base;
14971 + mm->cached_hole_size = 0;
14972 + goto full_search;
14973 + }
14974 + return -ENOMEM;
14975 + }
14976 + if (check_heap_stack_gap(vma, addr, len))
14977 + break;
14978 + if (addr + mm->cached_hole_size < vma->vm_start)
14979 + mm->cached_hole_size = vma->vm_start - addr;
14980 + addr = vma->vm_end;
14981 + if (mm->start_brk <= addr && addr < mm->mmap_base) {
14982 + start_addr = addr = mm->mmap_base;
14983 + mm->cached_hole_size = 0;
14984 + goto full_search;
14985 + }
14986 + }
14987 +
14988 + /*
14989 + * Remember the place where we stopped the search:
14990 + */
14991 + mm->free_area_cache = addr + len;
14992 + return addr;
14993 +}
14994 +
14995 +unsigned long
14996 +arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
14997 + const unsigned long len, const unsigned long pgoff,
14998 + const unsigned long flags)
14999 +{
15000 + struct vm_area_struct *vma;
15001 + struct mm_struct *mm = current->mm;
15002 + unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
15003 +
15004 +#ifdef CONFIG_PAX_SEGMEXEC
15005 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
15006 + pax_task_size = SEGMEXEC_TASK_SIZE;
15007 +#endif
15008 +
15009 + pax_task_size -= PAGE_SIZE;
15010 +
15011 + /* requested length too big for entire address space */
15012 + if (len > pax_task_size)
15013 + return -ENOMEM;
15014 +
15015 + if (flags & MAP_FIXED)
15016 + return addr;
15017 +
15018 +#ifdef CONFIG_PAX_PAGEEXEC
15019 + if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
15020 + goto bottomup;
15021 +#endif
15022 +
15023 +#ifdef CONFIG_PAX_RANDMMAP
15024 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
15025 +#endif
15026 +
15027 + /* requesting a specific address */
15028 + if (addr) {
15029 + addr = PAGE_ALIGN(addr);
15030 + if (pax_task_size - len >= addr) {
15031 + vma = find_vma(mm, addr);
15032 + if (check_heap_stack_gap(vma, addr, len))
15033 + return addr;
15034 + }
15035 + }
15036 +
15037 + /* check if free_area_cache is useful for us */
15038 + if (len <= mm->cached_hole_size) {
15039 + mm->cached_hole_size = 0;
15040 + mm->free_area_cache = mm->mmap_base;
15041 + }
15042 +
15043 + /* either no address requested or can't fit in requested address hole */
15044 + addr = mm->free_area_cache;
15045 +
15046 + /* make sure it can fit in the remaining address space */
15047 + if (addr > len) {
15048 + vma = find_vma(mm, addr-len);
15049 + if (check_heap_stack_gap(vma, addr - len, len))
15050 + /* remember the address as a hint for next time */
15051 + return (mm->free_area_cache = addr-len);
15052 + }
15053 +
15054 + if (mm->mmap_base < len)
15055 + goto bottomup;
15056 +
15057 + addr = mm->mmap_base-len;
15058 +
15059 + do {
15060 + /*
15061 + * Lookup failure means no vma is above this address,
15062 + * else if new region fits below vma->vm_start,
15063 + * return with success:
15064 + */
15065 + vma = find_vma(mm, addr);
15066 + if (check_heap_stack_gap(vma, addr, len))
15067 + /* remember the address as a hint for next time */
15068 + return (mm->free_area_cache = addr);
15069 +
15070 + /* remember the largest hole we saw so far */
15071 + if (addr + mm->cached_hole_size < vma->vm_start)
15072 + mm->cached_hole_size = vma->vm_start - addr;
15073 +
15074 + /* try just below the current vma->vm_start */
15075 + addr = skip_heap_stack_gap(vma, len);
15076 + } while (!IS_ERR_VALUE(addr));
15077 +
15078 +bottomup:
15079 + /*
15080 + * A failed mmap() very likely causes application failure,
15081 + * so fall back to the bottom-up function here. This scenario
15082 + * can happen with large stack limits and large mmap()
15083 + * allocations.
15084 + */
15085 +
15086 +#ifdef CONFIG_PAX_SEGMEXEC
15087 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
15088 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
15089 + else
15090 +#endif
15091 +
15092 + mm->mmap_base = TASK_UNMAPPED_BASE;
15093 +
15094 +#ifdef CONFIG_PAX_RANDMMAP
15095 + if (mm->pax_flags & MF_PAX_RANDMMAP)
15096 + mm->mmap_base += mm->delta_mmap;
15097 +#endif
15098 +
15099 + mm->free_area_cache = mm->mmap_base;
15100 + mm->cached_hole_size = ~0UL;
15101 + addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
15102 + /*
15103 + * Restore the topdown base:
15104 + */
15105 + mm->mmap_base = base;
15106 + mm->free_area_cache = base;
15107 + mm->cached_hole_size = ~0UL;
15108 +
15109 + return addr;
15110 }
15111 diff -urNp linux-3.0.4/arch/x86/kernel/sys_x86_64.c linux-3.0.4/arch/x86/kernel/sys_x86_64.c
15112 --- linux-3.0.4/arch/x86/kernel/sys_x86_64.c 2011-07-21 22:17:23.000000000 -0400
15113 +++ linux-3.0.4/arch/x86/kernel/sys_x86_64.c 2011-08-23 21:47:55.000000000 -0400
15114 @@ -32,8 +32,8 @@ out:
15115 return error;
15116 }
15117
15118 -static void find_start_end(unsigned long flags, unsigned long *begin,
15119 - unsigned long *end)
15120 +static void find_start_end(struct mm_struct *mm, unsigned long flags,
15121 + unsigned long *begin, unsigned long *end)
15122 {
15123 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
15124 unsigned long new_begin;
15125 @@ -52,7 +52,7 @@ static void find_start_end(unsigned long
15126 *begin = new_begin;
15127 }
15128 } else {
15129 - *begin = TASK_UNMAPPED_BASE;
15130 + *begin = mm->mmap_base;
15131 *end = TASK_SIZE;
15132 }
15133 }
15134 @@ -69,16 +69,19 @@ arch_get_unmapped_area(struct file *filp
15135 if (flags & MAP_FIXED)
15136 return addr;
15137
15138 - find_start_end(flags, &begin, &end);
15139 + find_start_end(mm, flags, &begin, &end);
15140
15141 if (len > end)
15142 return -ENOMEM;
15143
15144 +#ifdef CONFIG_PAX_RANDMMAP
15145 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
15146 +#endif
15147 +
15148 if (addr) {
15149 addr = PAGE_ALIGN(addr);
15150 vma = find_vma(mm, addr);
15151 - if (end - len >= addr &&
15152 - (!vma || addr + len <= vma->vm_start))
15153 + if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
15154 return addr;
15155 }
15156 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
15157 @@ -106,7 +109,7 @@ full_search:
15158 }
15159 return -ENOMEM;
15160 }
15161 - if (!vma || addr + len <= vma->vm_start) {
15162 + if (check_heap_stack_gap(vma, addr, len)) {
15163 /*
15164 * Remember the place where we stopped the search:
15165 */
15166 @@ -128,7 +131,7 @@ arch_get_unmapped_area_topdown(struct fi
15167 {
15168 struct vm_area_struct *vma;
15169 struct mm_struct *mm = current->mm;
15170 - unsigned long addr = addr0;
15171 + unsigned long base = mm->mmap_base, addr = addr0;
15172
15173 /* requested length too big for entire address space */
15174 if (len > TASK_SIZE)
15175 @@ -141,13 +144,18 @@ arch_get_unmapped_area_topdown(struct fi
15176 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
15177 goto bottomup;
15178
15179 +#ifdef CONFIG_PAX_RANDMMAP
15180 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
15181 +#endif
15182 +
15183 /* requesting a specific address */
15184 if (addr) {
15185 addr = PAGE_ALIGN(addr);
15186 - vma = find_vma(mm, addr);
15187 - if (TASK_SIZE - len >= addr &&
15188 - (!vma || addr + len <= vma->vm_start))
15189 - return addr;
15190 + if (TASK_SIZE - len >= addr) {
15191 + vma = find_vma(mm, addr);
15192 + if (check_heap_stack_gap(vma, addr, len))
15193 + return addr;
15194 + }
15195 }
15196
15197 /* check if free_area_cache is useful for us */
15198 @@ -162,7 +170,7 @@ arch_get_unmapped_area_topdown(struct fi
15199 /* make sure it can fit in the remaining address space */
15200 if (addr > len) {
15201 vma = find_vma(mm, addr-len);
15202 - if (!vma || addr <= vma->vm_start)
15203 + if (check_heap_stack_gap(vma, addr - len, len))
15204 /* remember the address as a hint for next time */
15205 return mm->free_area_cache = addr-len;
15206 }
15207 @@ -179,7 +187,7 @@ arch_get_unmapped_area_topdown(struct fi
15208 * return with success:
15209 */
15210 vma = find_vma(mm, addr);
15211 - if (!vma || addr+len <= vma->vm_start)
15212 + if (check_heap_stack_gap(vma, addr, len))
15213 /* remember the address as a hint for next time */
15214 return mm->free_area_cache = addr;
15215
15216 @@ -188,8 +196,8 @@ arch_get_unmapped_area_topdown(struct fi
15217 mm->cached_hole_size = vma->vm_start - addr;
15218
15219 /* try just below the current vma->vm_start */
15220 - addr = vma->vm_start-len;
15221 - } while (len < vma->vm_start);
15222 + addr = skip_heap_stack_gap(vma, len);
15223 + } while (!IS_ERR_VALUE(addr));
15224
15225 bottomup:
15226 /*
15227 @@ -198,13 +206,21 @@ bottomup:
15228 * can happen with large stack limits and large mmap()
15229 * allocations.
15230 */
15231 + mm->mmap_base = TASK_UNMAPPED_BASE;
15232 +
15233 +#ifdef CONFIG_PAX_RANDMMAP
15234 + if (mm->pax_flags & MF_PAX_RANDMMAP)
15235 + mm->mmap_base += mm->delta_mmap;
15236 +#endif
15237 +
15238 + mm->free_area_cache = mm->mmap_base;
15239 mm->cached_hole_size = ~0UL;
15240 - mm->free_area_cache = TASK_UNMAPPED_BASE;
15241 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
15242 /*
15243 * Restore the topdown base:
15244 */
15245 - mm->free_area_cache = mm->mmap_base;
15246 + mm->mmap_base = base;
15247 + mm->free_area_cache = base;
15248 mm->cached_hole_size = ~0UL;
15249
15250 return addr;
15251 diff -urNp linux-3.0.4/arch/x86/kernel/tboot.c linux-3.0.4/arch/x86/kernel/tboot.c
15252 --- linux-3.0.4/arch/x86/kernel/tboot.c 2011-07-21 22:17:23.000000000 -0400
15253 +++ linux-3.0.4/arch/x86/kernel/tboot.c 2011-08-23 21:47:55.000000000 -0400
15254 @@ -217,7 +217,7 @@ static int tboot_setup_sleep(void)
15255
15256 void tboot_shutdown(u32 shutdown_type)
15257 {
15258 - void (*shutdown)(void);
15259 + void (* __noreturn shutdown)(void);
15260
15261 if (!tboot_enabled())
15262 return;
15263 @@ -239,7 +239,7 @@ void tboot_shutdown(u32 shutdown_type)
15264
15265 switch_to_tboot_pt();
15266
15267 - shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
15268 + shutdown = (void *)tboot->shutdown_entry;
15269 shutdown();
15270
15271 /* should not reach here */
15272 @@ -296,7 +296,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1
15273 tboot_shutdown(acpi_shutdown_map[sleep_state]);
15274 }
15275
15276 -static atomic_t ap_wfs_count;
15277 +static atomic_unchecked_t ap_wfs_count;
15278
15279 static int tboot_wait_for_aps(int num_aps)
15280 {
15281 @@ -320,9 +320,9 @@ static int __cpuinit tboot_cpu_callback(
15282 {
15283 switch (action) {
15284 case CPU_DYING:
15285 - atomic_inc(&ap_wfs_count);
15286 + atomic_inc_unchecked(&ap_wfs_count);
15287 if (num_online_cpus() == 1)
15288 - if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
15289 + if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
15290 return NOTIFY_BAD;
15291 break;
15292 }
15293 @@ -341,7 +341,7 @@ static __init int tboot_late_init(void)
15294
15295 tboot_create_trampoline();
15296
15297 - atomic_set(&ap_wfs_count, 0);
15298 + atomic_set_unchecked(&ap_wfs_count, 0);
15299 register_hotcpu_notifier(&tboot_cpu_notifier);
15300 return 0;
15301 }
15302 diff -urNp linux-3.0.4/arch/x86/kernel/time.c linux-3.0.4/arch/x86/kernel/time.c
15303 --- linux-3.0.4/arch/x86/kernel/time.c 2011-07-21 22:17:23.000000000 -0400
15304 +++ linux-3.0.4/arch/x86/kernel/time.c 2011-08-23 21:47:55.000000000 -0400
15305 @@ -30,9 +30,9 @@ unsigned long profile_pc(struct pt_regs
15306 {
15307 unsigned long pc = instruction_pointer(regs);
15308
15309 - if (!user_mode_vm(regs) && in_lock_functions(pc)) {
15310 + if (!user_mode(regs) && in_lock_functions(pc)) {
15311 #ifdef CONFIG_FRAME_POINTER
15312 - return *(unsigned long *)(regs->bp + sizeof(long));
15313 + return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
15314 #else
15315 unsigned long *sp =
15316 (unsigned long *)kernel_stack_pointer(regs);
15317 @@ -41,11 +41,17 @@ unsigned long profile_pc(struct pt_regs
15318 * or above a saved flags. Eflags has bits 22-31 zero,
15319 * kernel addresses don't.
15320 */
15321 +
15322 +#ifdef CONFIG_PAX_KERNEXEC
15323 + return ktla_ktva(sp[0]);
15324 +#else
15325 if (sp[0] >> 22)
15326 return sp[0];
15327 if (sp[1] >> 22)
15328 return sp[1];
15329 #endif
15330 +
15331 +#endif
15332 }
15333 return pc;
15334 }
15335 diff -urNp linux-3.0.4/arch/x86/kernel/tls.c linux-3.0.4/arch/x86/kernel/tls.c
15336 --- linux-3.0.4/arch/x86/kernel/tls.c 2011-07-21 22:17:23.000000000 -0400
15337 +++ linux-3.0.4/arch/x86/kernel/tls.c 2011-08-23 21:47:55.000000000 -0400
15338 @@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struc
15339 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
15340 return -EINVAL;
15341
15342 +#ifdef CONFIG_PAX_SEGMEXEC
15343 + if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
15344 + return -EINVAL;
15345 +#endif
15346 +
15347 set_tls_desc(p, idx, &info, 1);
15348
15349 return 0;
15350 diff -urNp linux-3.0.4/arch/x86/kernel/trampoline_32.S linux-3.0.4/arch/x86/kernel/trampoline_32.S
15351 --- linux-3.0.4/arch/x86/kernel/trampoline_32.S 2011-07-21 22:17:23.000000000 -0400
15352 +++ linux-3.0.4/arch/x86/kernel/trampoline_32.S 2011-08-23 21:47:55.000000000 -0400
15353 @@ -32,6 +32,12 @@
15354 #include <asm/segment.h>
15355 #include <asm/page_types.h>
15356
15357 +#ifdef CONFIG_PAX_KERNEXEC
15358 +#define ta(X) (X)
15359 +#else
15360 +#define ta(X) ((X) - __PAGE_OFFSET)
15361 +#endif
15362 +
15363 #ifdef CONFIG_SMP
15364
15365 .section ".x86_trampoline","a"
15366 @@ -62,7 +68,7 @@ r_base = .
15367 inc %ax # protected mode (PE) bit
15368 lmsw %ax # into protected mode
15369 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
15370 - ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
15371 + ljmpl $__BOOT_CS, $ta(startup_32_smp)
15372
15373 # These need to be in the same 64K segment as the above;
15374 # hence we don't use the boot_gdt_descr defined in head.S
15375 diff -urNp linux-3.0.4/arch/x86/kernel/trampoline_64.S linux-3.0.4/arch/x86/kernel/trampoline_64.S
15376 --- linux-3.0.4/arch/x86/kernel/trampoline_64.S 2011-07-21 22:17:23.000000000 -0400
15377 +++ linux-3.0.4/arch/x86/kernel/trampoline_64.S 2011-08-23 21:47:55.000000000 -0400
15378 @@ -90,7 +90,7 @@ startup_32:
15379 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
15380 movl %eax, %ds
15381
15382 - movl $X86_CR4_PAE, %eax
15383 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
15384 movl %eax, %cr4 # Enable PAE mode
15385
15386 # Setup trampoline 4 level pagetables
15387 @@ -138,7 +138,7 @@ tidt:
15388 # so the kernel can live anywhere
15389 .balign 4
15390 tgdt:
15391 - .short tgdt_end - tgdt # gdt limit
15392 + .short tgdt_end - tgdt - 1 # gdt limit
15393 .long tgdt - r_base
15394 .short 0
15395 .quad 0x00cf9b000000ffff # __KERNEL32_CS
15396 diff -urNp linux-3.0.4/arch/x86/kernel/traps.c linux-3.0.4/arch/x86/kernel/traps.c
15397 --- linux-3.0.4/arch/x86/kernel/traps.c 2011-07-21 22:17:23.000000000 -0400
15398 +++ linux-3.0.4/arch/x86/kernel/traps.c 2011-08-23 21:47:55.000000000 -0400
15399 @@ -70,12 +70,6 @@ asmlinkage int system_call(void);
15400
15401 /* Do we ignore FPU interrupts ? */
15402 char ignore_fpu_irq;
15403 -
15404 -/*
15405 - * The IDT has to be page-aligned to simplify the Pentium
15406 - * F0 0F bug workaround.
15407 - */
15408 -gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
15409 #endif
15410
15411 DECLARE_BITMAP(used_vectors, NR_VECTORS);
15412 @@ -117,13 +111,13 @@ static inline void preempt_conditional_c
15413 }
15414
15415 static void __kprobes
15416 -do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
15417 +do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
15418 long error_code, siginfo_t *info)
15419 {
15420 struct task_struct *tsk = current;
15421
15422 #ifdef CONFIG_X86_32
15423 - if (regs->flags & X86_VM_MASK) {
15424 + if (v8086_mode(regs)) {
15425 /*
15426 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
15427 * On nmi (interrupt 2), do_trap should not be called.
15428 @@ -134,7 +128,7 @@ do_trap(int trapnr, int signr, char *str
15429 }
15430 #endif
15431
15432 - if (!user_mode(regs))
15433 + if (!user_mode_novm(regs))
15434 goto kernel_trap;
15435
15436 #ifdef CONFIG_X86_32
15437 @@ -157,7 +151,7 @@ trap_signal:
15438 printk_ratelimit()) {
15439 printk(KERN_INFO
15440 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
15441 - tsk->comm, tsk->pid, str,
15442 + tsk->comm, task_pid_nr(tsk), str,
15443 regs->ip, regs->sp, error_code);
15444 print_vma_addr(" in ", regs->ip);
15445 printk("\n");
15446 @@ -174,8 +168,20 @@ kernel_trap:
15447 if (!fixup_exception(regs)) {
15448 tsk->thread.error_code = error_code;
15449 tsk->thread.trap_no = trapnr;
15450 +
15451 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
15452 + if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
15453 + str = "PAX: suspicious stack segment fault";
15454 +#endif
15455 +
15456 die(str, regs, error_code);
15457 }
15458 +
15459 +#ifdef CONFIG_PAX_REFCOUNT
15460 + if (trapnr == 4)
15461 + pax_report_refcount_overflow(regs);
15462 +#endif
15463 +
15464 return;
15465
15466 #ifdef CONFIG_X86_32
15467 @@ -264,14 +270,30 @@ do_general_protection(struct pt_regs *re
15468 conditional_sti(regs);
15469
15470 #ifdef CONFIG_X86_32
15471 - if (regs->flags & X86_VM_MASK)
15472 + if (v8086_mode(regs))
15473 goto gp_in_vm86;
15474 #endif
15475
15476 tsk = current;
15477 - if (!user_mode(regs))
15478 + if (!user_mode_novm(regs))
15479 goto gp_in_kernel;
15480
15481 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
15482 + if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
15483 + struct mm_struct *mm = tsk->mm;
15484 + unsigned long limit;
15485 +
15486 + down_write(&mm->mmap_sem);
15487 + limit = mm->context.user_cs_limit;
15488 + if (limit < TASK_SIZE) {
15489 + track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
15490 + up_write(&mm->mmap_sem);
15491 + return;
15492 + }
15493 + up_write(&mm->mmap_sem);
15494 + }
15495 +#endif
15496 +
15497 tsk->thread.error_code = error_code;
15498 tsk->thread.trap_no = 13;
15499
15500 @@ -304,6 +326,13 @@ gp_in_kernel:
15501 if (notify_die(DIE_GPF, "general protection fault", regs,
15502 error_code, 13, SIGSEGV) == NOTIFY_STOP)
15503 return;
15504 +
15505 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
15506 + if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
15507 + die("PAX: suspicious general protection fault", regs, error_code);
15508 + else
15509 +#endif
15510 +
15511 die("general protection fault", regs, error_code);
15512 }
15513
15514 @@ -433,6 +462,17 @@ static notrace __kprobes void default_do
15515 dotraplinkage notrace __kprobes void
15516 do_nmi(struct pt_regs *regs, long error_code)
15517 {
15518 +
15519 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
15520 + if (!user_mode(regs)) {
15521 + unsigned long cs = regs->cs & 0xFFFF;
15522 + unsigned long ip = ktva_ktla(regs->ip);
15523 +
15524 + if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
15525 + regs->ip = ip;
15526 + }
15527 +#endif
15528 +
15529 nmi_enter();
15530
15531 inc_irq_stat(__nmi_count);
15532 @@ -569,7 +609,7 @@ dotraplinkage void __kprobes do_debug(st
15533 /* It's safe to allow irq's after DR6 has been saved */
15534 preempt_conditional_sti(regs);
15535
15536 - if (regs->flags & X86_VM_MASK) {
15537 + if (v8086_mode(regs)) {
15538 handle_vm86_trap((struct kernel_vm86_regs *) regs,
15539 error_code, 1);
15540 preempt_conditional_cli(regs);
15541 @@ -583,7 +623,7 @@ dotraplinkage void __kprobes do_debug(st
15542 * We already checked v86 mode above, so we can check for kernel mode
15543 * by just checking the CPL of CS.
15544 */
15545 - if ((dr6 & DR_STEP) && !user_mode(regs)) {
15546 + if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
15547 tsk->thread.debugreg6 &= ~DR_STEP;
15548 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
15549 regs->flags &= ~X86_EFLAGS_TF;
15550 @@ -612,7 +652,7 @@ void math_error(struct pt_regs *regs, in
15551 return;
15552 conditional_sti(regs);
15553
15554 - if (!user_mode_vm(regs))
15555 + if (!user_mode(regs))
15556 {
15557 if (!fixup_exception(regs)) {
15558 task->thread.error_code = error_code;
15559 @@ -723,7 +763,7 @@ asmlinkage void __attribute__((weak)) sm
15560 void __math_state_restore(void)
15561 {
15562 struct thread_info *thread = current_thread_info();
15563 - struct task_struct *tsk = thread->task;
15564 + struct task_struct *tsk = current;
15565
15566 /*
15567 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
15568 @@ -750,8 +790,7 @@ void __math_state_restore(void)
15569 */
15570 asmlinkage void math_state_restore(void)
15571 {
15572 - struct thread_info *thread = current_thread_info();
15573 - struct task_struct *tsk = thread->task;
15574 + struct task_struct *tsk = current;
15575
15576 if (!tsk_used_math(tsk)) {
15577 local_irq_enable();
15578 diff -urNp linux-3.0.4/arch/x86/kernel/verify_cpu.S linux-3.0.4/arch/x86/kernel/verify_cpu.S
15579 --- linux-3.0.4/arch/x86/kernel/verify_cpu.S 2011-07-21 22:17:23.000000000 -0400
15580 +++ linux-3.0.4/arch/x86/kernel/verify_cpu.S 2011-08-23 21:48:14.000000000 -0400
15581 @@ -20,6 +20,7 @@
15582 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
15583 * arch/x86/kernel/trampoline_64.S: secondary processor verification
15584 * arch/x86/kernel/head_32.S: processor startup
15585 + * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
15586 *
15587 * verify_cpu, returns the status of longmode and SSE in register %eax.
15588 * 0: Success 1: Failure
15589 diff -urNp linux-3.0.4/arch/x86/kernel/vm86_32.c linux-3.0.4/arch/x86/kernel/vm86_32.c
15590 --- linux-3.0.4/arch/x86/kernel/vm86_32.c 2011-07-21 22:17:23.000000000 -0400
15591 +++ linux-3.0.4/arch/x86/kernel/vm86_32.c 2011-08-23 21:48:14.000000000 -0400
15592 @@ -41,6 +41,7 @@
15593 #include <linux/ptrace.h>
15594 #include <linux/audit.h>
15595 #include <linux/stddef.h>
15596 +#include <linux/grsecurity.h>
15597
15598 #include <asm/uaccess.h>
15599 #include <asm/io.h>
15600 @@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct ke
15601 do_exit(SIGSEGV);
15602 }
15603
15604 - tss = &per_cpu(init_tss, get_cpu());
15605 + tss = init_tss + get_cpu();
15606 current->thread.sp0 = current->thread.saved_sp0;
15607 current->thread.sysenter_cs = __KERNEL_CS;
15608 load_sp0(tss, &current->thread);
15609 @@ -208,6 +209,13 @@ int sys_vm86old(struct vm86_struct __use
15610 struct task_struct *tsk;
15611 int tmp, ret = -EPERM;
15612
15613 +#ifdef CONFIG_GRKERNSEC_VM86
15614 + if (!capable(CAP_SYS_RAWIO)) {
15615 + gr_handle_vm86();
15616 + goto out;
15617 + }
15618 +#endif
15619 +
15620 tsk = current;
15621 if (tsk->thread.saved_sp0)
15622 goto out;
15623 @@ -238,6 +246,14 @@ int sys_vm86(unsigned long cmd, unsigned
15624 int tmp, ret;
15625 struct vm86plus_struct __user *v86;
15626
15627 +#ifdef CONFIG_GRKERNSEC_VM86
15628 + if (!capable(CAP_SYS_RAWIO)) {
15629 + gr_handle_vm86();
15630 + ret = -EPERM;
15631 + goto out;
15632 + }
15633 +#endif
15634 +
15635 tsk = current;
15636 switch (cmd) {
15637 case VM86_REQUEST_IRQ:
15638 @@ -324,7 +340,7 @@ static void do_sys_vm86(struct kernel_vm
15639 tsk->thread.saved_fs = info->regs32->fs;
15640 tsk->thread.saved_gs = get_user_gs(info->regs32);
15641
15642 - tss = &per_cpu(init_tss, get_cpu());
15643 + tss = init_tss + get_cpu();
15644 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
15645 if (cpu_has_sep)
15646 tsk->thread.sysenter_cs = 0;
15647 @@ -529,7 +545,7 @@ static void do_int(struct kernel_vm86_re
15648 goto cannot_handle;
15649 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
15650 goto cannot_handle;
15651 - intr_ptr = (unsigned long __user *) (i << 2);
15652 + intr_ptr = (__force unsigned long __user *) (i << 2);
15653 if (get_user(segoffs, intr_ptr))
15654 goto cannot_handle;
15655 if ((segoffs >> 16) == BIOSSEG)
15656 diff -urNp linux-3.0.4/arch/x86/kernel/vmlinux.lds.S linux-3.0.4/arch/x86/kernel/vmlinux.lds.S
15657 --- linux-3.0.4/arch/x86/kernel/vmlinux.lds.S 2011-07-21 22:17:23.000000000 -0400
15658 +++ linux-3.0.4/arch/x86/kernel/vmlinux.lds.S 2011-08-23 21:47:55.000000000 -0400
15659 @@ -26,6 +26,13 @@
15660 #include <asm/page_types.h>
15661 #include <asm/cache.h>
15662 #include <asm/boot.h>
15663 +#include <asm/segment.h>
15664 +
15665 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
15666 +#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
15667 +#else
15668 +#define __KERNEL_TEXT_OFFSET 0
15669 +#endif
15670
15671 #undef i386 /* in case the preprocessor is a 32bit one */
15672
15673 @@ -69,31 +76,46 @@ jiffies_64 = jiffies;
15674
15675 PHDRS {
15676 text PT_LOAD FLAGS(5); /* R_E */
15677 +#ifdef CONFIG_X86_32
15678 + module PT_LOAD FLAGS(5); /* R_E */
15679 +#endif
15680 +#ifdef CONFIG_XEN
15681 + rodata PT_LOAD FLAGS(5); /* R_E */
15682 +#else
15683 + rodata PT_LOAD FLAGS(4); /* R__ */
15684 +#endif
15685 data PT_LOAD FLAGS(6); /* RW_ */
15686 #ifdef CONFIG_X86_64
15687 user PT_LOAD FLAGS(5); /* R_E */
15688 +#endif
15689 + init.begin PT_LOAD FLAGS(6); /* RW_ */
15690 #ifdef CONFIG_SMP
15691 percpu PT_LOAD FLAGS(6); /* RW_ */
15692 #endif
15693 + text.init PT_LOAD FLAGS(5); /* R_E */
15694 + text.exit PT_LOAD FLAGS(5); /* R_E */
15695 init PT_LOAD FLAGS(7); /* RWE */
15696 -#endif
15697 note PT_NOTE FLAGS(0); /* ___ */
15698 }
15699
15700 SECTIONS
15701 {
15702 #ifdef CONFIG_X86_32
15703 - . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
15704 - phys_startup_32 = startup_32 - LOAD_OFFSET;
15705 + . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
15706 #else
15707 - . = __START_KERNEL;
15708 - phys_startup_64 = startup_64 - LOAD_OFFSET;
15709 + . = __START_KERNEL;
15710 #endif
15711
15712 /* Text and read-only data */
15713 - .text : AT(ADDR(.text) - LOAD_OFFSET) {
15714 - _text = .;
15715 + .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
15716 /* bootstrapping code */
15717 +#ifdef CONFIG_X86_32
15718 + phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
15719 +#else
15720 + phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
15721 +#endif
15722 + __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
15723 + _text = .;
15724 HEAD_TEXT
15725 #ifdef CONFIG_X86_32
15726 . = ALIGN(PAGE_SIZE);
15727 @@ -109,13 +131,47 @@ SECTIONS
15728 IRQENTRY_TEXT
15729 *(.fixup)
15730 *(.gnu.warning)
15731 - /* End of text section */
15732 - _etext = .;
15733 } :text = 0x9090
15734
15735 - NOTES :text :note
15736 + . += __KERNEL_TEXT_OFFSET;
15737 +
15738 +#ifdef CONFIG_X86_32
15739 + . = ALIGN(PAGE_SIZE);
15740 + .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
15741 +
15742 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
15743 + MODULES_EXEC_VADDR = .;
15744 + BYTE(0)
15745 + . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
15746 + . = ALIGN(HPAGE_SIZE);
15747 + MODULES_EXEC_END = . - 1;
15748 +#endif
15749 +
15750 + } :module
15751 +#endif
15752 +
15753 + .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
15754 + /* End of text section */
15755 + _etext = . - __KERNEL_TEXT_OFFSET;
15756 + }
15757 +
15758 +#ifdef CONFIG_X86_32
15759 + . = ALIGN(PAGE_SIZE);
15760 + .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
15761 + *(.idt)
15762 + . = ALIGN(PAGE_SIZE);
15763 + *(.empty_zero_page)
15764 + *(.initial_pg_fixmap)
15765 + *(.initial_pg_pmd)
15766 + *(.initial_page_table)
15767 + *(.swapper_pg_dir)
15768 + } :rodata
15769 +#endif
15770 +
15771 + . = ALIGN(PAGE_SIZE);
15772 + NOTES :rodata :note
15773
15774 - EXCEPTION_TABLE(16) :text = 0x9090
15775 + EXCEPTION_TABLE(16) :rodata
15776
15777 #if defined(CONFIG_DEBUG_RODATA)
15778 /* .text should occupy whole number of pages */
15779 @@ -127,16 +183,20 @@ SECTIONS
15780
15781 /* Data */
15782 .data : AT(ADDR(.data) - LOAD_OFFSET) {
15783 +
15784 +#ifdef CONFIG_PAX_KERNEXEC
15785 + . = ALIGN(HPAGE_SIZE);
15786 +#else
15787 + . = ALIGN(PAGE_SIZE);
15788 +#endif
15789 +
15790 /* Start of data section */
15791 _sdata = .;
15792
15793 /* init_task */
15794 INIT_TASK_DATA(THREAD_SIZE)
15795
15796 -#ifdef CONFIG_X86_32
15797 - /* 32 bit has nosave before _edata */
15798 NOSAVE_DATA
15799 -#endif
15800
15801 PAGE_ALIGNED_DATA(PAGE_SIZE)
15802
15803 @@ -208,12 +268,19 @@ SECTIONS
15804 #endif /* CONFIG_X86_64 */
15805
15806 /* Init code and data - will be freed after init */
15807 - . = ALIGN(PAGE_SIZE);
15808 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
15809 + BYTE(0)
15810 +
15811 +#ifdef CONFIG_PAX_KERNEXEC
15812 + . = ALIGN(HPAGE_SIZE);
15813 +#else
15814 + . = ALIGN(PAGE_SIZE);
15815 +#endif
15816 +
15817 __init_begin = .; /* paired with __init_end */
15818 - }
15819 + } :init.begin
15820
15821 -#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
15822 +#ifdef CONFIG_SMP
15823 /*
15824 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
15825 * output PHDR, so the next output section - .init.text - should
15826 @@ -222,12 +289,27 @@ SECTIONS
15827 PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
15828 #endif
15829
15830 - INIT_TEXT_SECTION(PAGE_SIZE)
15831 -#ifdef CONFIG_X86_64
15832 - :init
15833 -#endif
15834 + . = ALIGN(PAGE_SIZE);
15835 + init_begin = .;
15836 + .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
15837 + VMLINUX_SYMBOL(_sinittext) = .;
15838 + INIT_TEXT
15839 + VMLINUX_SYMBOL(_einittext) = .;
15840 + . = ALIGN(PAGE_SIZE);
15841 + } :text.init
15842
15843 - INIT_DATA_SECTION(16)
15844 + /*
15845 + * .exit.text is discard at runtime, not link time, to deal with
15846 + * references from .altinstructions and .eh_frame
15847 + */
15848 + .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
15849 + EXIT_TEXT
15850 + . = ALIGN(16);
15851 + } :text.exit
15852 + . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
15853 +
15854 + . = ALIGN(PAGE_SIZE);
15855 + INIT_DATA_SECTION(16) :init
15856
15857 /*
15858 * Code and data for a variety of lowlevel trampolines, to be
15859 @@ -301,19 +383,12 @@ SECTIONS
15860 }
15861
15862 . = ALIGN(8);
15863 - /*
15864 - * .exit.text is discard at runtime, not link time, to deal with
15865 - * references from .altinstructions and .eh_frame
15866 - */
15867 - .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
15868 - EXIT_TEXT
15869 - }
15870
15871 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
15872 EXIT_DATA
15873 }
15874
15875 -#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
15876 +#ifndef CONFIG_SMP
15877 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
15878 #endif
15879
15880 @@ -332,16 +407,10 @@ SECTIONS
15881 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
15882 __smp_locks = .;
15883 *(.smp_locks)
15884 - . = ALIGN(PAGE_SIZE);
15885 __smp_locks_end = .;
15886 + . = ALIGN(PAGE_SIZE);
15887 }
15888
15889 -#ifdef CONFIG_X86_64
15890 - .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
15891 - NOSAVE_DATA
15892 - }
15893 -#endif
15894 -
15895 /* BSS */
15896 . = ALIGN(PAGE_SIZE);
15897 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
15898 @@ -357,6 +426,7 @@ SECTIONS
15899 __brk_base = .;
15900 . += 64 * 1024; /* 64k alignment slop space */
15901 *(.brk_reservation) /* areas brk users have reserved */
15902 + . = ALIGN(HPAGE_SIZE);
15903 __brk_limit = .;
15904 }
15905
15906 @@ -383,13 +453,12 @@ SECTIONS
15907 * for the boot processor.
15908 */
15909 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
15910 -INIT_PER_CPU(gdt_page);
15911 INIT_PER_CPU(irq_stack_union);
15912
15913 /*
15914 * Build-time check on the image size:
15915 */
15916 -. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
15917 +. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
15918 "kernel image bigger than KERNEL_IMAGE_SIZE");
15919
15920 #ifdef CONFIG_SMP
15921 diff -urNp linux-3.0.4/arch/x86/kernel/vsyscall_64.c linux-3.0.4/arch/x86/kernel/vsyscall_64.c
15922 --- linux-3.0.4/arch/x86/kernel/vsyscall_64.c 2011-07-21 22:17:23.000000000 -0400
15923 +++ linux-3.0.4/arch/x86/kernel/vsyscall_64.c 2011-08-23 21:47:55.000000000 -0400
15924 @@ -53,7 +53,7 @@ DEFINE_VVAR(int, vgetcpu_mode);
15925 DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data) =
15926 {
15927 .lock = __SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock),
15928 - .sysctl_enabled = 1,
15929 + .sysctl_enabled = 0,
15930 };
15931
15932 void update_vsyscall_tz(void)
15933 @@ -231,7 +231,7 @@ static long __vsyscall(3) venosys_1(void
15934 static ctl_table kernel_table2[] = {
15935 { .procname = "vsyscall64",
15936 .data = &vsyscall_gtod_data.sysctl_enabled, .maxlen = sizeof(int),
15937 - .mode = 0644,
15938 + .mode = 0444,
15939 .proc_handler = proc_dointvec },
15940 {}
15941 };
15942 diff -urNp linux-3.0.4/arch/x86/kernel/x8664_ksyms_64.c linux-3.0.4/arch/x86/kernel/x8664_ksyms_64.c
15943 --- linux-3.0.4/arch/x86/kernel/x8664_ksyms_64.c 2011-07-21 22:17:23.000000000 -0400
15944 +++ linux-3.0.4/arch/x86/kernel/x8664_ksyms_64.c 2011-08-23 21:47:55.000000000 -0400
15945 @@ -29,8 +29,6 @@ EXPORT_SYMBOL(__put_user_8);
15946 EXPORT_SYMBOL(copy_user_generic_string);
15947 EXPORT_SYMBOL(copy_user_generic_unrolled);
15948 EXPORT_SYMBOL(__copy_user_nocache);
15949 -EXPORT_SYMBOL(_copy_from_user);
15950 -EXPORT_SYMBOL(_copy_to_user);
15951
15952 EXPORT_SYMBOL(copy_page);
15953 EXPORT_SYMBOL(clear_page);
15954 diff -urNp linux-3.0.4/arch/x86/kernel/xsave.c linux-3.0.4/arch/x86/kernel/xsave.c
15955 --- linux-3.0.4/arch/x86/kernel/xsave.c 2011-07-21 22:17:23.000000000 -0400
15956 +++ linux-3.0.4/arch/x86/kernel/xsave.c 2011-08-23 21:47:55.000000000 -0400
15957 @@ -130,7 +130,7 @@ int check_for_xstate(struct i387_fxsave_
15958 fx_sw_user->xstate_size > fx_sw_user->extended_size)
15959 return -EINVAL;
15960
15961 - err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
15962 + err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
15963 fx_sw_user->extended_size -
15964 FP_XSTATE_MAGIC2_SIZE));
15965 if (err)
15966 @@ -267,7 +267,7 @@ fx_only:
15967 * the other extended state.
15968 */
15969 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
15970 - return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
15971 + return fxrstor_checking((struct i387_fxsave_struct __user *)buf);
15972 }
15973
15974 /*
15975 @@ -299,7 +299,7 @@ int restore_i387_xstate(void __user *buf
15976 if (use_xsave())
15977 err = restore_user_xstate(buf);
15978 else
15979 - err = fxrstor_checking((__force struct i387_fxsave_struct *)
15980 + err = fxrstor_checking((struct i387_fxsave_struct __user *)
15981 buf);
15982 if (unlikely(err)) {
15983 /*
15984 diff -urNp linux-3.0.4/arch/x86/kvm/emulate.c linux-3.0.4/arch/x86/kvm/emulate.c
15985 --- linux-3.0.4/arch/x86/kvm/emulate.c 2011-07-21 22:17:23.000000000 -0400
15986 +++ linux-3.0.4/arch/x86/kvm/emulate.c 2011-08-23 21:47:55.000000000 -0400
15987 @@ -96,7 +96,7 @@
15988 #define Src2ImmByte (2<<29)
15989 #define Src2One (3<<29)
15990 #define Src2Imm (4<<29)
15991 -#define Src2Mask (7<<29)
15992 +#define Src2Mask (7U<<29)
15993
15994 #define X2(x...) x, x
15995 #define X3(x...) X2(x), x
15996 @@ -207,6 +207,7 @@ struct gprefix {
15997
15998 #define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix, _dsttype) \
15999 do { \
16000 + unsigned long _tmp; \
16001 __asm__ __volatile__ ( \
16002 _PRE_EFLAGS("0", "4", "2") \
16003 _op _suffix " %"_x"3,%1; " \
16004 @@ -220,8 +221,6 @@ struct gprefix {
16005 /* Raw emulation: instruction has two explicit operands. */
16006 #define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
16007 do { \
16008 - unsigned long _tmp; \
16009 - \
16010 switch ((_dst).bytes) { \
16011 case 2: \
16012 ____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w",u16);\
16013 @@ -237,7 +236,6 @@ struct gprefix {
16014
16015 #define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
16016 do { \
16017 - unsigned long _tmp; \
16018 switch ((_dst).bytes) { \
16019 case 1: \
16020 ____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b",u8); \
16021 diff -urNp linux-3.0.4/arch/x86/kvm/lapic.c linux-3.0.4/arch/x86/kvm/lapic.c
16022 --- linux-3.0.4/arch/x86/kvm/lapic.c 2011-07-21 22:17:23.000000000 -0400
16023 +++ linux-3.0.4/arch/x86/kvm/lapic.c 2011-08-23 21:47:55.000000000 -0400
16024 @@ -53,7 +53,7 @@
16025 #define APIC_BUS_CYCLE_NS 1
16026
16027 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
16028 -#define apic_debug(fmt, arg...)
16029 +#define apic_debug(fmt, arg...) do {} while (0)
16030
16031 #define APIC_LVT_NUM 6
16032 /* 14 is the version for Xeon and Pentium 8.4.8*/
16033 diff -urNp linux-3.0.4/arch/x86/kvm/mmu.c linux-3.0.4/arch/x86/kvm/mmu.c
16034 --- linux-3.0.4/arch/x86/kvm/mmu.c 2011-07-21 22:17:23.000000000 -0400
16035 +++ linux-3.0.4/arch/x86/kvm/mmu.c 2011-08-23 21:47:55.000000000 -0400
16036 @@ -3238,7 +3238,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *
16037
16038 pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
16039
16040 - invlpg_counter = atomic_read(&vcpu->kvm->arch.invlpg_counter);
16041 + invlpg_counter = atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter);
16042
16043 /*
16044 * Assume that the pte write on a page table of the same type
16045 @@ -3270,7 +3270,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *
16046 }
16047
16048 spin_lock(&vcpu->kvm->mmu_lock);
16049 - if (atomic_read(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
16050 + if (atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
16051 gentry = 0;
16052 kvm_mmu_free_some_pages(vcpu);
16053 ++vcpu->kvm->stat.mmu_pte_write;
16054 diff -urNp linux-3.0.4/arch/x86/kvm/paging_tmpl.h linux-3.0.4/arch/x86/kvm/paging_tmpl.h
16055 --- linux-3.0.4/arch/x86/kvm/paging_tmpl.h 2011-07-21 22:17:23.000000000 -0400
16056 +++ linux-3.0.4/arch/x86/kvm/paging_tmpl.h 2011-08-23 21:48:14.000000000 -0400
16057 @@ -583,6 +583,8 @@ static int FNAME(page_fault)(struct kvm_
16058 unsigned long mmu_seq;
16059 bool map_writable;
16060
16061 + pax_track_stack();
16062 +
16063 pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
16064
16065 r = mmu_topup_memory_caches(vcpu);
16066 @@ -703,7 +705,7 @@ static void FNAME(invlpg)(struct kvm_vcp
16067 if (need_flush)
16068 kvm_flush_remote_tlbs(vcpu->kvm);
16069
16070 - atomic_inc(&vcpu->kvm->arch.invlpg_counter);
16071 + atomic_inc_unchecked(&vcpu->kvm->arch.invlpg_counter);
16072
16073 spin_unlock(&vcpu->kvm->mmu_lock);
16074
16075 diff -urNp linux-3.0.4/arch/x86/kvm/svm.c linux-3.0.4/arch/x86/kvm/svm.c
16076 --- linux-3.0.4/arch/x86/kvm/svm.c 2011-07-21 22:17:23.000000000 -0400
16077 +++ linux-3.0.4/arch/x86/kvm/svm.c 2011-08-23 21:47:55.000000000 -0400
16078 @@ -3377,7 +3377,11 @@ static void reload_tss(struct kvm_vcpu *
16079 int cpu = raw_smp_processor_id();
16080
16081 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
16082 +
16083 + pax_open_kernel();
16084 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
16085 + pax_close_kernel();
16086 +
16087 load_TR_desc();
16088 }
16089
16090 @@ -3755,6 +3759,10 @@ static void svm_vcpu_run(struct kvm_vcpu
16091 #endif
16092 #endif
16093
16094 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
16095 + __set_fs(current_thread_info()->addr_limit);
16096 +#endif
16097 +
16098 reload_tss(vcpu);
16099
16100 local_irq_disable();
16101 diff -urNp linux-3.0.4/arch/x86/kvm/vmx.c linux-3.0.4/arch/x86/kvm/vmx.c
16102 --- linux-3.0.4/arch/x86/kvm/vmx.c 2011-07-21 22:17:23.000000000 -0400
16103 +++ linux-3.0.4/arch/x86/kvm/vmx.c 2011-08-23 21:47:55.000000000 -0400
16104 @@ -797,7 +797,11 @@ static void reload_tss(void)
16105 struct desc_struct *descs;
16106
16107 descs = (void *)gdt->address;
16108 +
16109 + pax_open_kernel();
16110 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
16111 + pax_close_kernel();
16112 +
16113 load_TR_desc();
16114 }
16115
16116 @@ -1747,8 +1751,11 @@ static __init int hardware_setup(void)
16117 if (!cpu_has_vmx_flexpriority())
16118 flexpriority_enabled = 0;
16119
16120 - if (!cpu_has_vmx_tpr_shadow())
16121 - kvm_x86_ops->update_cr8_intercept = NULL;
16122 + if (!cpu_has_vmx_tpr_shadow()) {
16123 + pax_open_kernel();
16124 + *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
16125 + pax_close_kernel();
16126 + }
16127
16128 if (enable_ept && !cpu_has_vmx_ept_2m_page())
16129 kvm_disable_largepages();
16130 @@ -2814,7 +2821,7 @@ static int vmx_vcpu_setup(struct vcpu_vm
16131 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
16132
16133 asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
16134 - vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
16135 + vmcs_writel(HOST_RIP, ktla_ktva(kvm_vmx_return)); /* 22.2.5 */
16136 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
16137 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
16138 vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host));
16139 @@ -4211,6 +4218,12 @@ static void __noclone vmx_vcpu_run(struc
16140 "jmp .Lkvm_vmx_return \n\t"
16141 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
16142 ".Lkvm_vmx_return: "
16143 +
16144 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16145 + "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
16146 + ".Lkvm_vmx_return2: "
16147 +#endif
16148 +
16149 /* Save guest registers, load host registers, keep flags */
16150 "mov %0, %c[wordsize](%%"R"sp) \n\t"
16151 "pop %0 \n\t"
16152 @@ -4259,6 +4272,11 @@ static void __noclone vmx_vcpu_run(struc
16153 #endif
16154 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
16155 [wordsize]"i"(sizeof(ulong))
16156 +
16157 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16158 + ,[cs]"i"(__KERNEL_CS)
16159 +#endif
16160 +
16161 : "cc", "memory"
16162 , R"ax", R"bx", R"di", R"si"
16163 #ifdef CONFIG_X86_64
16164 @@ -4276,7 +4294,16 @@ static void __noclone vmx_vcpu_run(struc
16165
16166 vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
16167
16168 - asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
16169 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
16170 +
16171 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16172 + loadsegment(fs, __KERNEL_PERCPU);
16173 +#endif
16174 +
16175 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
16176 + __set_fs(current_thread_info()->addr_limit);
16177 +#endif
16178 +
16179 vmx->launched = 1;
16180
16181 vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
16182 diff -urNp linux-3.0.4/arch/x86/kvm/x86.c linux-3.0.4/arch/x86/kvm/x86.c
16183 --- linux-3.0.4/arch/x86/kvm/x86.c 2011-07-21 22:17:23.000000000 -0400
16184 +++ linux-3.0.4/arch/x86/kvm/x86.c 2011-08-23 21:47:55.000000000 -0400
16185 @@ -2057,6 +2057,8 @@ long kvm_arch_dev_ioctl(struct file *fil
16186 if (n < msr_list.nmsrs)
16187 goto out;
16188 r = -EFAULT;
16189 + if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
16190 + goto out;
16191 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
16192 num_msrs_to_save * sizeof(u32)))
16193 goto out;
16194 @@ -2229,15 +2231,20 @@ static int kvm_vcpu_ioctl_set_cpuid2(str
16195 struct kvm_cpuid2 *cpuid,
16196 struct kvm_cpuid_entry2 __user *entries)
16197 {
16198 - int r;
16199 + int r, i;
16200
16201 r = -E2BIG;
16202 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
16203 goto out;
16204 r = -EFAULT;
16205 - if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
16206 - cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
16207 + if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
16208 goto out;
16209 + for (i = 0; i < cpuid->nent; ++i) {
16210 + struct kvm_cpuid_entry2 cpuid_entry;
16211 + if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
16212 + goto out;
16213 + vcpu->arch.cpuid_entries[i] = cpuid_entry;
16214 + }
16215 vcpu->arch.cpuid_nent = cpuid->nent;
16216 kvm_apic_set_version(vcpu);
16217 kvm_x86_ops->cpuid_update(vcpu);
16218 @@ -2252,15 +2259,19 @@ static int kvm_vcpu_ioctl_get_cpuid2(str
16219 struct kvm_cpuid2 *cpuid,
16220 struct kvm_cpuid_entry2 __user *entries)
16221 {
16222 - int r;
16223 + int r, i;
16224
16225 r = -E2BIG;
16226 if (cpuid->nent < vcpu->arch.cpuid_nent)
16227 goto out;
16228 r = -EFAULT;
16229 - if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
16230 - vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
16231 + if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
16232 goto out;
16233 + for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
16234 + struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
16235 + if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
16236 + goto out;
16237 + }
16238 return 0;
16239
16240 out:
16241 @@ -2579,7 +2590,7 @@ static int kvm_vcpu_ioctl_set_lapic(stru
16242 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
16243 struct kvm_interrupt *irq)
16244 {
16245 - if (irq->irq < 0 || irq->irq >= 256)
16246 + if (irq->irq >= 256)
16247 return -EINVAL;
16248 if (irqchip_in_kernel(vcpu->kvm))
16249 return -ENXIO;
16250 @@ -4878,7 +4889,7 @@ void kvm_after_handle_nmi(struct kvm_vcp
16251 }
16252 EXPORT_SYMBOL_GPL(kvm_after_handle_nmi);
16253
16254 -int kvm_arch_init(void *opaque)
16255 +int kvm_arch_init(const void *opaque)
16256 {
16257 int r;
16258 struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
16259 diff -urNp linux-3.0.4/arch/x86/lguest/boot.c linux-3.0.4/arch/x86/lguest/boot.c
16260 --- linux-3.0.4/arch/x86/lguest/boot.c 2011-07-21 22:17:23.000000000 -0400
16261 +++ linux-3.0.4/arch/x86/lguest/boot.c 2011-08-23 21:47:55.000000000 -0400
16262 @@ -1176,9 +1176,10 @@ static __init int early_put_chars(u32 vt
16263 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
16264 * Launcher to reboot us.
16265 */
16266 -static void lguest_restart(char *reason)
16267 +static __noreturn void lguest_restart(char *reason)
16268 {
16269 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
16270 + BUG();
16271 }
16272
16273 /*G:050
16274 diff -urNp linux-3.0.4/arch/x86/lib/atomic64_32.c linux-3.0.4/arch/x86/lib/atomic64_32.c
16275 --- linux-3.0.4/arch/x86/lib/atomic64_32.c 2011-07-21 22:17:23.000000000 -0400
16276 +++ linux-3.0.4/arch/x86/lib/atomic64_32.c 2011-08-23 21:47:55.000000000 -0400
16277 @@ -8,18 +8,30 @@
16278
16279 long long atomic64_read_cx8(long long, const atomic64_t *v);
16280 EXPORT_SYMBOL(atomic64_read_cx8);
16281 +long long atomic64_read_unchecked_cx8(long long, const atomic64_unchecked_t *v);
16282 +EXPORT_SYMBOL(atomic64_read_unchecked_cx8);
16283 long long atomic64_set_cx8(long long, const atomic64_t *v);
16284 EXPORT_SYMBOL(atomic64_set_cx8);
16285 +long long atomic64_set_unchecked_cx8(long long, const atomic64_unchecked_t *v);
16286 +EXPORT_SYMBOL(atomic64_set_unchecked_cx8);
16287 long long atomic64_xchg_cx8(long long, unsigned high);
16288 EXPORT_SYMBOL(atomic64_xchg_cx8);
16289 long long atomic64_add_return_cx8(long long a, atomic64_t *v);
16290 EXPORT_SYMBOL(atomic64_add_return_cx8);
16291 +long long atomic64_add_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
16292 +EXPORT_SYMBOL(atomic64_add_return_unchecked_cx8);
16293 long long atomic64_sub_return_cx8(long long a, atomic64_t *v);
16294 EXPORT_SYMBOL(atomic64_sub_return_cx8);
16295 +long long atomic64_sub_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
16296 +EXPORT_SYMBOL(atomic64_sub_return_unchecked_cx8);
16297 long long atomic64_inc_return_cx8(long long a, atomic64_t *v);
16298 EXPORT_SYMBOL(atomic64_inc_return_cx8);
16299 +long long atomic64_inc_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
16300 +EXPORT_SYMBOL(atomic64_inc_return_unchecked_cx8);
16301 long long atomic64_dec_return_cx8(long long a, atomic64_t *v);
16302 EXPORT_SYMBOL(atomic64_dec_return_cx8);
16303 +long long atomic64_dec_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
16304 +EXPORT_SYMBOL(atomic64_dec_return_unchecked_cx8);
16305 long long atomic64_dec_if_positive_cx8(atomic64_t *v);
16306 EXPORT_SYMBOL(atomic64_dec_if_positive_cx8);
16307 int atomic64_inc_not_zero_cx8(atomic64_t *v);
16308 @@ -30,26 +42,46 @@ EXPORT_SYMBOL(atomic64_add_unless_cx8);
16309 #ifndef CONFIG_X86_CMPXCHG64
16310 long long atomic64_read_386(long long, const atomic64_t *v);
16311 EXPORT_SYMBOL(atomic64_read_386);
16312 +long long atomic64_read_unchecked_386(long long, const atomic64_unchecked_t *v);
16313 +EXPORT_SYMBOL(atomic64_read_unchecked_386);
16314 long long atomic64_set_386(long long, const atomic64_t *v);
16315 EXPORT_SYMBOL(atomic64_set_386);
16316 +long long atomic64_set_unchecked_386(long long, const atomic64_unchecked_t *v);
16317 +EXPORT_SYMBOL(atomic64_set_unchecked_386);
16318 long long atomic64_xchg_386(long long, unsigned high);
16319 EXPORT_SYMBOL(atomic64_xchg_386);
16320 long long atomic64_add_return_386(long long a, atomic64_t *v);
16321 EXPORT_SYMBOL(atomic64_add_return_386);
16322 +long long atomic64_add_return_unchecked_386(long long a, atomic64_unchecked_t *v);
16323 +EXPORT_SYMBOL(atomic64_add_return_unchecked_386);
16324 long long atomic64_sub_return_386(long long a, atomic64_t *v);
16325 EXPORT_SYMBOL(atomic64_sub_return_386);
16326 +long long atomic64_sub_return_unchecked_386(long long a, atomic64_unchecked_t *v);
16327 +EXPORT_SYMBOL(atomic64_sub_return_unchecked_386);
16328 long long atomic64_inc_return_386(long long a, atomic64_t *v);
16329 EXPORT_SYMBOL(atomic64_inc_return_386);
16330 +long long atomic64_inc_return_unchecked_386(long long a, atomic64_unchecked_t *v);
16331 +EXPORT_SYMBOL(atomic64_inc_return_unchecked_386);
16332 long long atomic64_dec_return_386(long long a, atomic64_t *v);
16333 EXPORT_SYMBOL(atomic64_dec_return_386);
16334 +long long atomic64_dec_return_unchecked_386(long long a, atomic64_unchecked_t *v);
16335 +EXPORT_SYMBOL(atomic64_dec_return_unchecked_386);
16336 long long atomic64_add_386(long long a, atomic64_t *v);
16337 EXPORT_SYMBOL(atomic64_add_386);
16338 +long long atomic64_add_unchecked_386(long long a, atomic64_unchecked_t *v);
16339 +EXPORT_SYMBOL(atomic64_add_unchecked_386);
16340 long long atomic64_sub_386(long long a, atomic64_t *v);
16341 EXPORT_SYMBOL(atomic64_sub_386);
16342 +long long atomic64_sub_unchecked_386(long long a, atomic64_unchecked_t *v);
16343 +EXPORT_SYMBOL(atomic64_sub_unchecked_386);
16344 long long atomic64_inc_386(long long a, atomic64_t *v);
16345 EXPORT_SYMBOL(atomic64_inc_386);
16346 +long long atomic64_inc_unchecked_386(long long a, atomic64_unchecked_t *v);
16347 +EXPORT_SYMBOL(atomic64_inc_unchecked_386);
16348 long long atomic64_dec_386(long long a, atomic64_t *v);
16349 EXPORT_SYMBOL(atomic64_dec_386);
16350 +long long atomic64_dec_unchecked_386(long long a, atomic64_unchecked_t *v);
16351 +EXPORT_SYMBOL(atomic64_dec_unchecked_386);
16352 long long atomic64_dec_if_positive_386(atomic64_t *v);
16353 EXPORT_SYMBOL(atomic64_dec_if_positive_386);
16354 int atomic64_inc_not_zero_386(atomic64_t *v);
16355 diff -urNp linux-3.0.4/arch/x86/lib/atomic64_386_32.S linux-3.0.4/arch/x86/lib/atomic64_386_32.S
16356 --- linux-3.0.4/arch/x86/lib/atomic64_386_32.S 2011-07-21 22:17:23.000000000 -0400
16357 +++ linux-3.0.4/arch/x86/lib/atomic64_386_32.S 2011-08-23 21:47:55.000000000 -0400
16358 @@ -48,6 +48,10 @@ BEGIN(read)
16359 movl (v), %eax
16360 movl 4(v), %edx
16361 RET_ENDP
16362 +BEGIN(read_unchecked)
16363 + movl (v), %eax
16364 + movl 4(v), %edx
16365 +RET_ENDP
16366 #undef v
16367
16368 #define v %esi
16369 @@ -55,6 +59,10 @@ BEGIN(set)
16370 movl %ebx, (v)
16371 movl %ecx, 4(v)
16372 RET_ENDP
16373 +BEGIN(set_unchecked)
16374 + movl %ebx, (v)
16375 + movl %ecx, 4(v)
16376 +RET_ENDP
16377 #undef v
16378
16379 #define v %esi
16380 @@ -70,6 +78,20 @@ RET_ENDP
16381 BEGIN(add)
16382 addl %eax, (v)
16383 adcl %edx, 4(v)
16384 +
16385 +#ifdef CONFIG_PAX_REFCOUNT
16386 + jno 0f
16387 + subl %eax, (v)
16388 + sbbl %edx, 4(v)
16389 + int $4
16390 +0:
16391 + _ASM_EXTABLE(0b, 0b)
16392 +#endif
16393 +
16394 +RET_ENDP
16395 +BEGIN(add_unchecked)
16396 + addl %eax, (v)
16397 + adcl %edx, 4(v)
16398 RET_ENDP
16399 #undef v
16400
16401 @@ -77,6 +99,24 @@ RET_ENDP
16402 BEGIN(add_return)
16403 addl (v), %eax
16404 adcl 4(v), %edx
16405 +
16406 +#ifdef CONFIG_PAX_REFCOUNT
16407 + into
16408 +1234:
16409 + _ASM_EXTABLE(1234b, 2f)
16410 +#endif
16411 +
16412 + movl %eax, (v)
16413 + movl %edx, 4(v)
16414 +
16415 +#ifdef CONFIG_PAX_REFCOUNT
16416 +2:
16417 +#endif
16418 +
16419 +RET_ENDP
16420 +BEGIN(add_return_unchecked)
16421 + addl (v), %eax
16422 + adcl 4(v), %edx
16423 movl %eax, (v)
16424 movl %edx, 4(v)
16425 RET_ENDP
16426 @@ -86,6 +126,20 @@ RET_ENDP
16427 BEGIN(sub)
16428 subl %eax, (v)
16429 sbbl %edx, 4(v)
16430 +
16431 +#ifdef CONFIG_PAX_REFCOUNT
16432 + jno 0f
16433 + addl %eax, (v)
16434 + adcl %edx, 4(v)
16435 + int $4
16436 +0:
16437 + _ASM_EXTABLE(0b, 0b)
16438 +#endif
16439 +
16440 +RET_ENDP
16441 +BEGIN(sub_unchecked)
16442 + subl %eax, (v)
16443 + sbbl %edx, 4(v)
16444 RET_ENDP
16445 #undef v
16446
16447 @@ -96,6 +150,27 @@ BEGIN(sub_return)
16448 sbbl $0, %edx
16449 addl (v), %eax
16450 adcl 4(v), %edx
16451 +
16452 +#ifdef CONFIG_PAX_REFCOUNT
16453 + into
16454 +1234:
16455 + _ASM_EXTABLE(1234b, 2f)
16456 +#endif
16457 +
16458 + movl %eax, (v)
16459 + movl %edx, 4(v)
16460 +
16461 +#ifdef CONFIG_PAX_REFCOUNT
16462 +2:
16463 +#endif
16464 +
16465 +RET_ENDP
16466 +BEGIN(sub_return_unchecked)
16467 + negl %edx
16468 + negl %eax
16469 + sbbl $0, %edx
16470 + addl (v), %eax
16471 + adcl 4(v), %edx
16472 movl %eax, (v)
16473 movl %edx, 4(v)
16474 RET_ENDP
16475 @@ -105,6 +180,20 @@ RET_ENDP
16476 BEGIN(inc)
16477 addl $1, (v)
16478 adcl $0, 4(v)
16479 +
16480 +#ifdef CONFIG_PAX_REFCOUNT
16481 + jno 0f
16482 + subl $1, (v)
16483 + sbbl $0, 4(v)
16484 + int $4
16485 +0:
16486 + _ASM_EXTABLE(0b, 0b)
16487 +#endif
16488 +
16489 +RET_ENDP
16490 +BEGIN(inc_unchecked)
16491 + addl $1, (v)
16492 + adcl $0, 4(v)
16493 RET_ENDP
16494 #undef v
16495
16496 @@ -114,6 +203,26 @@ BEGIN(inc_return)
16497 movl 4(v), %edx
16498 addl $1, %eax
16499 adcl $0, %edx
16500 +
16501 +#ifdef CONFIG_PAX_REFCOUNT
16502 + into
16503 +1234:
16504 + _ASM_EXTABLE(1234b, 2f)
16505 +#endif
16506 +
16507 + movl %eax, (v)
16508 + movl %edx, 4(v)
16509 +
16510 +#ifdef CONFIG_PAX_REFCOUNT
16511 +2:
16512 +#endif
16513 +
16514 +RET_ENDP
16515 +BEGIN(inc_return_unchecked)
16516 + movl (v), %eax
16517 + movl 4(v), %edx
16518 + addl $1, %eax
16519 + adcl $0, %edx
16520 movl %eax, (v)
16521 movl %edx, 4(v)
16522 RET_ENDP
16523 @@ -123,6 +232,20 @@ RET_ENDP
16524 BEGIN(dec)
16525 subl $1, (v)
16526 sbbl $0, 4(v)
16527 +
16528 +#ifdef CONFIG_PAX_REFCOUNT
16529 + jno 0f
16530 + addl $1, (v)
16531 + adcl $0, 4(v)
16532 + int $4
16533 +0:
16534 + _ASM_EXTABLE(0b, 0b)
16535 +#endif
16536 +
16537 +RET_ENDP
16538 +BEGIN(dec_unchecked)
16539 + subl $1, (v)
16540 + sbbl $0, 4(v)
16541 RET_ENDP
16542 #undef v
16543
16544 @@ -132,6 +255,26 @@ BEGIN(dec_return)
16545 movl 4(v), %edx
16546 subl $1, %eax
16547 sbbl $0, %edx
16548 +
16549 +#ifdef CONFIG_PAX_REFCOUNT
16550 + into
16551 +1234:
16552 + _ASM_EXTABLE(1234b, 2f)
16553 +#endif
16554 +
16555 + movl %eax, (v)
16556 + movl %edx, 4(v)
16557 +
16558 +#ifdef CONFIG_PAX_REFCOUNT
16559 +2:
16560 +#endif
16561 +
16562 +RET_ENDP
16563 +BEGIN(dec_return_unchecked)
16564 + movl (v), %eax
16565 + movl 4(v), %edx
16566 + subl $1, %eax
16567 + sbbl $0, %edx
16568 movl %eax, (v)
16569 movl %edx, 4(v)
16570 RET_ENDP
16571 @@ -143,6 +286,13 @@ BEGIN(add_unless)
16572 adcl %edx, %edi
16573 addl (v), %eax
16574 adcl 4(v), %edx
16575 +
16576 +#ifdef CONFIG_PAX_REFCOUNT
16577 + into
16578 +1234:
16579 + _ASM_EXTABLE(1234b, 2f)
16580 +#endif
16581 +
16582 cmpl %eax, %esi
16583 je 3f
16584 1:
16585 @@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
16586 1:
16587 addl $1, %eax
16588 adcl $0, %edx
16589 +
16590 +#ifdef CONFIG_PAX_REFCOUNT
16591 + into
16592 +1234:
16593 + _ASM_EXTABLE(1234b, 2f)
16594 +#endif
16595 +
16596 movl %eax, (v)
16597 movl %edx, 4(v)
16598 movl $1, %eax
16599 @@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
16600 movl 4(v), %edx
16601 subl $1, %eax
16602 sbbl $0, %edx
16603 +
16604 +#ifdef CONFIG_PAX_REFCOUNT
16605 + into
16606 +1234:
16607 + _ASM_EXTABLE(1234b, 1f)
16608 +#endif
16609 +
16610 js 1f
16611 movl %eax, (v)
16612 movl %edx, 4(v)
16613 diff -urNp linux-3.0.4/arch/x86/lib/atomic64_cx8_32.S linux-3.0.4/arch/x86/lib/atomic64_cx8_32.S
16614 --- linux-3.0.4/arch/x86/lib/atomic64_cx8_32.S 2011-07-21 22:17:23.000000000 -0400
16615 +++ linux-3.0.4/arch/x86/lib/atomic64_cx8_32.S 2011-09-17 18:31:51.000000000 -0400
16616 @@ -35,10 +35,24 @@ ENTRY(atomic64_read_cx8)
16617 CFI_STARTPROC
16618
16619 read64 %ecx
16620 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
16621 + orb $0x80, 0x7(%rsp)
16622 +#endif
16623 ret
16624 CFI_ENDPROC
16625 ENDPROC(atomic64_read_cx8)
16626
16627 +ENTRY(atomic64_read_unchecked_cx8)
16628 + CFI_STARTPROC
16629 +
16630 + read64 %ecx
16631 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
16632 + orb $0x80, 0x7(%rsp)
16633 +#endif
16634 + ret
16635 + CFI_ENDPROC
16636 +ENDPROC(atomic64_read_unchecked_cx8)
16637 +
16638 ENTRY(atomic64_set_cx8)
16639 CFI_STARTPROC
16640
16641 @@ -48,10 +62,29 @@ ENTRY(atomic64_set_cx8)
16642 cmpxchg8b (%esi)
16643 jne 1b
16644
16645 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
16646 + orb $0x80, 0x7(%rsp)
16647 +#endif
16648 ret
16649 CFI_ENDPROC
16650 ENDPROC(atomic64_set_cx8)
16651
16652 +ENTRY(atomic64_set_unchecked_cx8)
16653 + CFI_STARTPROC
16654 +
16655 +1:
16656 +/* we don't need LOCK_PREFIX since aligned 64-bit writes
16657 + * are atomic on 586 and newer */
16658 + cmpxchg8b (%esi)
16659 + jne 1b
16660 +
16661 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
16662 + orb $0x80, 0x7(%rsp)
16663 +#endif
16664 + ret
16665 + CFI_ENDPROC
16666 +ENDPROC(atomic64_set_unchecked_cx8)
16667 +
16668 ENTRY(atomic64_xchg_cx8)
16669 CFI_STARTPROC
16670
16671 @@ -62,12 +95,15 @@ ENTRY(atomic64_xchg_cx8)
16672 cmpxchg8b (%esi)
16673 jne 1b
16674
16675 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
16676 + orb $0x80, 0x7(%rsp)
16677 +#endif
16678 ret
16679 CFI_ENDPROC
16680 ENDPROC(atomic64_xchg_cx8)
16681
16682 -.macro addsub_return func ins insc
16683 -ENTRY(atomic64_\func\()_return_cx8)
16684 +.macro addsub_return func ins insc unchecked=""
16685 +ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
16686 CFI_STARTPROC
16687 SAVE ebp
16688 SAVE ebx
16689 @@ -84,27 +120,46 @@ ENTRY(atomic64_\func\()_return_cx8)
16690 movl %edx, %ecx
16691 \ins\()l %esi, %ebx
16692 \insc\()l %edi, %ecx
16693 +
16694 +.ifb \unchecked
16695 +#ifdef CONFIG_PAX_REFCOUNT
16696 + into
16697 +2:
16698 + _ASM_EXTABLE(2b, 3f)
16699 +#endif
16700 +.endif
16701 +
16702 LOCK_PREFIX
16703 cmpxchg8b (%ebp)
16704 jne 1b
16705 -
16706 -10:
16707 movl %ebx, %eax
16708 movl %ecx, %edx
16709 +
16710 +.ifb \unchecked
16711 +#ifdef CONFIG_PAX_REFCOUNT
16712 +3:
16713 +#endif
16714 +.endif
16715 +
16716 RESTORE edi
16717 RESTORE esi
16718 RESTORE ebx
16719 RESTORE ebp
16720 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
16721 + orb $0x80, 0x7(%rsp)
16722 +#endif
16723 ret
16724 CFI_ENDPROC
16725 -ENDPROC(atomic64_\func\()_return_cx8)
16726 +ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
16727 .endm
16728
16729 addsub_return add add adc
16730 addsub_return sub sub sbb
16731 +addsub_return add add adc _unchecked
16732 +addsub_return sub sub sbb _unchecked
16733
16734 -.macro incdec_return func ins insc
16735 -ENTRY(atomic64_\func\()_return_cx8)
16736 +.macro incdec_return func ins insc unchecked
16737 +ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
16738 CFI_STARTPROC
16739 SAVE ebx
16740
16741 @@ -114,21 +169,41 @@ ENTRY(atomic64_\func\()_return_cx8)
16742 movl %edx, %ecx
16743 \ins\()l $1, %ebx
16744 \insc\()l $0, %ecx
16745 +
16746 +.ifb \unchecked
16747 +#ifdef CONFIG_PAX_REFCOUNT
16748 + into
16749 +2:
16750 + _ASM_EXTABLE(2b, 3f)
16751 +#endif
16752 +.endif
16753 +
16754 LOCK_PREFIX
16755 cmpxchg8b (%esi)
16756 jne 1b
16757
16758 -10:
16759 movl %ebx, %eax
16760 movl %ecx, %edx
16761 +
16762 +.ifb \unchecked
16763 +#ifdef CONFIG_PAX_REFCOUNT
16764 +3:
16765 +#endif
16766 +.endif
16767 +
16768 RESTORE ebx
16769 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
16770 + orb $0x80, 0x7(%rsp)
16771 +#endif
16772 ret
16773 CFI_ENDPROC
16774 -ENDPROC(atomic64_\func\()_return_cx8)
16775 +ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
16776 .endm
16777
16778 incdec_return inc add adc
16779 incdec_return dec sub sbb
16780 +incdec_return inc add adc _unchecked
16781 +incdec_return dec sub sbb _unchecked
16782
16783 ENTRY(atomic64_dec_if_positive_cx8)
16784 CFI_STARTPROC
16785 @@ -140,6 +215,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
16786 movl %edx, %ecx
16787 subl $1, %ebx
16788 sbb $0, %ecx
16789 +
16790 +#ifdef CONFIG_PAX_REFCOUNT
16791 + into
16792 +1234:
16793 + _ASM_EXTABLE(1234b, 2f)
16794 +#endif
16795 +
16796 js 2f
16797 LOCK_PREFIX
16798 cmpxchg8b (%esi)
16799 @@ -149,6 +231,9 @@ ENTRY(atomic64_dec_if_positive_cx8)
16800 movl %ebx, %eax
16801 movl %ecx, %edx
16802 RESTORE ebx
16803 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
16804 + orb $0x80, 0x7(%rsp)
16805 +#endif
16806 ret
16807 CFI_ENDPROC
16808 ENDPROC(atomic64_dec_if_positive_cx8)
16809 @@ -174,6 +259,13 @@ ENTRY(atomic64_add_unless_cx8)
16810 movl %edx, %ecx
16811 addl %esi, %ebx
16812 adcl %edi, %ecx
16813 +
16814 +#ifdef CONFIG_PAX_REFCOUNT
16815 + into
16816 +1234:
16817 + _ASM_EXTABLE(1234b, 3f)
16818 +#endif
16819 +
16820 LOCK_PREFIX
16821 cmpxchg8b (%ebp)
16822 jne 1b
16823 @@ -184,6 +276,9 @@ ENTRY(atomic64_add_unless_cx8)
16824 CFI_ADJUST_CFA_OFFSET -8
16825 RESTORE ebx
16826 RESTORE ebp
16827 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
16828 + orb $0x80, 0x7(%rsp)
16829 +#endif
16830 ret
16831 4:
16832 cmpl %edx, 4(%esp)
16833 @@ -206,6 +301,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
16834 movl %edx, %ecx
16835 addl $1, %ebx
16836 adcl $0, %ecx
16837 +
16838 +#ifdef CONFIG_PAX_REFCOUNT
16839 + into
16840 +1234:
16841 + _ASM_EXTABLE(1234b, 3f)
16842 +#endif
16843 +
16844 LOCK_PREFIX
16845 cmpxchg8b (%esi)
16846 jne 1b
16847 @@ -213,6 +315,9 @@ ENTRY(atomic64_inc_not_zero_cx8)
16848 movl $1, %eax
16849 3:
16850 RESTORE ebx
16851 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
16852 + orb $0x80, 0x7(%rsp)
16853 +#endif
16854 ret
16855 4:
16856 testl %edx, %edx
16857 diff -urNp linux-3.0.4/arch/x86/lib/checksum_32.S linux-3.0.4/arch/x86/lib/checksum_32.S
16858 --- linux-3.0.4/arch/x86/lib/checksum_32.S 2011-07-21 22:17:23.000000000 -0400
16859 +++ linux-3.0.4/arch/x86/lib/checksum_32.S 2011-08-23 21:47:55.000000000 -0400
16860 @@ -28,7 +28,8 @@
16861 #include <linux/linkage.h>
16862 #include <asm/dwarf2.h>
16863 #include <asm/errno.h>
16864 -
16865 +#include <asm/segment.h>
16866 +
16867 /*
16868 * computes a partial checksum, e.g. for TCP/UDP fragments
16869 */
16870 @@ -296,9 +297,24 @@ unsigned int csum_partial_copy_generic (
16871
16872 #define ARGBASE 16
16873 #define FP 12
16874 -
16875 -ENTRY(csum_partial_copy_generic)
16876 +
16877 +ENTRY(csum_partial_copy_generic_to_user)
16878 CFI_STARTPROC
16879 +
16880 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16881 + pushl_cfi %gs
16882 + popl_cfi %es
16883 + jmp csum_partial_copy_generic
16884 +#endif
16885 +
16886 +ENTRY(csum_partial_copy_generic_from_user)
16887 +
16888 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16889 + pushl_cfi %gs
16890 + popl_cfi %ds
16891 +#endif
16892 +
16893 +ENTRY(csum_partial_copy_generic)
16894 subl $4,%esp
16895 CFI_ADJUST_CFA_OFFSET 4
16896 pushl_cfi %edi
16897 @@ -320,7 +336,7 @@ ENTRY(csum_partial_copy_generic)
16898 jmp 4f
16899 SRC(1: movw (%esi), %bx )
16900 addl $2, %esi
16901 -DST( movw %bx, (%edi) )
16902 +DST( movw %bx, %es:(%edi) )
16903 addl $2, %edi
16904 addw %bx, %ax
16905 adcl $0, %eax
16906 @@ -332,30 +348,30 @@ DST( movw %bx, (%edi) )
16907 SRC(1: movl (%esi), %ebx )
16908 SRC( movl 4(%esi), %edx )
16909 adcl %ebx, %eax
16910 -DST( movl %ebx, (%edi) )
16911 +DST( movl %ebx, %es:(%edi) )
16912 adcl %edx, %eax
16913 -DST( movl %edx, 4(%edi) )
16914 +DST( movl %edx, %es:4(%edi) )
16915
16916 SRC( movl 8(%esi), %ebx )
16917 SRC( movl 12(%esi), %edx )
16918 adcl %ebx, %eax
16919 -DST( movl %ebx, 8(%edi) )
16920 +DST( movl %ebx, %es:8(%edi) )
16921 adcl %edx, %eax
16922 -DST( movl %edx, 12(%edi) )
16923 +DST( movl %edx, %es:12(%edi) )
16924
16925 SRC( movl 16(%esi), %ebx )
16926 SRC( movl 20(%esi), %edx )
16927 adcl %ebx, %eax
16928 -DST( movl %ebx, 16(%edi) )
16929 +DST( movl %ebx, %es:16(%edi) )
16930 adcl %edx, %eax
16931 -DST( movl %edx, 20(%edi) )
16932 +DST( movl %edx, %es:20(%edi) )
16933
16934 SRC( movl 24(%esi), %ebx )
16935 SRC( movl 28(%esi), %edx )
16936 adcl %ebx, %eax
16937 -DST( movl %ebx, 24(%edi) )
16938 +DST( movl %ebx, %es:24(%edi) )
16939 adcl %edx, %eax
16940 -DST( movl %edx, 28(%edi) )
16941 +DST( movl %edx, %es:28(%edi) )
16942
16943 lea 32(%esi), %esi
16944 lea 32(%edi), %edi
16945 @@ -369,7 +385,7 @@ DST( movl %edx, 28(%edi) )
16946 shrl $2, %edx # This clears CF
16947 SRC(3: movl (%esi), %ebx )
16948 adcl %ebx, %eax
16949 -DST( movl %ebx, (%edi) )
16950 +DST( movl %ebx, %es:(%edi) )
16951 lea 4(%esi), %esi
16952 lea 4(%edi), %edi
16953 dec %edx
16954 @@ -381,12 +397,12 @@ DST( movl %ebx, (%edi) )
16955 jb 5f
16956 SRC( movw (%esi), %cx )
16957 leal 2(%esi), %esi
16958 -DST( movw %cx, (%edi) )
16959 +DST( movw %cx, %es:(%edi) )
16960 leal 2(%edi), %edi
16961 je 6f
16962 shll $16,%ecx
16963 SRC(5: movb (%esi), %cl )
16964 -DST( movb %cl, (%edi) )
16965 +DST( movb %cl, %es:(%edi) )
16966 6: addl %ecx, %eax
16967 adcl $0, %eax
16968 7:
16969 @@ -397,7 +413,7 @@ DST( movb %cl, (%edi) )
16970
16971 6001:
16972 movl ARGBASE+20(%esp), %ebx # src_err_ptr
16973 - movl $-EFAULT, (%ebx)
16974 + movl $-EFAULT, %ss:(%ebx)
16975
16976 # zero the complete destination - computing the rest
16977 # is too much work
16978 @@ -410,11 +426,15 @@ DST( movb %cl, (%edi) )
16979
16980 6002:
16981 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
16982 - movl $-EFAULT,(%ebx)
16983 + movl $-EFAULT,%ss:(%ebx)
16984 jmp 5000b
16985
16986 .previous
16987
16988 + pushl_cfi %ss
16989 + popl_cfi %ds
16990 + pushl_cfi %ss
16991 + popl_cfi %es
16992 popl_cfi %ebx
16993 CFI_RESTORE ebx
16994 popl_cfi %esi
16995 @@ -424,26 +444,43 @@ DST( movb %cl, (%edi) )
16996 popl_cfi %ecx # equivalent to addl $4,%esp
16997 ret
16998 CFI_ENDPROC
16999 -ENDPROC(csum_partial_copy_generic)
17000 +ENDPROC(csum_partial_copy_generic_to_user)
17001
17002 #else
17003
17004 /* Version for PentiumII/PPro */
17005
17006 #define ROUND1(x) \
17007 + nop; nop; nop; \
17008 SRC(movl x(%esi), %ebx ) ; \
17009 addl %ebx, %eax ; \
17010 - DST(movl %ebx, x(%edi) ) ;
17011 + DST(movl %ebx, %es:x(%edi)) ;
17012
17013 #define ROUND(x) \
17014 + nop; nop; nop; \
17015 SRC(movl x(%esi), %ebx ) ; \
17016 adcl %ebx, %eax ; \
17017 - DST(movl %ebx, x(%edi) ) ;
17018 + DST(movl %ebx, %es:x(%edi)) ;
17019
17020 #define ARGBASE 12
17021 -
17022 -ENTRY(csum_partial_copy_generic)
17023 +
17024 +ENTRY(csum_partial_copy_generic_to_user)
17025 CFI_STARTPROC
17026 +
17027 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17028 + pushl_cfi %gs
17029 + popl_cfi %es
17030 + jmp csum_partial_copy_generic
17031 +#endif
17032 +
17033 +ENTRY(csum_partial_copy_generic_from_user)
17034 +
17035 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17036 + pushl_cfi %gs
17037 + popl_cfi %ds
17038 +#endif
17039 +
17040 +ENTRY(csum_partial_copy_generic)
17041 pushl_cfi %ebx
17042 CFI_REL_OFFSET ebx, 0
17043 pushl_cfi %edi
17044 @@ -464,7 +501,7 @@ ENTRY(csum_partial_copy_generic)
17045 subl %ebx, %edi
17046 lea -1(%esi),%edx
17047 andl $-32,%edx
17048 - lea 3f(%ebx,%ebx), %ebx
17049 + lea 3f(%ebx,%ebx,2), %ebx
17050 testl %esi, %esi
17051 jmp *%ebx
17052 1: addl $64,%esi
17053 @@ -485,19 +522,19 @@ ENTRY(csum_partial_copy_generic)
17054 jb 5f
17055 SRC( movw (%esi), %dx )
17056 leal 2(%esi), %esi
17057 -DST( movw %dx, (%edi) )
17058 +DST( movw %dx, %es:(%edi) )
17059 leal 2(%edi), %edi
17060 je 6f
17061 shll $16,%edx
17062 5:
17063 SRC( movb (%esi), %dl )
17064 -DST( movb %dl, (%edi) )
17065 +DST( movb %dl, %es:(%edi) )
17066 6: addl %edx, %eax
17067 adcl $0, %eax
17068 7:
17069 .section .fixup, "ax"
17070 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
17071 - movl $-EFAULT, (%ebx)
17072 + movl $-EFAULT, %ss:(%ebx)
17073 # zero the complete destination (computing the rest is too much work)
17074 movl ARGBASE+8(%esp),%edi # dst
17075 movl ARGBASE+12(%esp),%ecx # len
17076 @@ -505,10 +542,17 @@ DST( movb %dl, (%edi) )
17077 rep; stosb
17078 jmp 7b
17079 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
17080 - movl $-EFAULT, (%ebx)
17081 + movl $-EFAULT, %ss:(%ebx)
17082 jmp 7b
17083 .previous
17084
17085 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17086 + pushl_cfi %ss
17087 + popl_cfi %ds
17088 + pushl_cfi %ss
17089 + popl_cfi %es
17090 +#endif
17091 +
17092 popl_cfi %esi
17093 CFI_RESTORE esi
17094 popl_cfi %edi
17095 @@ -517,7 +561,7 @@ DST( movb %dl, (%edi) )
17096 CFI_RESTORE ebx
17097 ret
17098 CFI_ENDPROC
17099 -ENDPROC(csum_partial_copy_generic)
17100 +ENDPROC(csum_partial_copy_generic_to_user)
17101
17102 #undef ROUND
17103 #undef ROUND1
17104 diff -urNp linux-3.0.4/arch/x86/lib/clear_page_64.S linux-3.0.4/arch/x86/lib/clear_page_64.S
17105 --- linux-3.0.4/arch/x86/lib/clear_page_64.S 2011-07-21 22:17:23.000000000 -0400
17106 +++ linux-3.0.4/arch/x86/lib/clear_page_64.S 2011-09-17 18:31:51.000000000 -0400
17107 @@ -11,6 +11,9 @@ ENTRY(clear_page_c)
17108 movl $4096/8,%ecx
17109 xorl %eax,%eax
17110 rep stosq
17111 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
17112 + orb $0x80, 0x7(%rsp)
17113 +#endif
17114 ret
17115 CFI_ENDPROC
17116 ENDPROC(clear_page_c)
17117 @@ -20,6 +23,9 @@ ENTRY(clear_page_c_e)
17118 movl $4096,%ecx
17119 xorl %eax,%eax
17120 rep stosb
17121 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
17122 + orb $0x80, 0x7(%rsp)
17123 +#endif
17124 ret
17125 CFI_ENDPROC
17126 ENDPROC(clear_page_c_e)
17127 @@ -43,6 +49,9 @@ ENTRY(clear_page)
17128 leaq 64(%rdi),%rdi
17129 jnz .Lloop
17130 nop
17131 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
17132 + orb $0x80, 0x7(%rsp)
17133 +#endif
17134 ret
17135 CFI_ENDPROC
17136 .Lclear_page_end:
17137 @@ -58,7 +67,7 @@ ENDPROC(clear_page)
17138
17139 #include <asm/cpufeature.h>
17140
17141 - .section .altinstr_replacement,"ax"
17142 + .section .altinstr_replacement,"a"
17143 1: .byte 0xeb /* jmp <disp8> */
17144 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
17145 2: .byte 0xeb /* jmp <disp8> */
17146 diff -urNp linux-3.0.4/arch/x86/lib/copy_page_64.S linux-3.0.4/arch/x86/lib/copy_page_64.S
17147 --- linux-3.0.4/arch/x86/lib/copy_page_64.S 2011-07-21 22:17:23.000000000 -0400
17148 +++ linux-3.0.4/arch/x86/lib/copy_page_64.S 2011-09-17 18:31:51.000000000 -0400
17149 @@ -8,6 +8,9 @@ copy_page_c:
17150 CFI_STARTPROC
17151 movl $4096/8,%ecx
17152 rep movsq
17153 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
17154 + orb $0x80, 0x7(%rsp)
17155 +#endif
17156 ret
17157 CFI_ENDPROC
17158 ENDPROC(copy_page_c)
17159 @@ -94,6 +97,9 @@ ENTRY(copy_page)
17160 CFI_RESTORE r13
17161 addq $3*8,%rsp
17162 CFI_ADJUST_CFA_OFFSET -3*8
17163 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
17164 + orb $0x80, 0x7(%rsp)
17165 +#endif
17166 ret
17167 .Lcopy_page_end:
17168 CFI_ENDPROC
17169 @@ -104,7 +110,7 @@ ENDPROC(copy_page)
17170
17171 #include <asm/cpufeature.h>
17172
17173 - .section .altinstr_replacement,"ax"
17174 + .section .altinstr_replacement,"a"
17175 1: .byte 0xeb /* jmp <disp8> */
17176 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
17177 2:
17178 diff -urNp linux-3.0.4/arch/x86/lib/copy_user_64.S linux-3.0.4/arch/x86/lib/copy_user_64.S
17179 --- linux-3.0.4/arch/x86/lib/copy_user_64.S 2011-07-21 22:17:23.000000000 -0400
17180 +++ linux-3.0.4/arch/x86/lib/copy_user_64.S 2011-09-17 18:31:51.000000000 -0400
17181 @@ -16,6 +16,7 @@
17182 #include <asm/thread_info.h>
17183 #include <asm/cpufeature.h>
17184 #include <asm/alternative-asm.h>
17185 +#include <asm/pgtable.h>
17186
17187 /*
17188 * By placing feature2 after feature1 in altinstructions section, we logically
17189 @@ -29,7 +30,7 @@
17190 .byte 0xe9 /* 32bit jump */
17191 .long \orig-1f /* by default jump to orig */
17192 1:
17193 - .section .altinstr_replacement,"ax"
17194 + .section .altinstr_replacement,"a"
17195 2: .byte 0xe9 /* near jump with 32bit immediate */
17196 .long \alt1-1b /* offset */ /* or alternatively to alt1 */
17197 3: .byte 0xe9 /* near jump with 32bit immediate */
17198 @@ -71,47 +72,22 @@
17199 #endif
17200 .endm
17201
17202 -/* Standard copy_to_user with segment limit checking */
17203 -ENTRY(_copy_to_user)
17204 - CFI_STARTPROC
17205 - GET_THREAD_INFO(%rax)
17206 - movq %rdi,%rcx
17207 - addq %rdx,%rcx
17208 - jc bad_to_user
17209 - cmpq TI_addr_limit(%rax),%rcx
17210 - ja bad_to_user
17211 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
17212 - copy_user_generic_unrolled,copy_user_generic_string, \
17213 - copy_user_enhanced_fast_string
17214 - CFI_ENDPROC
17215 -ENDPROC(_copy_to_user)
17216 -
17217 -/* Standard copy_from_user with segment limit checking */
17218 -ENTRY(_copy_from_user)
17219 - CFI_STARTPROC
17220 - GET_THREAD_INFO(%rax)
17221 - movq %rsi,%rcx
17222 - addq %rdx,%rcx
17223 - jc bad_from_user
17224 - cmpq TI_addr_limit(%rax),%rcx
17225 - ja bad_from_user
17226 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
17227 - copy_user_generic_unrolled,copy_user_generic_string, \
17228 - copy_user_enhanced_fast_string
17229 - CFI_ENDPROC
17230 -ENDPROC(_copy_from_user)
17231 -
17232 .section .fixup,"ax"
17233 /* must zero dest */
17234 ENTRY(bad_from_user)
17235 bad_from_user:
17236 CFI_STARTPROC
17237 + testl %edx,%edx
17238 + js bad_to_user
17239 movl %edx,%ecx
17240 xorl %eax,%eax
17241 rep
17242 stosb
17243 bad_to_user:
17244 movl %edx,%eax
17245 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
17246 + orb $0x80, 0x7(%rsp)
17247 +#endif
17248 ret
17249 CFI_ENDPROC
17250 ENDPROC(bad_from_user)
17251 @@ -179,6 +155,9 @@ ENTRY(copy_user_generic_unrolled)
17252 decl %ecx
17253 jnz 21b
17254 23: xor %eax,%eax
17255 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
17256 + orb $0x80, 0x7(%rsp)
17257 +#endif
17258 ret
17259
17260 .section .fixup,"ax"
17261 @@ -251,6 +230,9 @@ ENTRY(copy_user_generic_string)
17262 3: rep
17263 movsb
17264 4: xorl %eax,%eax
17265 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
17266 + orb $0x80, 0x7(%rsp)
17267 +#endif
17268 ret
17269
17270 .section .fixup,"ax"
17271 @@ -287,6 +269,9 @@ ENTRY(copy_user_enhanced_fast_string)
17272 1: rep
17273 movsb
17274 2: xorl %eax,%eax
17275 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
17276 + orb $0x80, 0x7(%rsp)
17277 +#endif
17278 ret
17279
17280 .section .fixup,"ax"
17281 diff -urNp linux-3.0.4/arch/x86/lib/copy_user_nocache_64.S linux-3.0.4/arch/x86/lib/copy_user_nocache_64.S
17282 --- linux-3.0.4/arch/x86/lib/copy_user_nocache_64.S 2011-07-21 22:17:23.000000000 -0400
17283 +++ linux-3.0.4/arch/x86/lib/copy_user_nocache_64.S 2011-09-17 18:31:51.000000000 -0400
17284 @@ -14,6 +14,7 @@
17285 #include <asm/current.h>
17286 #include <asm/asm-offsets.h>
17287 #include <asm/thread_info.h>
17288 +#include <asm/pgtable.h>
17289
17290 .macro ALIGN_DESTINATION
17291 #ifdef FIX_ALIGNMENT
17292 @@ -50,6 +51,15 @@
17293 */
17294 ENTRY(__copy_user_nocache)
17295 CFI_STARTPROC
17296 +
17297 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17298 + mov $PAX_USER_SHADOW_BASE,%rcx
17299 + cmp %rcx,%rsi
17300 + jae 1f
17301 + add %rcx,%rsi
17302 +1:
17303 +#endif
17304 +
17305 cmpl $8,%edx
17306 jb 20f /* less then 8 bytes, go to byte copy loop */
17307 ALIGN_DESTINATION
17308 @@ -98,6 +108,9 @@ ENTRY(__copy_user_nocache)
17309 jnz 21b
17310 23: xorl %eax,%eax
17311 sfence
17312 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
17313 + orb $0x80, 0x7(%rsp)
17314 +#endif
17315 ret
17316
17317 .section .fixup,"ax"
17318 diff -urNp linux-3.0.4/arch/x86/lib/csum-copy_64.S linux-3.0.4/arch/x86/lib/csum-copy_64.S
17319 --- linux-3.0.4/arch/x86/lib/csum-copy_64.S 2011-07-21 22:17:23.000000000 -0400
17320 +++ linux-3.0.4/arch/x86/lib/csum-copy_64.S 2011-09-17 18:31:51.000000000 -0400
17321 @@ -228,6 +228,9 @@ ENTRY(csum_partial_copy_generic)
17322 CFI_RESTORE rbp
17323 addq $7*8, %rsp
17324 CFI_ADJUST_CFA_OFFSET -7*8
17325 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
17326 + orb $0x80, 0x7(%rsp)
17327 +#endif
17328 ret
17329 CFI_RESTORE_STATE
17330
17331 diff -urNp linux-3.0.4/arch/x86/lib/csum-wrappers_64.c linux-3.0.4/arch/x86/lib/csum-wrappers_64.c
17332 --- linux-3.0.4/arch/x86/lib/csum-wrappers_64.c 2011-07-21 22:17:23.000000000 -0400
17333 +++ linux-3.0.4/arch/x86/lib/csum-wrappers_64.c 2011-08-23 21:47:55.000000000 -0400
17334 @@ -52,6 +52,12 @@ csum_partial_copy_from_user(const void _
17335 len -= 2;
17336 }
17337 }
17338 +
17339 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17340 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
17341 + src += PAX_USER_SHADOW_BASE;
17342 +#endif
17343 +
17344 isum = csum_partial_copy_generic((__force const void *)src,
17345 dst, len, isum, errp, NULL);
17346 if (unlikely(*errp))
17347 @@ -105,6 +111,12 @@ csum_partial_copy_to_user(const void *sr
17348 }
17349
17350 *errp = 0;
17351 +
17352 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17353 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
17354 + dst += PAX_USER_SHADOW_BASE;
17355 +#endif
17356 +
17357 return csum_partial_copy_generic(src, (void __force *)dst,
17358 len, isum, NULL, errp);
17359 }
17360 diff -urNp linux-3.0.4/arch/x86/lib/getuser.S linux-3.0.4/arch/x86/lib/getuser.S
17361 --- linux-3.0.4/arch/x86/lib/getuser.S 2011-07-21 22:17:23.000000000 -0400
17362 +++ linux-3.0.4/arch/x86/lib/getuser.S 2011-08-23 21:47:55.000000000 -0400
17363 @@ -33,14 +33,35 @@
17364 #include <asm/asm-offsets.h>
17365 #include <asm/thread_info.h>
17366 #include <asm/asm.h>
17367 +#include <asm/segment.h>
17368 +#include <asm/pgtable.h>
17369 +
17370 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
17371 +#define __copyuser_seg gs;
17372 +#else
17373 +#define __copyuser_seg
17374 +#endif
17375
17376 .text
17377 ENTRY(__get_user_1)
17378 CFI_STARTPROC
17379 +
17380 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17381 GET_THREAD_INFO(%_ASM_DX)
17382 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
17383 jae bad_get_user
17384 -1: movzb (%_ASM_AX),%edx
17385 +
17386 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17387 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
17388 + cmp %_ASM_DX,%_ASM_AX
17389 + jae 1234f
17390 + add %_ASM_DX,%_ASM_AX
17391 +1234:
17392 +#endif
17393 +
17394 +#endif
17395 +
17396 +1: __copyuser_seg movzb (%_ASM_AX),%edx
17397 xor %eax,%eax
17398 ret
17399 CFI_ENDPROC
17400 @@ -49,11 +70,24 @@ ENDPROC(__get_user_1)
17401 ENTRY(__get_user_2)
17402 CFI_STARTPROC
17403 add $1,%_ASM_AX
17404 +
17405 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17406 jc bad_get_user
17407 GET_THREAD_INFO(%_ASM_DX)
17408 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
17409 jae bad_get_user
17410 -2: movzwl -1(%_ASM_AX),%edx
17411 +
17412 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17413 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
17414 + cmp %_ASM_DX,%_ASM_AX
17415 + jae 1234f
17416 + add %_ASM_DX,%_ASM_AX
17417 +1234:
17418 +#endif
17419 +
17420 +#endif
17421 +
17422 +2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
17423 xor %eax,%eax
17424 ret
17425 CFI_ENDPROC
17426 @@ -62,11 +96,24 @@ ENDPROC(__get_user_2)
17427 ENTRY(__get_user_4)
17428 CFI_STARTPROC
17429 add $3,%_ASM_AX
17430 +
17431 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17432 jc bad_get_user
17433 GET_THREAD_INFO(%_ASM_DX)
17434 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
17435 jae bad_get_user
17436 -3: mov -3(%_ASM_AX),%edx
17437 +
17438 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17439 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
17440 + cmp %_ASM_DX,%_ASM_AX
17441 + jae 1234f
17442 + add %_ASM_DX,%_ASM_AX
17443 +1234:
17444 +#endif
17445 +
17446 +#endif
17447 +
17448 +3: __copyuser_seg mov -3(%_ASM_AX),%edx
17449 xor %eax,%eax
17450 ret
17451 CFI_ENDPROC
17452 @@ -80,6 +127,15 @@ ENTRY(__get_user_8)
17453 GET_THREAD_INFO(%_ASM_DX)
17454 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
17455 jae bad_get_user
17456 +
17457 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17458 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
17459 + cmp %_ASM_DX,%_ASM_AX
17460 + jae 1234f
17461 + add %_ASM_DX,%_ASM_AX
17462 +1234:
17463 +#endif
17464 +
17465 4: movq -7(%_ASM_AX),%_ASM_DX
17466 xor %eax,%eax
17467 ret
17468 diff -urNp linux-3.0.4/arch/x86/lib/insn.c linux-3.0.4/arch/x86/lib/insn.c
17469 --- linux-3.0.4/arch/x86/lib/insn.c 2011-07-21 22:17:23.000000000 -0400
17470 +++ linux-3.0.4/arch/x86/lib/insn.c 2011-08-23 21:47:55.000000000 -0400
17471 @@ -21,6 +21,11 @@
17472 #include <linux/string.h>
17473 #include <asm/inat.h>
17474 #include <asm/insn.h>
17475 +#ifdef __KERNEL__
17476 +#include <asm/pgtable_types.h>
17477 +#else
17478 +#define ktla_ktva(addr) addr
17479 +#endif
17480
17481 #define get_next(t, insn) \
17482 ({t r; r = *(t*)insn->next_byte; insn->next_byte += sizeof(t); r; })
17483 @@ -40,8 +45,8 @@
17484 void insn_init(struct insn *insn, const void *kaddr, int x86_64)
17485 {
17486 memset(insn, 0, sizeof(*insn));
17487 - insn->kaddr = kaddr;
17488 - insn->next_byte = kaddr;
17489 + insn->kaddr = ktla_ktva(kaddr);
17490 + insn->next_byte = ktla_ktva(kaddr);
17491 insn->x86_64 = x86_64 ? 1 : 0;
17492 insn->opnd_bytes = 4;
17493 if (x86_64)
17494 diff -urNp linux-3.0.4/arch/x86/lib/iomap_copy_64.S linux-3.0.4/arch/x86/lib/iomap_copy_64.S
17495 --- linux-3.0.4/arch/x86/lib/iomap_copy_64.S 2011-07-21 22:17:23.000000000 -0400
17496 +++ linux-3.0.4/arch/x86/lib/iomap_copy_64.S 2011-09-17 18:31:51.000000000 -0400
17497 @@ -25,6 +25,9 @@ ENTRY(__iowrite32_copy)
17498 CFI_STARTPROC
17499 movl %edx,%ecx
17500 rep movsd
17501 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
17502 + orb $0x80, 0x7(%rsp)
17503 +#endif
17504 ret
17505 CFI_ENDPROC
17506 ENDPROC(__iowrite32_copy)
17507 diff -urNp linux-3.0.4/arch/x86/lib/memcpy_64.S linux-3.0.4/arch/x86/lib/memcpy_64.S
17508 --- linux-3.0.4/arch/x86/lib/memcpy_64.S 2011-07-21 22:17:23.000000000 -0400
17509 +++ linux-3.0.4/arch/x86/lib/memcpy_64.S 2011-09-17 18:31:51.000000000 -0400
17510 @@ -34,6 +34,9 @@
17511 rep movsq
17512 movl %edx, %ecx
17513 rep movsb
17514 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
17515 + orb $0x80, 0x7(%rsp)
17516 +#endif
17517 ret
17518 .Lmemcpy_e:
17519 .previous
17520 @@ -51,6 +54,9 @@
17521
17522 movl %edx, %ecx
17523 rep movsb
17524 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
17525 + orb $0x80, 0x7(%rsp)
17526 +#endif
17527 ret
17528 .Lmemcpy_e_e:
17529 .previous
17530 @@ -141,6 +147,9 @@ ENTRY(memcpy)
17531 movq %r9, 1*8(%rdi)
17532 movq %r10, -2*8(%rdi, %rdx)
17533 movq %r11, -1*8(%rdi, %rdx)
17534 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
17535 + orb $0x80, 0x7(%rsp)
17536 +#endif
17537 retq
17538 .p2align 4
17539 .Lless_16bytes:
17540 @@ -153,6 +162,9 @@ ENTRY(memcpy)
17541 movq -1*8(%rsi, %rdx), %r9
17542 movq %r8, 0*8(%rdi)
17543 movq %r9, -1*8(%rdi, %rdx)
17544 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
17545 + orb $0x80, 0x7(%rsp)
17546 +#endif
17547 retq
17548 .p2align 4
17549 .Lless_8bytes:
17550 @@ -166,6 +178,9 @@ ENTRY(memcpy)
17551 movl -4(%rsi, %rdx), %r8d
17552 movl %ecx, (%rdi)
17553 movl %r8d, -4(%rdi, %rdx)
17554 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
17555 + orb $0x80, 0x7(%rsp)
17556 +#endif
17557 retq
17558 .p2align 4
17559 .Lless_3bytes:
17560 @@ -183,6 +198,9 @@ ENTRY(memcpy)
17561 jnz .Lloop_1
17562
17563 .Lend:
17564 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
17565 + orb $0x80, 0x7(%rsp)
17566 +#endif
17567 retq
17568 CFI_ENDPROC
17569 ENDPROC(memcpy)
17570 diff -urNp linux-3.0.4/arch/x86/lib/memmove_64.S linux-3.0.4/arch/x86/lib/memmove_64.S
17571 --- linux-3.0.4/arch/x86/lib/memmove_64.S 2011-07-21 22:17:23.000000000 -0400
17572 +++ linux-3.0.4/arch/x86/lib/memmove_64.S 2011-09-17 18:31:51.000000000 -0400
17573 @@ -201,6 +201,9 @@ ENTRY(memmove)
17574 movb (%rsi), %r11b
17575 movb %r11b, (%rdi)
17576 13:
17577 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
17578 + orb $0x80, 0x7(%rsp)
17579 +#endif
17580 retq
17581 CFI_ENDPROC
17582
17583 @@ -209,6 +212,9 @@ ENTRY(memmove)
17584 /* Forward moving data. */
17585 movq %rdx, %rcx
17586 rep movsb
17587 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
17588 + orb $0x80, 0x7(%rsp)
17589 +#endif
17590 retq
17591 .Lmemmove_end_forward_efs:
17592 .previous
17593 diff -urNp linux-3.0.4/arch/x86/lib/memset_64.S linux-3.0.4/arch/x86/lib/memset_64.S
17594 --- linux-3.0.4/arch/x86/lib/memset_64.S 2011-07-21 22:17:23.000000000 -0400
17595 +++ linux-3.0.4/arch/x86/lib/memset_64.S 2011-09-17 18:31:51.000000000 -0400
17596 @@ -31,6 +31,9 @@
17597 movl %r8d,%ecx
17598 rep stosb
17599 movq %r9,%rax
17600 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
17601 + orb $0x80, 0x7(%rsp)
17602 +#endif
17603 ret
17604 .Lmemset_e:
17605 .previous
17606 @@ -53,6 +56,9 @@
17607 movl %edx,%ecx
17608 rep stosb
17609 movq %r9,%rax
17610 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
17611 + orb $0x80, 0x7(%rsp)
17612 +#endif
17613 ret
17614 .Lmemset_e_e:
17615 .previous
17616 @@ -121,6 +127,9 @@ ENTRY(__memset)
17617
17618 .Lende:
17619 movq %r10,%rax
17620 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
17621 + orb $0x80, 0x7(%rsp)
17622 +#endif
17623 ret
17624
17625 CFI_RESTORE_STATE
17626 diff -urNp linux-3.0.4/arch/x86/lib/mmx_32.c linux-3.0.4/arch/x86/lib/mmx_32.c
17627 --- linux-3.0.4/arch/x86/lib/mmx_32.c 2011-07-21 22:17:23.000000000 -0400
17628 +++ linux-3.0.4/arch/x86/lib/mmx_32.c 2011-08-23 21:47:55.000000000 -0400
17629 @@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *
17630 {
17631 void *p;
17632 int i;
17633 + unsigned long cr0;
17634
17635 if (unlikely(in_interrupt()))
17636 return __memcpy(to, from, len);
17637 @@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *
17638 kernel_fpu_begin();
17639
17640 __asm__ __volatile__ (
17641 - "1: prefetch (%0)\n" /* This set is 28 bytes */
17642 - " prefetch 64(%0)\n"
17643 - " prefetch 128(%0)\n"
17644 - " prefetch 192(%0)\n"
17645 - " prefetch 256(%0)\n"
17646 + "1: prefetch (%1)\n" /* This set is 28 bytes */
17647 + " prefetch 64(%1)\n"
17648 + " prefetch 128(%1)\n"
17649 + " prefetch 192(%1)\n"
17650 + " prefetch 256(%1)\n"
17651 "2: \n"
17652 ".section .fixup, \"ax\"\n"
17653 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17654 + "3: \n"
17655 +
17656 +#ifdef CONFIG_PAX_KERNEXEC
17657 + " movl %%cr0, %0\n"
17658 + " movl %0, %%eax\n"
17659 + " andl $0xFFFEFFFF, %%eax\n"
17660 + " movl %%eax, %%cr0\n"
17661 +#endif
17662 +
17663 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17664 +
17665 +#ifdef CONFIG_PAX_KERNEXEC
17666 + " movl %0, %%cr0\n"
17667 +#endif
17668 +
17669 " jmp 2b\n"
17670 ".previous\n"
17671 _ASM_EXTABLE(1b, 3b)
17672 - : : "r" (from));
17673 + : "=&r" (cr0) : "r" (from) : "ax");
17674
17675 for ( ; i > 5; i--) {
17676 __asm__ __volatile__ (
17677 - "1: prefetch 320(%0)\n"
17678 - "2: movq (%0), %%mm0\n"
17679 - " movq 8(%0), %%mm1\n"
17680 - " movq 16(%0), %%mm2\n"
17681 - " movq 24(%0), %%mm3\n"
17682 - " movq %%mm0, (%1)\n"
17683 - " movq %%mm1, 8(%1)\n"
17684 - " movq %%mm2, 16(%1)\n"
17685 - " movq %%mm3, 24(%1)\n"
17686 - " movq 32(%0), %%mm0\n"
17687 - " movq 40(%0), %%mm1\n"
17688 - " movq 48(%0), %%mm2\n"
17689 - " movq 56(%0), %%mm3\n"
17690 - " movq %%mm0, 32(%1)\n"
17691 - " movq %%mm1, 40(%1)\n"
17692 - " movq %%mm2, 48(%1)\n"
17693 - " movq %%mm3, 56(%1)\n"
17694 + "1: prefetch 320(%1)\n"
17695 + "2: movq (%1), %%mm0\n"
17696 + " movq 8(%1), %%mm1\n"
17697 + " movq 16(%1), %%mm2\n"
17698 + " movq 24(%1), %%mm3\n"
17699 + " movq %%mm0, (%2)\n"
17700 + " movq %%mm1, 8(%2)\n"
17701 + " movq %%mm2, 16(%2)\n"
17702 + " movq %%mm3, 24(%2)\n"
17703 + " movq 32(%1), %%mm0\n"
17704 + " movq 40(%1), %%mm1\n"
17705 + " movq 48(%1), %%mm2\n"
17706 + " movq 56(%1), %%mm3\n"
17707 + " movq %%mm0, 32(%2)\n"
17708 + " movq %%mm1, 40(%2)\n"
17709 + " movq %%mm2, 48(%2)\n"
17710 + " movq %%mm3, 56(%2)\n"
17711 ".section .fixup, \"ax\"\n"
17712 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17713 + "3:\n"
17714 +
17715 +#ifdef CONFIG_PAX_KERNEXEC
17716 + " movl %%cr0, %0\n"
17717 + " movl %0, %%eax\n"
17718 + " andl $0xFFFEFFFF, %%eax\n"
17719 + " movl %%eax, %%cr0\n"
17720 +#endif
17721 +
17722 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17723 +
17724 +#ifdef CONFIG_PAX_KERNEXEC
17725 + " movl %0, %%cr0\n"
17726 +#endif
17727 +
17728 " jmp 2b\n"
17729 ".previous\n"
17730 _ASM_EXTABLE(1b, 3b)
17731 - : : "r" (from), "r" (to) : "memory");
17732 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
17733
17734 from += 64;
17735 to += 64;
17736 @@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
17737 static void fast_copy_page(void *to, void *from)
17738 {
17739 int i;
17740 + unsigned long cr0;
17741
17742 kernel_fpu_begin();
17743
17744 @@ -166,42 +196,70 @@ static void fast_copy_page(void *to, voi
17745 * but that is for later. -AV
17746 */
17747 __asm__ __volatile__(
17748 - "1: prefetch (%0)\n"
17749 - " prefetch 64(%0)\n"
17750 - " prefetch 128(%0)\n"
17751 - " prefetch 192(%0)\n"
17752 - " prefetch 256(%0)\n"
17753 + "1: prefetch (%1)\n"
17754 + " prefetch 64(%1)\n"
17755 + " prefetch 128(%1)\n"
17756 + " prefetch 192(%1)\n"
17757 + " prefetch 256(%1)\n"
17758 "2: \n"
17759 ".section .fixup, \"ax\"\n"
17760 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17761 + "3: \n"
17762 +
17763 +#ifdef CONFIG_PAX_KERNEXEC
17764 + " movl %%cr0, %0\n"
17765 + " movl %0, %%eax\n"
17766 + " andl $0xFFFEFFFF, %%eax\n"
17767 + " movl %%eax, %%cr0\n"
17768 +#endif
17769 +
17770 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17771 +
17772 +#ifdef CONFIG_PAX_KERNEXEC
17773 + " movl %0, %%cr0\n"
17774 +#endif
17775 +
17776 " jmp 2b\n"
17777 ".previous\n"
17778 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
17779 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
17780
17781 for (i = 0; i < (4096-320)/64; i++) {
17782 __asm__ __volatile__ (
17783 - "1: prefetch 320(%0)\n"
17784 - "2: movq (%0), %%mm0\n"
17785 - " movntq %%mm0, (%1)\n"
17786 - " movq 8(%0), %%mm1\n"
17787 - " movntq %%mm1, 8(%1)\n"
17788 - " movq 16(%0), %%mm2\n"
17789 - " movntq %%mm2, 16(%1)\n"
17790 - " movq 24(%0), %%mm3\n"
17791 - " movntq %%mm3, 24(%1)\n"
17792 - " movq 32(%0), %%mm4\n"
17793 - " movntq %%mm4, 32(%1)\n"
17794 - " movq 40(%0), %%mm5\n"
17795 - " movntq %%mm5, 40(%1)\n"
17796 - " movq 48(%0), %%mm6\n"
17797 - " movntq %%mm6, 48(%1)\n"
17798 - " movq 56(%0), %%mm7\n"
17799 - " movntq %%mm7, 56(%1)\n"
17800 + "1: prefetch 320(%1)\n"
17801 + "2: movq (%1), %%mm0\n"
17802 + " movntq %%mm0, (%2)\n"
17803 + " movq 8(%1), %%mm1\n"
17804 + " movntq %%mm1, 8(%2)\n"
17805 + " movq 16(%1), %%mm2\n"
17806 + " movntq %%mm2, 16(%2)\n"
17807 + " movq 24(%1), %%mm3\n"
17808 + " movntq %%mm3, 24(%2)\n"
17809 + " movq 32(%1), %%mm4\n"
17810 + " movntq %%mm4, 32(%2)\n"
17811 + " movq 40(%1), %%mm5\n"
17812 + " movntq %%mm5, 40(%2)\n"
17813 + " movq 48(%1), %%mm6\n"
17814 + " movntq %%mm6, 48(%2)\n"
17815 + " movq 56(%1), %%mm7\n"
17816 + " movntq %%mm7, 56(%2)\n"
17817 ".section .fixup, \"ax\"\n"
17818 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17819 + "3:\n"
17820 +
17821 +#ifdef CONFIG_PAX_KERNEXEC
17822 + " movl %%cr0, %0\n"
17823 + " movl %0, %%eax\n"
17824 + " andl $0xFFFEFFFF, %%eax\n"
17825 + " movl %%eax, %%cr0\n"
17826 +#endif
17827 +
17828 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17829 +
17830 +#ifdef CONFIG_PAX_KERNEXEC
17831 + " movl %0, %%cr0\n"
17832 +#endif
17833 +
17834 " jmp 2b\n"
17835 ".previous\n"
17836 - _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
17837 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
17838
17839 from += 64;
17840 to += 64;
17841 @@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
17842 static void fast_copy_page(void *to, void *from)
17843 {
17844 int i;
17845 + unsigned long cr0;
17846
17847 kernel_fpu_begin();
17848
17849 __asm__ __volatile__ (
17850 - "1: prefetch (%0)\n"
17851 - " prefetch 64(%0)\n"
17852 - " prefetch 128(%0)\n"
17853 - " prefetch 192(%0)\n"
17854 - " prefetch 256(%0)\n"
17855 + "1: prefetch (%1)\n"
17856 + " prefetch 64(%1)\n"
17857 + " prefetch 128(%1)\n"
17858 + " prefetch 192(%1)\n"
17859 + " prefetch 256(%1)\n"
17860 "2: \n"
17861 ".section .fixup, \"ax\"\n"
17862 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17863 + "3: \n"
17864 +
17865 +#ifdef CONFIG_PAX_KERNEXEC
17866 + " movl %%cr0, %0\n"
17867 + " movl %0, %%eax\n"
17868 + " andl $0xFFFEFFFF, %%eax\n"
17869 + " movl %%eax, %%cr0\n"
17870 +#endif
17871 +
17872 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17873 +
17874 +#ifdef CONFIG_PAX_KERNEXEC
17875 + " movl %0, %%cr0\n"
17876 +#endif
17877 +
17878 " jmp 2b\n"
17879 ".previous\n"
17880 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
17881 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
17882
17883 for (i = 0; i < 4096/64; i++) {
17884 __asm__ __volatile__ (
17885 - "1: prefetch 320(%0)\n"
17886 - "2: movq (%0), %%mm0\n"
17887 - " movq 8(%0), %%mm1\n"
17888 - " movq 16(%0), %%mm2\n"
17889 - " movq 24(%0), %%mm3\n"
17890 - " movq %%mm0, (%1)\n"
17891 - " movq %%mm1, 8(%1)\n"
17892 - " movq %%mm2, 16(%1)\n"
17893 - " movq %%mm3, 24(%1)\n"
17894 - " movq 32(%0), %%mm0\n"
17895 - " movq 40(%0), %%mm1\n"
17896 - " movq 48(%0), %%mm2\n"
17897 - " movq 56(%0), %%mm3\n"
17898 - " movq %%mm0, 32(%1)\n"
17899 - " movq %%mm1, 40(%1)\n"
17900 - " movq %%mm2, 48(%1)\n"
17901 - " movq %%mm3, 56(%1)\n"
17902 + "1: prefetch 320(%1)\n"
17903 + "2: movq (%1), %%mm0\n"
17904 + " movq 8(%1), %%mm1\n"
17905 + " movq 16(%1), %%mm2\n"
17906 + " movq 24(%1), %%mm3\n"
17907 + " movq %%mm0, (%2)\n"
17908 + " movq %%mm1, 8(%2)\n"
17909 + " movq %%mm2, 16(%2)\n"
17910 + " movq %%mm3, 24(%2)\n"
17911 + " movq 32(%1), %%mm0\n"
17912 + " movq 40(%1), %%mm1\n"
17913 + " movq 48(%1), %%mm2\n"
17914 + " movq 56(%1), %%mm3\n"
17915 + " movq %%mm0, 32(%2)\n"
17916 + " movq %%mm1, 40(%2)\n"
17917 + " movq %%mm2, 48(%2)\n"
17918 + " movq %%mm3, 56(%2)\n"
17919 ".section .fixup, \"ax\"\n"
17920 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17921 + "3:\n"
17922 +
17923 +#ifdef CONFIG_PAX_KERNEXEC
17924 + " movl %%cr0, %0\n"
17925 + " movl %0, %%eax\n"
17926 + " andl $0xFFFEFFFF, %%eax\n"
17927 + " movl %%eax, %%cr0\n"
17928 +#endif
17929 +
17930 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17931 +
17932 +#ifdef CONFIG_PAX_KERNEXEC
17933 + " movl %0, %%cr0\n"
17934 +#endif
17935 +
17936 " jmp 2b\n"
17937 ".previous\n"
17938 _ASM_EXTABLE(1b, 3b)
17939 - : : "r" (from), "r" (to) : "memory");
17940 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
17941
17942 from += 64;
17943 to += 64;
17944 diff -urNp linux-3.0.4/arch/x86/lib/putuser.S linux-3.0.4/arch/x86/lib/putuser.S
17945 --- linux-3.0.4/arch/x86/lib/putuser.S 2011-07-21 22:17:23.000000000 -0400
17946 +++ linux-3.0.4/arch/x86/lib/putuser.S 2011-08-23 21:47:55.000000000 -0400
17947 @@ -15,7 +15,8 @@
17948 #include <asm/thread_info.h>
17949 #include <asm/errno.h>
17950 #include <asm/asm.h>
17951 -
17952 +#include <asm/segment.h>
17953 +#include <asm/pgtable.h>
17954
17955 /*
17956 * __put_user_X
17957 @@ -29,52 +30,119 @@
17958 * as they get called from within inline assembly.
17959 */
17960
17961 -#define ENTER CFI_STARTPROC ; \
17962 - GET_THREAD_INFO(%_ASM_BX)
17963 +#define ENTER CFI_STARTPROC
17964 #define EXIT ret ; \
17965 CFI_ENDPROC
17966
17967 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17968 +#define _DEST %_ASM_CX,%_ASM_BX
17969 +#else
17970 +#define _DEST %_ASM_CX
17971 +#endif
17972 +
17973 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
17974 +#define __copyuser_seg gs;
17975 +#else
17976 +#define __copyuser_seg
17977 +#endif
17978 +
17979 .text
17980 ENTRY(__put_user_1)
17981 ENTER
17982 +
17983 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17984 + GET_THREAD_INFO(%_ASM_BX)
17985 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
17986 jae bad_put_user
17987 -1: movb %al,(%_ASM_CX)
17988 +
17989 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17990 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
17991 + cmp %_ASM_BX,%_ASM_CX
17992 + jb 1234f
17993 + xor %ebx,%ebx
17994 +1234:
17995 +#endif
17996 +
17997 +#endif
17998 +
17999 +1: __copyuser_seg movb %al,(_DEST)
18000 xor %eax,%eax
18001 EXIT
18002 ENDPROC(__put_user_1)
18003
18004 ENTRY(__put_user_2)
18005 ENTER
18006 +
18007 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
18008 + GET_THREAD_INFO(%_ASM_BX)
18009 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
18010 sub $1,%_ASM_BX
18011 cmp %_ASM_BX,%_ASM_CX
18012 jae bad_put_user
18013 -2: movw %ax,(%_ASM_CX)
18014 +
18015 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18016 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
18017 + cmp %_ASM_BX,%_ASM_CX
18018 + jb 1234f
18019 + xor %ebx,%ebx
18020 +1234:
18021 +#endif
18022 +
18023 +#endif
18024 +
18025 +2: __copyuser_seg movw %ax,(_DEST)
18026 xor %eax,%eax
18027 EXIT
18028 ENDPROC(__put_user_2)
18029
18030 ENTRY(__put_user_4)
18031 ENTER
18032 +
18033 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
18034 + GET_THREAD_INFO(%_ASM_BX)
18035 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
18036 sub $3,%_ASM_BX
18037 cmp %_ASM_BX,%_ASM_CX
18038 jae bad_put_user
18039 -3: movl %eax,(%_ASM_CX)
18040 +
18041 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18042 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
18043 + cmp %_ASM_BX,%_ASM_CX
18044 + jb 1234f
18045 + xor %ebx,%ebx
18046 +1234:
18047 +#endif
18048 +
18049 +#endif
18050 +
18051 +3: __copyuser_seg movl %eax,(_DEST)
18052 xor %eax,%eax
18053 EXIT
18054 ENDPROC(__put_user_4)
18055
18056 ENTRY(__put_user_8)
18057 ENTER
18058 +
18059 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
18060 + GET_THREAD_INFO(%_ASM_BX)
18061 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
18062 sub $7,%_ASM_BX
18063 cmp %_ASM_BX,%_ASM_CX
18064 jae bad_put_user
18065 -4: mov %_ASM_AX,(%_ASM_CX)
18066 +
18067 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18068 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
18069 + cmp %_ASM_BX,%_ASM_CX
18070 + jb 1234f
18071 + xor %ebx,%ebx
18072 +1234:
18073 +#endif
18074 +
18075 +#endif
18076 +
18077 +4: __copyuser_seg mov %_ASM_AX,(_DEST)
18078 #ifdef CONFIG_X86_32
18079 -5: movl %edx,4(%_ASM_CX)
18080 +5: __copyuser_seg movl %edx,4(_DEST)
18081 #endif
18082 xor %eax,%eax
18083 EXIT
18084 diff -urNp linux-3.0.4/arch/x86/lib/rwlock_64.S linux-3.0.4/arch/x86/lib/rwlock_64.S
18085 --- linux-3.0.4/arch/x86/lib/rwlock_64.S 2011-07-21 22:17:23.000000000 -0400
18086 +++ linux-3.0.4/arch/x86/lib/rwlock_64.S 2011-09-17 18:31:51.000000000 -0400
18087 @@ -17,6 +17,9 @@ ENTRY(__write_lock_failed)
18088 LOCK_PREFIX
18089 subl $RW_LOCK_BIAS,(%rdi)
18090 jnz __write_lock_failed
18091 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
18092 + orb $0x80, 0x7(%rsp)
18093 +#endif
18094 ret
18095 CFI_ENDPROC
18096 END(__write_lock_failed)
18097 @@ -33,6 +36,9 @@ ENTRY(__read_lock_failed)
18098 LOCK_PREFIX
18099 decl (%rdi)
18100 js __read_lock_failed
18101 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
18102 + orb $0x80, 0x7(%rsp)
18103 +#endif
18104 ret
18105 CFI_ENDPROC
18106 END(__read_lock_failed)
18107 diff -urNp linux-3.0.4/arch/x86/lib/rwsem_64.S linux-3.0.4/arch/x86/lib/rwsem_64.S
18108 --- linux-3.0.4/arch/x86/lib/rwsem_64.S 2011-07-21 22:17:23.000000000 -0400
18109 +++ linux-3.0.4/arch/x86/lib/rwsem_64.S 2011-09-17 18:31:51.000000000 -0400
18110 @@ -51,6 +51,9 @@ ENTRY(call_rwsem_down_read_failed)
18111 popq_cfi %rdx
18112 CFI_RESTORE rdx
18113 restore_common_regs
18114 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
18115 + orb $0x80, 0x7(%rsp)
18116 +#endif
18117 ret
18118 CFI_ENDPROC
18119 ENDPROC(call_rwsem_down_read_failed)
18120 @@ -61,6 +64,9 @@ ENTRY(call_rwsem_down_write_failed)
18121 movq %rax,%rdi
18122 call rwsem_down_write_failed
18123 restore_common_regs
18124 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
18125 + orb $0x80, 0x7(%rsp)
18126 +#endif
18127 ret
18128 CFI_ENDPROC
18129 ENDPROC(call_rwsem_down_write_failed)
18130 @@ -73,6 +79,9 @@ ENTRY(call_rwsem_wake)
18131 movq %rax,%rdi
18132 call rwsem_wake
18133 restore_common_regs
18134 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
18135 + orb $0x80, 0x7(%rsp)
18136 +#endif
18137 1: ret
18138 CFI_ENDPROC
18139 ENDPROC(call_rwsem_wake)
18140 @@ -88,6 +97,9 @@ ENTRY(call_rwsem_downgrade_wake)
18141 popq_cfi %rdx
18142 CFI_RESTORE rdx
18143 restore_common_regs
18144 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
18145 + orb $0x80, 0x7(%rsp)
18146 +#endif
18147 ret
18148 CFI_ENDPROC
18149 ENDPROC(call_rwsem_downgrade_wake)
18150 diff -urNp linux-3.0.4/arch/x86/lib/thunk_64.S linux-3.0.4/arch/x86/lib/thunk_64.S
18151 --- linux-3.0.4/arch/x86/lib/thunk_64.S 2011-07-21 22:17:23.000000000 -0400
18152 +++ linux-3.0.4/arch/x86/lib/thunk_64.S 2011-09-17 18:31:51.000000000 -0400
18153 @@ -50,5 +50,8 @@
18154 SAVE_ARGS
18155 restore:
18156 RESTORE_ARGS
18157 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
18158 + orb $0x80, 0x7(%rsp)
18159 +#endif
18160 ret
18161 CFI_ENDPROC
18162 diff -urNp linux-3.0.4/arch/x86/lib/usercopy_32.c linux-3.0.4/arch/x86/lib/usercopy_32.c
18163 --- linux-3.0.4/arch/x86/lib/usercopy_32.c 2011-07-21 22:17:23.000000000 -0400
18164 +++ linux-3.0.4/arch/x86/lib/usercopy_32.c 2011-08-23 21:47:55.000000000 -0400
18165 @@ -43,7 +43,7 @@ do { \
18166 __asm__ __volatile__( \
18167 " testl %1,%1\n" \
18168 " jz 2f\n" \
18169 - "0: lodsb\n" \
18170 + "0: "__copyuser_seg"lodsb\n" \
18171 " stosb\n" \
18172 " testb %%al,%%al\n" \
18173 " jz 1f\n" \
18174 @@ -128,10 +128,12 @@ do { \
18175 int __d0; \
18176 might_fault(); \
18177 __asm__ __volatile__( \
18178 + __COPYUSER_SET_ES \
18179 "0: rep; stosl\n" \
18180 " movl %2,%0\n" \
18181 "1: rep; stosb\n" \
18182 "2:\n" \
18183 + __COPYUSER_RESTORE_ES \
18184 ".section .fixup,\"ax\"\n" \
18185 "3: lea 0(%2,%0,4),%0\n" \
18186 " jmp 2b\n" \
18187 @@ -200,6 +202,7 @@ long strnlen_user(const char __user *s,
18188 might_fault();
18189
18190 __asm__ __volatile__(
18191 + __COPYUSER_SET_ES
18192 " testl %0, %0\n"
18193 " jz 3f\n"
18194 " andl %0,%%ecx\n"
18195 @@ -208,6 +211,7 @@ long strnlen_user(const char __user *s,
18196 " subl %%ecx,%0\n"
18197 " addl %0,%%eax\n"
18198 "1:\n"
18199 + __COPYUSER_RESTORE_ES
18200 ".section .fixup,\"ax\"\n"
18201 "2: xorl %%eax,%%eax\n"
18202 " jmp 1b\n"
18203 @@ -227,7 +231,7 @@ EXPORT_SYMBOL(strnlen_user);
18204
18205 #ifdef CONFIG_X86_INTEL_USERCOPY
18206 static unsigned long
18207 -__copy_user_intel(void __user *to, const void *from, unsigned long size)
18208 +__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
18209 {
18210 int d0, d1;
18211 __asm__ __volatile__(
18212 @@ -239,36 +243,36 @@ __copy_user_intel(void __user *to, const
18213 " .align 2,0x90\n"
18214 "3: movl 0(%4), %%eax\n"
18215 "4: movl 4(%4), %%edx\n"
18216 - "5: movl %%eax, 0(%3)\n"
18217 - "6: movl %%edx, 4(%3)\n"
18218 + "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
18219 + "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
18220 "7: movl 8(%4), %%eax\n"
18221 "8: movl 12(%4),%%edx\n"
18222 - "9: movl %%eax, 8(%3)\n"
18223 - "10: movl %%edx, 12(%3)\n"
18224 + "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
18225 + "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
18226 "11: movl 16(%4), %%eax\n"
18227 "12: movl 20(%4), %%edx\n"
18228 - "13: movl %%eax, 16(%3)\n"
18229 - "14: movl %%edx, 20(%3)\n"
18230 + "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
18231 + "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
18232 "15: movl 24(%4), %%eax\n"
18233 "16: movl 28(%4), %%edx\n"
18234 - "17: movl %%eax, 24(%3)\n"
18235 - "18: movl %%edx, 28(%3)\n"
18236 + "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
18237 + "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
18238 "19: movl 32(%4), %%eax\n"
18239 "20: movl 36(%4), %%edx\n"
18240 - "21: movl %%eax, 32(%3)\n"
18241 - "22: movl %%edx, 36(%3)\n"
18242 + "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
18243 + "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
18244 "23: movl 40(%4), %%eax\n"
18245 "24: movl 44(%4), %%edx\n"
18246 - "25: movl %%eax, 40(%3)\n"
18247 - "26: movl %%edx, 44(%3)\n"
18248 + "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
18249 + "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
18250 "27: movl 48(%4), %%eax\n"
18251 "28: movl 52(%4), %%edx\n"
18252 - "29: movl %%eax, 48(%3)\n"
18253 - "30: movl %%edx, 52(%3)\n"
18254 + "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
18255 + "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
18256 "31: movl 56(%4), %%eax\n"
18257 "32: movl 60(%4), %%edx\n"
18258 - "33: movl %%eax, 56(%3)\n"
18259 - "34: movl %%edx, 60(%3)\n"
18260 + "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
18261 + "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
18262 " addl $-64, %0\n"
18263 " addl $64, %4\n"
18264 " addl $64, %3\n"
18265 @@ -278,10 +282,119 @@ __copy_user_intel(void __user *to, const
18266 " shrl $2, %0\n"
18267 " andl $3, %%eax\n"
18268 " cld\n"
18269 + __COPYUSER_SET_ES
18270 "99: rep; movsl\n"
18271 "36: movl %%eax, %0\n"
18272 "37: rep; movsb\n"
18273 "100:\n"
18274 + __COPYUSER_RESTORE_ES
18275 + ".section .fixup,\"ax\"\n"
18276 + "101: lea 0(%%eax,%0,4),%0\n"
18277 + " jmp 100b\n"
18278 + ".previous\n"
18279 + ".section __ex_table,\"a\"\n"
18280 + " .align 4\n"
18281 + " .long 1b,100b\n"
18282 + " .long 2b,100b\n"
18283 + " .long 3b,100b\n"
18284 + " .long 4b,100b\n"
18285 + " .long 5b,100b\n"
18286 + " .long 6b,100b\n"
18287 + " .long 7b,100b\n"
18288 + " .long 8b,100b\n"
18289 + " .long 9b,100b\n"
18290 + " .long 10b,100b\n"
18291 + " .long 11b,100b\n"
18292 + " .long 12b,100b\n"
18293 + " .long 13b,100b\n"
18294 + " .long 14b,100b\n"
18295 + " .long 15b,100b\n"
18296 + " .long 16b,100b\n"
18297 + " .long 17b,100b\n"
18298 + " .long 18b,100b\n"
18299 + " .long 19b,100b\n"
18300 + " .long 20b,100b\n"
18301 + " .long 21b,100b\n"
18302 + " .long 22b,100b\n"
18303 + " .long 23b,100b\n"
18304 + " .long 24b,100b\n"
18305 + " .long 25b,100b\n"
18306 + " .long 26b,100b\n"
18307 + " .long 27b,100b\n"
18308 + " .long 28b,100b\n"
18309 + " .long 29b,100b\n"
18310 + " .long 30b,100b\n"
18311 + " .long 31b,100b\n"
18312 + " .long 32b,100b\n"
18313 + " .long 33b,100b\n"
18314 + " .long 34b,100b\n"
18315 + " .long 35b,100b\n"
18316 + " .long 36b,100b\n"
18317 + " .long 37b,100b\n"
18318 + " .long 99b,101b\n"
18319 + ".previous"
18320 + : "=&c"(size), "=&D" (d0), "=&S" (d1)
18321 + : "1"(to), "2"(from), "0"(size)
18322 + : "eax", "edx", "memory");
18323 + return size;
18324 +}
18325 +
18326 +static unsigned long
18327 +__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
18328 +{
18329 + int d0, d1;
18330 + __asm__ __volatile__(
18331 + " .align 2,0x90\n"
18332 + "1: "__copyuser_seg" movl 32(%4), %%eax\n"
18333 + " cmpl $67, %0\n"
18334 + " jbe 3f\n"
18335 + "2: "__copyuser_seg" movl 64(%4), %%eax\n"
18336 + " .align 2,0x90\n"
18337 + "3: "__copyuser_seg" movl 0(%4), %%eax\n"
18338 + "4: "__copyuser_seg" movl 4(%4), %%edx\n"
18339 + "5: movl %%eax, 0(%3)\n"
18340 + "6: movl %%edx, 4(%3)\n"
18341 + "7: "__copyuser_seg" movl 8(%4), %%eax\n"
18342 + "8: "__copyuser_seg" movl 12(%4),%%edx\n"
18343 + "9: movl %%eax, 8(%3)\n"
18344 + "10: movl %%edx, 12(%3)\n"
18345 + "11: "__copyuser_seg" movl 16(%4), %%eax\n"
18346 + "12: "__copyuser_seg" movl 20(%4), %%edx\n"
18347 + "13: movl %%eax, 16(%3)\n"
18348 + "14: movl %%edx, 20(%3)\n"
18349 + "15: "__copyuser_seg" movl 24(%4), %%eax\n"
18350 + "16: "__copyuser_seg" movl 28(%4), %%edx\n"
18351 + "17: movl %%eax, 24(%3)\n"
18352 + "18: movl %%edx, 28(%3)\n"
18353 + "19: "__copyuser_seg" movl 32(%4), %%eax\n"
18354 + "20: "__copyuser_seg" movl 36(%4), %%edx\n"
18355 + "21: movl %%eax, 32(%3)\n"
18356 + "22: movl %%edx, 36(%3)\n"
18357 + "23: "__copyuser_seg" movl 40(%4), %%eax\n"
18358 + "24: "__copyuser_seg" movl 44(%4), %%edx\n"
18359 + "25: movl %%eax, 40(%3)\n"
18360 + "26: movl %%edx, 44(%3)\n"
18361 + "27: "__copyuser_seg" movl 48(%4), %%eax\n"
18362 + "28: "__copyuser_seg" movl 52(%4), %%edx\n"
18363 + "29: movl %%eax, 48(%3)\n"
18364 + "30: movl %%edx, 52(%3)\n"
18365 + "31: "__copyuser_seg" movl 56(%4), %%eax\n"
18366 + "32: "__copyuser_seg" movl 60(%4), %%edx\n"
18367 + "33: movl %%eax, 56(%3)\n"
18368 + "34: movl %%edx, 60(%3)\n"
18369 + " addl $-64, %0\n"
18370 + " addl $64, %4\n"
18371 + " addl $64, %3\n"
18372 + " cmpl $63, %0\n"
18373 + " ja 1b\n"
18374 + "35: movl %0, %%eax\n"
18375 + " shrl $2, %0\n"
18376 + " andl $3, %%eax\n"
18377 + " cld\n"
18378 + "99: rep; "__copyuser_seg" movsl\n"
18379 + "36: movl %%eax, %0\n"
18380 + "37: rep; "__copyuser_seg" movsb\n"
18381 + "100:\n"
18382 ".section .fixup,\"ax\"\n"
18383 "101: lea 0(%%eax,%0,4),%0\n"
18384 " jmp 100b\n"
18385 @@ -339,41 +452,41 @@ __copy_user_zeroing_intel(void *to, cons
18386 int d0, d1;
18387 __asm__ __volatile__(
18388 " .align 2,0x90\n"
18389 - "0: movl 32(%4), %%eax\n"
18390 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
18391 " cmpl $67, %0\n"
18392 " jbe 2f\n"
18393 - "1: movl 64(%4), %%eax\n"
18394 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
18395 " .align 2,0x90\n"
18396 - "2: movl 0(%4), %%eax\n"
18397 - "21: movl 4(%4), %%edx\n"
18398 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
18399 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
18400 " movl %%eax, 0(%3)\n"
18401 " movl %%edx, 4(%3)\n"
18402 - "3: movl 8(%4), %%eax\n"
18403 - "31: movl 12(%4),%%edx\n"
18404 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
18405 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
18406 " movl %%eax, 8(%3)\n"
18407 " movl %%edx, 12(%3)\n"
18408 - "4: movl 16(%4), %%eax\n"
18409 - "41: movl 20(%4), %%edx\n"
18410 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
18411 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
18412 " movl %%eax, 16(%3)\n"
18413 " movl %%edx, 20(%3)\n"
18414 - "10: movl 24(%4), %%eax\n"
18415 - "51: movl 28(%4), %%edx\n"
18416 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
18417 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
18418 " movl %%eax, 24(%3)\n"
18419 " movl %%edx, 28(%3)\n"
18420 - "11: movl 32(%4), %%eax\n"
18421 - "61: movl 36(%4), %%edx\n"
18422 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
18423 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
18424 " movl %%eax, 32(%3)\n"
18425 " movl %%edx, 36(%3)\n"
18426 - "12: movl 40(%4), %%eax\n"
18427 - "71: movl 44(%4), %%edx\n"
18428 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
18429 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
18430 " movl %%eax, 40(%3)\n"
18431 " movl %%edx, 44(%3)\n"
18432 - "13: movl 48(%4), %%eax\n"
18433 - "81: movl 52(%4), %%edx\n"
18434 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
18435 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
18436 " movl %%eax, 48(%3)\n"
18437 " movl %%edx, 52(%3)\n"
18438 - "14: movl 56(%4), %%eax\n"
18439 - "91: movl 60(%4), %%edx\n"
18440 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
18441 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
18442 " movl %%eax, 56(%3)\n"
18443 " movl %%edx, 60(%3)\n"
18444 " addl $-64, %0\n"
18445 @@ -385,9 +498,9 @@ __copy_user_zeroing_intel(void *to, cons
18446 " shrl $2, %0\n"
18447 " andl $3, %%eax\n"
18448 " cld\n"
18449 - "6: rep; movsl\n"
18450 + "6: rep; "__copyuser_seg" movsl\n"
18451 " movl %%eax,%0\n"
18452 - "7: rep; movsb\n"
18453 + "7: rep; "__copyuser_seg" movsb\n"
18454 "8:\n"
18455 ".section .fixup,\"ax\"\n"
18456 "9: lea 0(%%eax,%0,4),%0\n"
18457 @@ -440,41 +553,41 @@ static unsigned long __copy_user_zeroing
18458
18459 __asm__ __volatile__(
18460 " .align 2,0x90\n"
18461 - "0: movl 32(%4), %%eax\n"
18462 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
18463 " cmpl $67, %0\n"
18464 " jbe 2f\n"
18465 - "1: movl 64(%4), %%eax\n"
18466 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
18467 " .align 2,0x90\n"
18468 - "2: movl 0(%4), %%eax\n"
18469 - "21: movl 4(%4), %%edx\n"
18470 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
18471 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
18472 " movnti %%eax, 0(%3)\n"
18473 " movnti %%edx, 4(%3)\n"
18474 - "3: movl 8(%4), %%eax\n"
18475 - "31: movl 12(%4),%%edx\n"
18476 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
18477 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
18478 " movnti %%eax, 8(%3)\n"
18479 " movnti %%edx, 12(%3)\n"
18480 - "4: movl 16(%4), %%eax\n"
18481 - "41: movl 20(%4), %%edx\n"
18482 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
18483 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
18484 " movnti %%eax, 16(%3)\n"
18485 " movnti %%edx, 20(%3)\n"
18486 - "10: movl 24(%4), %%eax\n"
18487 - "51: movl 28(%4), %%edx\n"
18488 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
18489 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
18490 " movnti %%eax, 24(%3)\n"
18491 " movnti %%edx, 28(%3)\n"
18492 - "11: movl 32(%4), %%eax\n"
18493 - "61: movl 36(%4), %%edx\n"
18494 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
18495 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
18496 " movnti %%eax, 32(%3)\n"
18497 " movnti %%edx, 36(%3)\n"
18498 - "12: movl 40(%4), %%eax\n"
18499 - "71: movl 44(%4), %%edx\n"
18500 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
18501 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
18502 " movnti %%eax, 40(%3)\n"
18503 " movnti %%edx, 44(%3)\n"
18504 - "13: movl 48(%4), %%eax\n"
18505 - "81: movl 52(%4), %%edx\n"
18506 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
18507 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
18508 " movnti %%eax, 48(%3)\n"
18509 " movnti %%edx, 52(%3)\n"
18510 - "14: movl 56(%4), %%eax\n"
18511 - "91: movl 60(%4), %%edx\n"
18512 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
18513 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
18514 " movnti %%eax, 56(%3)\n"
18515 " movnti %%edx, 60(%3)\n"
18516 " addl $-64, %0\n"
18517 @@ -487,9 +600,9 @@ static unsigned long __copy_user_zeroing
18518 " shrl $2, %0\n"
18519 " andl $3, %%eax\n"
18520 " cld\n"
18521 - "6: rep; movsl\n"
18522 + "6: rep; "__copyuser_seg" movsl\n"
18523 " movl %%eax,%0\n"
18524 - "7: rep; movsb\n"
18525 + "7: rep; "__copyuser_seg" movsb\n"
18526 "8:\n"
18527 ".section .fixup,\"ax\"\n"
18528 "9: lea 0(%%eax,%0,4),%0\n"
18529 @@ -537,41 +650,41 @@ static unsigned long __copy_user_intel_n
18530
18531 __asm__ __volatile__(
18532 " .align 2,0x90\n"
18533 - "0: movl 32(%4), %%eax\n"
18534 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
18535 " cmpl $67, %0\n"
18536 " jbe 2f\n"
18537 - "1: movl 64(%4), %%eax\n"
18538 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
18539 " .align 2,0x90\n"
18540 - "2: movl 0(%4), %%eax\n"
18541 - "21: movl 4(%4), %%edx\n"
18542 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
18543 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
18544 " movnti %%eax, 0(%3)\n"
18545 " movnti %%edx, 4(%3)\n"
18546 - "3: movl 8(%4), %%eax\n"
18547 - "31: movl 12(%4),%%edx\n"
18548 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
18549 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
18550 " movnti %%eax, 8(%3)\n"
18551 " movnti %%edx, 12(%3)\n"
18552 - "4: movl 16(%4), %%eax\n"
18553 - "41: movl 20(%4), %%edx\n"
18554 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
18555 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
18556 " movnti %%eax, 16(%3)\n"
18557 " movnti %%edx, 20(%3)\n"
18558 - "10: movl 24(%4), %%eax\n"
18559 - "51: movl 28(%4), %%edx\n"
18560 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
18561 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
18562 " movnti %%eax, 24(%3)\n"
18563 " movnti %%edx, 28(%3)\n"
18564 - "11: movl 32(%4), %%eax\n"
18565 - "61: movl 36(%4), %%edx\n"
18566 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
18567 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
18568 " movnti %%eax, 32(%3)\n"
18569 " movnti %%edx, 36(%3)\n"
18570 - "12: movl 40(%4), %%eax\n"
18571 - "71: movl 44(%4), %%edx\n"
18572 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
18573 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
18574 " movnti %%eax, 40(%3)\n"
18575 " movnti %%edx, 44(%3)\n"
18576 - "13: movl 48(%4), %%eax\n"
18577 - "81: movl 52(%4), %%edx\n"
18578 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
18579 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
18580 " movnti %%eax, 48(%3)\n"
18581 " movnti %%edx, 52(%3)\n"
18582 - "14: movl 56(%4), %%eax\n"
18583 - "91: movl 60(%4), %%edx\n"
18584 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
18585 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
18586 " movnti %%eax, 56(%3)\n"
18587 " movnti %%edx, 60(%3)\n"
18588 " addl $-64, %0\n"
18589 @@ -584,9 +697,9 @@ static unsigned long __copy_user_intel_n
18590 " shrl $2, %0\n"
18591 " andl $3, %%eax\n"
18592 " cld\n"
18593 - "6: rep; movsl\n"
18594 + "6: rep; "__copyuser_seg" movsl\n"
18595 " movl %%eax,%0\n"
18596 - "7: rep; movsb\n"
18597 + "7: rep; "__copyuser_seg" movsb\n"
18598 "8:\n"
18599 ".section .fixup,\"ax\"\n"
18600 "9: lea 0(%%eax,%0,4),%0\n"
18601 @@ -629,32 +742,36 @@ static unsigned long __copy_user_intel_n
18602 */
18603 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
18604 unsigned long size);
18605 -unsigned long __copy_user_intel(void __user *to, const void *from,
18606 +unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
18607 + unsigned long size);
18608 +unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
18609 unsigned long size);
18610 unsigned long __copy_user_zeroing_intel_nocache(void *to,
18611 const void __user *from, unsigned long size);
18612 #endif /* CONFIG_X86_INTEL_USERCOPY */
18613
18614 /* Generic arbitrary sized copy. */
18615 -#define __copy_user(to, from, size) \
18616 +#define __copy_user(to, from, size, prefix, set, restore) \
18617 do { \
18618 int __d0, __d1, __d2; \
18619 __asm__ __volatile__( \
18620 + set \
18621 " cmp $7,%0\n" \
18622 " jbe 1f\n" \
18623 " movl %1,%0\n" \
18624 " negl %0\n" \
18625 " andl $7,%0\n" \
18626 " subl %0,%3\n" \
18627 - "4: rep; movsb\n" \
18628 + "4: rep; "prefix"movsb\n" \
18629 " movl %3,%0\n" \
18630 " shrl $2,%0\n" \
18631 " andl $3,%3\n" \
18632 " .align 2,0x90\n" \
18633 - "0: rep; movsl\n" \
18634 + "0: rep; "prefix"movsl\n" \
18635 " movl %3,%0\n" \
18636 - "1: rep; movsb\n" \
18637 + "1: rep; "prefix"movsb\n" \
18638 "2:\n" \
18639 + restore \
18640 ".section .fixup,\"ax\"\n" \
18641 "5: addl %3,%0\n" \
18642 " jmp 2b\n" \
18643 @@ -682,14 +799,14 @@ do { \
18644 " negl %0\n" \
18645 " andl $7,%0\n" \
18646 " subl %0,%3\n" \
18647 - "4: rep; movsb\n" \
18648 + "4: rep; "__copyuser_seg"movsb\n" \
18649 " movl %3,%0\n" \
18650 " shrl $2,%0\n" \
18651 " andl $3,%3\n" \
18652 " .align 2,0x90\n" \
18653 - "0: rep; movsl\n" \
18654 + "0: rep; "__copyuser_seg"movsl\n" \
18655 " movl %3,%0\n" \
18656 - "1: rep; movsb\n" \
18657 + "1: rep; "__copyuser_seg"movsb\n" \
18658 "2:\n" \
18659 ".section .fixup,\"ax\"\n" \
18660 "5: addl %3,%0\n" \
18661 @@ -775,9 +892,9 @@ survive:
18662 }
18663 #endif
18664 if (movsl_is_ok(to, from, n))
18665 - __copy_user(to, from, n);
18666 + __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
18667 else
18668 - n = __copy_user_intel(to, from, n);
18669 + n = __generic_copy_to_user_intel(to, from, n);
18670 return n;
18671 }
18672 EXPORT_SYMBOL(__copy_to_user_ll);
18673 @@ -797,10 +914,9 @@ unsigned long __copy_from_user_ll_nozero
18674 unsigned long n)
18675 {
18676 if (movsl_is_ok(to, from, n))
18677 - __copy_user(to, from, n);
18678 + __copy_user(to, from, n, __copyuser_seg, "", "");
18679 else
18680 - n = __copy_user_intel((void __user *)to,
18681 - (const void *)from, n);
18682 + n = __generic_copy_from_user_intel(to, from, n);
18683 return n;
18684 }
18685 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
18686 @@ -827,65 +943,50 @@ unsigned long __copy_from_user_ll_nocach
18687 if (n > 64 && cpu_has_xmm2)
18688 n = __copy_user_intel_nocache(to, from, n);
18689 else
18690 - __copy_user(to, from, n);
18691 + __copy_user(to, from, n, __copyuser_seg, "", "");
18692 #else
18693 - __copy_user(to, from, n);
18694 + __copy_user(to, from, n, __copyuser_seg, "", "");
18695 #endif
18696 return n;
18697 }
18698 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
18699
18700 -/**
18701 - * copy_to_user: - Copy a block of data into user space.
18702 - * @to: Destination address, in user space.
18703 - * @from: Source address, in kernel space.
18704 - * @n: Number of bytes to copy.
18705 - *
18706 - * Context: User context only. This function may sleep.
18707 - *
18708 - * Copy data from kernel space to user space.
18709 - *
18710 - * Returns number of bytes that could not be copied.
18711 - * On success, this will be zero.
18712 - */
18713 -unsigned long
18714 -copy_to_user(void __user *to, const void *from, unsigned long n)
18715 +void copy_from_user_overflow(void)
18716 {
18717 - if (access_ok(VERIFY_WRITE, to, n))
18718 - n = __copy_to_user(to, from, n);
18719 - return n;
18720 + WARN(1, "Buffer overflow detected!\n");
18721 }
18722 -EXPORT_SYMBOL(copy_to_user);
18723 +EXPORT_SYMBOL(copy_from_user_overflow);
18724
18725 -/**
18726 - * copy_from_user: - Copy a block of data from user space.
18727 - * @to: Destination address, in kernel space.
18728 - * @from: Source address, in user space.
18729 - * @n: Number of bytes to copy.
18730 - *
18731 - * Context: User context only. This function may sleep.
18732 - *
18733 - * Copy data from user space to kernel space.
18734 - *
18735 - * Returns number of bytes that could not be copied.
18736 - * On success, this will be zero.
18737 - *
18738 - * If some data could not be copied, this function will pad the copied
18739 - * data to the requested size using zero bytes.
18740 - */
18741 -unsigned long
18742 -_copy_from_user(void *to, const void __user *from, unsigned long n)
18743 +void copy_to_user_overflow(void)
18744 {
18745 - if (access_ok(VERIFY_READ, from, n))
18746 - n = __copy_from_user(to, from, n);
18747 - else
18748 - memset(to, 0, n);
18749 - return n;
18750 + WARN(1, "Buffer overflow detected!\n");
18751 }
18752 -EXPORT_SYMBOL(_copy_from_user);
18753 +EXPORT_SYMBOL(copy_to_user_overflow);
18754
18755 -void copy_from_user_overflow(void)
18756 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18757 +void __set_fs(mm_segment_t x)
18758 {
18759 - WARN(1, "Buffer overflow detected!\n");
18760 + switch (x.seg) {
18761 + case 0:
18762 + loadsegment(gs, 0);
18763 + break;
18764 + case TASK_SIZE_MAX:
18765 + loadsegment(gs, __USER_DS);
18766 + break;
18767 + case -1UL:
18768 + loadsegment(gs, __KERNEL_DS);
18769 + break;
18770 + default:
18771 + BUG();
18772 + }
18773 + return;
18774 }
18775 -EXPORT_SYMBOL(copy_from_user_overflow);
18776 +EXPORT_SYMBOL(__set_fs);
18777 +
18778 +void set_fs(mm_segment_t x)
18779 +{
18780 + current_thread_info()->addr_limit = x;
18781 + __set_fs(x);
18782 +}
18783 +EXPORT_SYMBOL(set_fs);
18784 +#endif
18785 diff -urNp linux-3.0.4/arch/x86/lib/usercopy_64.c linux-3.0.4/arch/x86/lib/usercopy_64.c
18786 --- linux-3.0.4/arch/x86/lib/usercopy_64.c 2011-07-21 22:17:23.000000000 -0400
18787 +++ linux-3.0.4/arch/x86/lib/usercopy_64.c 2011-08-23 21:47:55.000000000 -0400
18788 @@ -42,6 +42,12 @@ long
18789 __strncpy_from_user(char *dst, const char __user *src, long count)
18790 {
18791 long res;
18792 +
18793 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18794 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
18795 + src += PAX_USER_SHADOW_BASE;
18796 +#endif
18797 +
18798 __do_strncpy_from_user(dst, src, count, res);
18799 return res;
18800 }
18801 @@ -65,6 +71,12 @@ unsigned long __clear_user(void __user *
18802 {
18803 long __d0;
18804 might_fault();
18805 +
18806 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18807 + if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
18808 + addr += PAX_USER_SHADOW_BASE;
18809 +#endif
18810 +
18811 /* no memory constraint because it doesn't change any memory gcc knows
18812 about */
18813 asm volatile(
18814 @@ -151,10 +163,18 @@ EXPORT_SYMBOL(strlen_user);
18815
18816 unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
18817 {
18818 - if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
18819 + if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
18820 +
18821 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18822 + if ((unsigned long)to < PAX_USER_SHADOW_BASE)
18823 + to += PAX_USER_SHADOW_BASE;
18824 + if ((unsigned long)from < PAX_USER_SHADOW_BASE)
18825 + from += PAX_USER_SHADOW_BASE;
18826 +#endif
18827 +
18828 return copy_user_generic((__force void *)to, (__force void *)from, len);
18829 - }
18830 - return len;
18831 + }
18832 + return len;
18833 }
18834 EXPORT_SYMBOL(copy_in_user);
18835
18836 diff -urNp linux-3.0.4/arch/x86/Makefile linux-3.0.4/arch/x86/Makefile
18837 --- linux-3.0.4/arch/x86/Makefile 2011-07-21 22:17:23.000000000 -0400
18838 +++ linux-3.0.4/arch/x86/Makefile 2011-08-23 21:48:14.000000000 -0400
18839 @@ -44,6 +44,7 @@ ifeq ($(CONFIG_X86_32),y)
18840 else
18841 BITS := 64
18842 UTS_MACHINE := x86_64
18843 + biarch := $(call cc-option,-m64)
18844 CHECKFLAGS += -D__x86_64__ -m64
18845
18846 KBUILD_AFLAGS += -m64
18847 @@ -195,3 +196,12 @@ define archhelp
18848 echo ' FDARGS="..." arguments for the booted kernel'
18849 echo ' FDINITRD=file initrd for the booted kernel'
18850 endef
18851 +
18852 +define OLD_LD
18853 +
18854 +*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
18855 +*** Please upgrade your binutils to 2.18 or newer
18856 +endef
18857 +
18858 +archprepare:
18859 + $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
18860 diff -urNp linux-3.0.4/arch/x86/mm/extable.c linux-3.0.4/arch/x86/mm/extable.c
18861 --- linux-3.0.4/arch/x86/mm/extable.c 2011-07-21 22:17:23.000000000 -0400
18862 +++ linux-3.0.4/arch/x86/mm/extable.c 2011-08-23 21:47:55.000000000 -0400
18863 @@ -8,7 +8,7 @@ int fixup_exception(struct pt_regs *regs
18864 const struct exception_table_entry *fixup;
18865
18866 #ifdef CONFIG_PNPBIOS
18867 - if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
18868 + if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
18869 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
18870 extern u32 pnp_bios_is_utter_crap;
18871 pnp_bios_is_utter_crap = 1;
18872 diff -urNp linux-3.0.4/arch/x86/mm/fault.c linux-3.0.4/arch/x86/mm/fault.c
18873 --- linux-3.0.4/arch/x86/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
18874 +++ linux-3.0.4/arch/x86/mm/fault.c 2011-08-23 21:48:14.000000000 -0400
18875 @@ -13,10 +13,18 @@
18876 #include <linux/perf_event.h> /* perf_sw_event */
18877 #include <linux/hugetlb.h> /* hstate_index_to_shift */
18878 #include <linux/prefetch.h> /* prefetchw */
18879 +#include <linux/unistd.h>
18880 +#include <linux/compiler.h>
18881
18882 #include <asm/traps.h> /* dotraplinkage, ... */
18883 #include <asm/pgalloc.h> /* pgd_*(), ... */
18884 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
18885 +#include <asm/vsyscall.h>
18886 +#include <asm/tlbflush.h>
18887 +
18888 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18889 +#include <asm/stacktrace.h>
18890 +#endif
18891
18892 /*
18893 * Page fault error code bits:
18894 @@ -54,7 +62,7 @@ static inline int __kprobes notify_page_
18895 int ret = 0;
18896
18897 /* kprobe_running() needs smp_processor_id() */
18898 - if (kprobes_built_in() && !user_mode_vm(regs)) {
18899 + if (kprobes_built_in() && !user_mode(regs)) {
18900 preempt_disable();
18901 if (kprobe_running() && kprobe_fault_handler(regs, 14))
18902 ret = 1;
18903 @@ -115,7 +123,10 @@ check_prefetch_opcode(struct pt_regs *re
18904 return !instr_lo || (instr_lo>>1) == 1;
18905 case 0x00:
18906 /* Prefetch instruction is 0x0F0D or 0x0F18 */
18907 - if (probe_kernel_address(instr, opcode))
18908 + if (user_mode(regs)) {
18909 + if (__copy_from_user_inatomic(&opcode, (__force unsigned char __user *)(instr), 1))
18910 + return 0;
18911 + } else if (probe_kernel_address(instr, opcode))
18912 return 0;
18913
18914 *prefetch = (instr_lo == 0xF) &&
18915 @@ -149,7 +160,10 @@ is_prefetch(struct pt_regs *regs, unsign
18916 while (instr < max_instr) {
18917 unsigned char opcode;
18918
18919 - if (probe_kernel_address(instr, opcode))
18920 + if (user_mode(regs)) {
18921 + if (__copy_from_user_inatomic(&opcode, (__force unsigned char __user *)(instr), 1))
18922 + break;
18923 + } else if (probe_kernel_address(instr, opcode))
18924 break;
18925
18926 instr++;
18927 @@ -180,6 +194,30 @@ force_sig_info_fault(int si_signo, int s
18928 force_sig_info(si_signo, &info, tsk);
18929 }
18930
18931 +#ifdef CONFIG_PAX_EMUTRAMP
18932 +static int pax_handle_fetch_fault(struct pt_regs *regs);
18933 +#endif
18934 +
18935 +#ifdef CONFIG_PAX_PAGEEXEC
18936 +static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
18937 +{
18938 + pgd_t *pgd;
18939 + pud_t *pud;
18940 + pmd_t *pmd;
18941 +
18942 + pgd = pgd_offset(mm, address);
18943 + if (!pgd_present(*pgd))
18944 + return NULL;
18945 + pud = pud_offset(pgd, address);
18946 + if (!pud_present(*pud))
18947 + return NULL;
18948 + pmd = pmd_offset(pud, address);
18949 + if (!pmd_present(*pmd))
18950 + return NULL;
18951 + return pmd;
18952 +}
18953 +#endif
18954 +
18955 DEFINE_SPINLOCK(pgd_lock);
18956 LIST_HEAD(pgd_list);
18957
18958 @@ -230,10 +268,22 @@ void vmalloc_sync_all(void)
18959 for (address = VMALLOC_START & PMD_MASK;
18960 address >= TASK_SIZE && address < FIXADDR_TOP;
18961 address += PMD_SIZE) {
18962 +
18963 +#ifdef CONFIG_PAX_PER_CPU_PGD
18964 + unsigned long cpu;
18965 +#else
18966 struct page *page;
18967 +#endif
18968
18969 spin_lock(&pgd_lock);
18970 +
18971 +#ifdef CONFIG_PAX_PER_CPU_PGD
18972 + for (cpu = 0; cpu < NR_CPUS; ++cpu) {
18973 + pgd_t *pgd = get_cpu_pgd(cpu);
18974 + pmd_t *ret;
18975 +#else
18976 list_for_each_entry(page, &pgd_list, lru) {
18977 + pgd_t *pgd = page_address(page);
18978 spinlock_t *pgt_lock;
18979 pmd_t *ret;
18980
18981 @@ -241,8 +291,13 @@ void vmalloc_sync_all(void)
18982 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
18983
18984 spin_lock(pgt_lock);
18985 - ret = vmalloc_sync_one(page_address(page), address);
18986 +#endif
18987 +
18988 + ret = vmalloc_sync_one(pgd, address);
18989 +
18990 +#ifndef CONFIG_PAX_PER_CPU_PGD
18991 spin_unlock(pgt_lock);
18992 +#endif
18993
18994 if (!ret)
18995 break;
18996 @@ -276,6 +331,11 @@ static noinline __kprobes int vmalloc_fa
18997 * an interrupt in the middle of a task switch..
18998 */
18999 pgd_paddr = read_cr3();
19000 +
19001 +#ifdef CONFIG_PAX_PER_CPU_PGD
19002 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
19003 +#endif
19004 +
19005 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
19006 if (!pmd_k)
19007 return -1;
19008 @@ -371,7 +431,14 @@ static noinline __kprobes int vmalloc_fa
19009 * happen within a race in page table update. In the later
19010 * case just flush:
19011 */
19012 +
19013 +#ifdef CONFIG_PAX_PER_CPU_PGD
19014 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
19015 + pgd = pgd_offset_cpu(smp_processor_id(), address);
19016 +#else
19017 pgd = pgd_offset(current->active_mm, address);
19018 +#endif
19019 +
19020 pgd_ref = pgd_offset_k(address);
19021 if (pgd_none(*pgd_ref))
19022 return -1;
19023 @@ -533,7 +600,7 @@ static int is_errata93(struct pt_regs *r
19024 static int is_errata100(struct pt_regs *regs, unsigned long address)
19025 {
19026 #ifdef CONFIG_X86_64
19027 - if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
19028 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
19029 return 1;
19030 #endif
19031 return 0;
19032 @@ -560,7 +627,7 @@ static int is_f00f_bug(struct pt_regs *r
19033 }
19034
19035 static const char nx_warning[] = KERN_CRIT
19036 -"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
19037 +"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
19038
19039 static void
19040 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
19041 @@ -569,14 +636,25 @@ show_fault_oops(struct pt_regs *regs, un
19042 if (!oops_may_print())
19043 return;
19044
19045 - if (error_code & PF_INSTR) {
19046 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
19047 unsigned int level;
19048
19049 pte_t *pte = lookup_address(address, &level);
19050
19051 if (pte && pte_present(*pte) && !pte_exec(*pte))
19052 - printk(nx_warning, current_uid());
19053 + printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
19054 + }
19055 +
19056 +#ifdef CONFIG_PAX_KERNEXEC
19057 + if (init_mm.start_code <= address && address < init_mm.end_code) {
19058 + if (current->signal->curr_ip)
19059 + printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
19060 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
19061 + else
19062 + printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
19063 + current->comm, task_pid_nr(current), current_uid(), current_euid());
19064 }
19065 +#endif
19066
19067 printk(KERN_ALERT "BUG: unable to handle kernel ");
19068 if (address < PAGE_SIZE)
19069 @@ -702,6 +780,66 @@ __bad_area_nosemaphore(struct pt_regs *r
19070 unsigned long address, int si_code)
19071 {
19072 struct task_struct *tsk = current;
19073 +#if defined(CONFIG_X86_64) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
19074 + struct mm_struct *mm = tsk->mm;
19075 +#endif
19076 +
19077 +#ifdef CONFIG_X86_64
19078 + if (mm && (error_code & PF_INSTR) && mm->context.vdso) {
19079 + if (regs->ip == VSYSCALL_ADDR(__NR_vgettimeofday) ||
19080 + regs->ip == VSYSCALL_ADDR(__NR_vtime) ||
19081 + regs->ip == VSYSCALL_ADDR(__NR_vgetcpu)) {
19082 + regs->ip += mm->context.vdso - PAGE_SIZE - VSYSCALL_START;
19083 + return;
19084 + }
19085 + }
19086 +#endif
19087 +
19088 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
19089 + if (mm && (error_code & PF_USER)) {
19090 + unsigned long ip = regs->ip;
19091 +
19092 + if (v8086_mode(regs))
19093 + ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
19094 +
19095 + /*
19096 + * It's possible to have interrupts off here:
19097 + */
19098 + local_irq_enable();
19099 +
19100 +#ifdef CONFIG_PAX_PAGEEXEC
19101 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) &&
19102 + (((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) || (!(error_code & (PF_PROT | PF_WRITE)) && ip == address))) {
19103 +
19104 +#ifdef CONFIG_PAX_EMUTRAMP
19105 + switch (pax_handle_fetch_fault(regs)) {
19106 + case 2:
19107 + return;
19108 + }
19109 +#endif
19110 +
19111 + pax_report_fault(regs, (void *)ip, (void *)regs->sp);
19112 + do_group_exit(SIGKILL);
19113 + }
19114 +#endif
19115 +
19116 +#ifdef CONFIG_PAX_SEGMEXEC
19117 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && !(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address)) {
19118 +
19119 +#ifdef CONFIG_PAX_EMUTRAMP
19120 + switch (pax_handle_fetch_fault(regs)) {
19121 + case 2:
19122 + return;
19123 + }
19124 +#endif
19125 +
19126 + pax_report_fault(regs, (void *)ip, (void *)regs->sp);
19127 + do_group_exit(SIGKILL);
19128 + }
19129 +#endif
19130 +
19131 + }
19132 +#endif
19133
19134 /* User mode accesses just cause a SIGSEGV */
19135 if (error_code & PF_USER) {
19136 @@ -871,6 +1009,99 @@ static int spurious_fault_check(unsigned
19137 return 1;
19138 }
19139
19140 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
19141 +static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
19142 +{
19143 + pte_t *pte;
19144 + pmd_t *pmd;
19145 + spinlock_t *ptl;
19146 + unsigned char pte_mask;
19147 +
19148 + if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
19149 + !(mm->pax_flags & MF_PAX_PAGEEXEC))
19150 + return 0;
19151 +
19152 + /* PaX: it's our fault, let's handle it if we can */
19153 +
19154 + /* PaX: take a look at read faults before acquiring any locks */
19155 + if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
19156 + /* instruction fetch attempt from a protected page in user mode */
19157 + up_read(&mm->mmap_sem);
19158 +
19159 +#ifdef CONFIG_PAX_EMUTRAMP
19160 + switch (pax_handle_fetch_fault(regs)) {
19161 + case 2:
19162 + return 1;
19163 + }
19164 +#endif
19165 +
19166 + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
19167 + do_group_exit(SIGKILL);
19168 + }
19169 +
19170 + pmd = pax_get_pmd(mm, address);
19171 + if (unlikely(!pmd))
19172 + return 0;
19173 +
19174 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
19175 + if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
19176 + pte_unmap_unlock(pte, ptl);
19177 + return 0;
19178 + }
19179 +
19180 + if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
19181 + /* write attempt to a protected page in user mode */
19182 + pte_unmap_unlock(pte, ptl);
19183 + return 0;
19184 + }
19185 +
19186 +#ifdef CONFIG_SMP
19187 + if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
19188 +#else
19189 + if (likely(address > get_limit(regs->cs)))
19190 +#endif
19191 + {
19192 + set_pte(pte, pte_mkread(*pte));
19193 + __flush_tlb_one(address);
19194 + pte_unmap_unlock(pte, ptl);
19195 + up_read(&mm->mmap_sem);
19196 + return 1;
19197 + }
19198 +
19199 + pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
19200 +
19201 + /*
19202 + * PaX: fill DTLB with user rights and retry
19203 + */
19204 + __asm__ __volatile__ (
19205 + "orb %2,(%1)\n"
19206 +#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
19207 +/*
19208 + * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
19209 + * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
19210 + * page fault when examined during a TLB load attempt. this is true not only
19211 + * for PTEs holding a non-present entry but also present entries that will
19212 + * raise a page fault (such as those set up by PaX, or the copy-on-write
19213 + * mechanism). in effect it means that we do *not* need to flush the TLBs
19214 + * for our target pages since their PTEs are simply not in the TLBs at all.
19215 +
19216 + * the best thing in omitting it is that we gain around 15-20% speed in the
19217 + * fast path of the page fault handler and can get rid of tracing since we
19218 + * can no longer flush unintended entries.
19219 + */
19220 + "invlpg (%0)\n"
19221 +#endif
19222 + __copyuser_seg"testb $0,(%0)\n"
19223 + "xorb %3,(%1)\n"
19224 + :
19225 + : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
19226 + : "memory", "cc");
19227 + pte_unmap_unlock(pte, ptl);
19228 + up_read(&mm->mmap_sem);
19229 + return 1;
19230 +}
19231 +#endif
19232 +
19233 /*
19234 * Handle a spurious fault caused by a stale TLB entry.
19235 *
19236 @@ -943,6 +1174,9 @@ int show_unhandled_signals = 1;
19237 static inline int
19238 access_error(unsigned long error_code, struct vm_area_struct *vma)
19239 {
19240 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
19241 + return 1;
19242 +
19243 if (error_code & PF_WRITE) {
19244 /* write, present and write, not present: */
19245 if (unlikely(!(vma->vm_flags & VM_WRITE)))
19246 @@ -976,19 +1210,33 @@ do_page_fault(struct pt_regs *regs, unsi
19247 {
19248 struct vm_area_struct *vma;
19249 struct task_struct *tsk;
19250 - unsigned long address;
19251 struct mm_struct *mm;
19252 int fault;
19253 int write = error_code & PF_WRITE;
19254 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
19255 (write ? FAULT_FLAG_WRITE : 0);
19256
19257 + /* Get the faulting address: */
19258 + unsigned long address = read_cr2();
19259 +
19260 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19261 + if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
19262 + if (!search_exception_tables(regs->ip)) {
19263 + bad_area_nosemaphore(regs, error_code, address);
19264 + return;
19265 + }
19266 + if (address < PAX_USER_SHADOW_BASE) {
19267 + printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
19268 + printk(KERN_ERR "PAX: faulting IP: %pA\n", (void *)regs->ip);
19269 + show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
19270 + } else
19271 + address -= PAX_USER_SHADOW_BASE;
19272 + }
19273 +#endif
19274 +
19275 tsk = current;
19276 mm = tsk->mm;
19277
19278 - /* Get the faulting address: */
19279 - address = read_cr2();
19280 -
19281 /*
19282 * Detect and handle instructions that would cause a page fault for
19283 * both a tracked kernel page and a userspace page.
19284 @@ -1048,7 +1296,7 @@ do_page_fault(struct pt_regs *regs, unsi
19285 * User-mode registers count as a user access even for any
19286 * potential system fault or CPU buglet:
19287 */
19288 - if (user_mode_vm(regs)) {
19289 + if (user_mode(regs)) {
19290 local_irq_enable();
19291 error_code |= PF_USER;
19292 } else {
19293 @@ -1103,6 +1351,11 @@ retry:
19294 might_sleep();
19295 }
19296
19297 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
19298 + if (pax_handle_pageexec_fault(regs, mm, address, error_code))
19299 + return;
19300 +#endif
19301 +
19302 vma = find_vma(mm, address);
19303 if (unlikely(!vma)) {
19304 bad_area(regs, error_code, address);
19305 @@ -1114,18 +1367,24 @@ retry:
19306 bad_area(regs, error_code, address);
19307 return;
19308 }
19309 - if (error_code & PF_USER) {
19310 - /*
19311 - * Accessing the stack below %sp is always a bug.
19312 - * The large cushion allows instructions like enter
19313 - * and pusha to work. ("enter $65535, $31" pushes
19314 - * 32 pointers and then decrements %sp by 65535.)
19315 - */
19316 - if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
19317 - bad_area(regs, error_code, address);
19318 - return;
19319 - }
19320 + /*
19321 + * Accessing the stack below %sp is always a bug.
19322 + * The large cushion allows instructions like enter
19323 + * and pusha to work. ("enter $65535, $31" pushes
19324 + * 32 pointers and then decrements %sp by 65535.)
19325 + */
19326 + if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
19327 + bad_area(regs, error_code, address);
19328 + return;
19329 }
19330 +
19331 +#ifdef CONFIG_PAX_SEGMEXEC
19332 + if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
19333 + bad_area(regs, error_code, address);
19334 + return;
19335 + }
19336 +#endif
19337 +
19338 if (unlikely(expand_stack(vma, address))) {
19339 bad_area(regs, error_code, address);
19340 return;
19341 @@ -1180,3 +1439,199 @@ good_area:
19342
19343 up_read(&mm->mmap_sem);
19344 }
19345 +
19346 +#ifdef CONFIG_PAX_EMUTRAMP
19347 +static int pax_handle_fetch_fault_32(struct pt_regs *regs)
19348 +{
19349 + int err;
19350 +
19351 + do { /* PaX: gcc trampoline emulation #1 */
19352 + unsigned char mov1, mov2;
19353 + unsigned short jmp;
19354 + unsigned int addr1, addr2;
19355 +
19356 +#ifdef CONFIG_X86_64
19357 + if ((regs->ip + 11) >> 32)
19358 + break;
19359 +#endif
19360 +
19361 + err = get_user(mov1, (unsigned char __user *)regs->ip);
19362 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
19363 + err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
19364 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
19365 + err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
19366 +
19367 + if (err)
19368 + break;
19369 +
19370 + if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
19371 + regs->cx = addr1;
19372 + regs->ax = addr2;
19373 + regs->ip = addr2;
19374 + return 2;
19375 + }
19376 + } while (0);
19377 +
19378 + do { /* PaX: gcc trampoline emulation #2 */
19379 + unsigned char mov, jmp;
19380 + unsigned int addr1, addr2;
19381 +
19382 +#ifdef CONFIG_X86_64
19383 + if ((regs->ip + 9) >> 32)
19384 + break;
19385 +#endif
19386 +
19387 + err = get_user(mov, (unsigned char __user *)regs->ip);
19388 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
19389 + err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
19390 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
19391 +
19392 + if (err)
19393 + break;
19394 +
19395 + if (mov == 0xB9 && jmp == 0xE9) {
19396 + regs->cx = addr1;
19397 + regs->ip = (unsigned int)(regs->ip + addr2 + 10);
19398 + return 2;
19399 + }
19400 + } while (0);
19401 +
19402 + return 1; /* PaX in action */
19403 +}
19404 +
19405 +#ifdef CONFIG_X86_64
19406 +static int pax_handle_fetch_fault_64(struct pt_regs *regs)
19407 +{
19408 + int err;
19409 +
19410 + do { /* PaX: gcc trampoline emulation #1 */
19411 + unsigned short mov1, mov2, jmp1;
19412 + unsigned char jmp2;
19413 + unsigned int addr1;
19414 + unsigned long addr2;
19415 +
19416 + err = get_user(mov1, (unsigned short __user *)regs->ip);
19417 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
19418 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
19419 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
19420 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
19421 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
19422 +
19423 + if (err)
19424 + break;
19425 +
19426 + if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
19427 + regs->r11 = addr1;
19428 + regs->r10 = addr2;
19429 + regs->ip = addr1;
19430 + return 2;
19431 + }
19432 + } while (0);
19433 +
19434 + do { /* PaX: gcc trampoline emulation #2 */
19435 + unsigned short mov1, mov2, jmp1;
19436 + unsigned char jmp2;
19437 + unsigned long addr1, addr2;
19438 +
19439 + err = get_user(mov1, (unsigned short __user *)regs->ip);
19440 + err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
19441 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
19442 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
19443 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
19444 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
19445 +
19446 + if (err)
19447 + break;
19448 +
19449 + if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
19450 + regs->r11 = addr1;
19451 + regs->r10 = addr2;
19452 + regs->ip = addr1;
19453 + return 2;
19454 + }
19455 + } while (0);
19456 +
19457 + return 1; /* PaX in action */
19458 +}
19459 +#endif
19460 +
19461 +/*
19462 + * PaX: decide what to do with offenders (regs->ip = fault address)
19463 + *
19464 + * returns 1 when task should be killed
19465 + * 2 when gcc trampoline was detected
19466 + */
19467 +static int pax_handle_fetch_fault(struct pt_regs *regs)
19468 +{
19469 + if (v8086_mode(regs))
19470 + return 1;
19471 +
19472 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
19473 + return 1;
19474 +
19475 +#ifdef CONFIG_X86_32
19476 + return pax_handle_fetch_fault_32(regs);
19477 +#else
19478 + if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
19479 + return pax_handle_fetch_fault_32(regs);
19480 + else
19481 + return pax_handle_fetch_fault_64(regs);
19482 +#endif
19483 +}
19484 +#endif
19485 +
19486 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
19487 +void pax_report_insns(void *pc, void *sp)
19488 +{
19489 + long i;
19490 +
19491 + printk(KERN_ERR "PAX: bytes at PC: ");
19492 + for (i = 0; i < 20; i++) {
19493 + unsigned char c;
19494 + if (get_user(c, (__force unsigned char __user *)pc+i))
19495 + printk(KERN_CONT "?? ");
19496 + else
19497 + printk(KERN_CONT "%02x ", c);
19498 + }
19499 + printk("\n");
19500 +
19501 + printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
19502 + for (i = -1; i < 80 / (long)sizeof(long); i++) {
19503 + unsigned long c;
19504 + if (get_user(c, (__force unsigned long __user *)sp+i))
19505 +#ifdef CONFIG_X86_32
19506 + printk(KERN_CONT "???????? ");
19507 +#else
19508 + printk(KERN_CONT "???????????????? ");
19509 +#endif
19510 + else
19511 + printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
19512 + }
19513 + printk("\n");
19514 +}
19515 +#endif
19516 +
19517 +/**
19518 + * probe_kernel_write(): safely attempt to write to a location
19519 + * @dst: address to write to
19520 + * @src: pointer to the data that shall be written
19521 + * @size: size of the data chunk
19522 + *
19523 + * Safely write to address @dst from the buffer at @src. If a kernel fault
19524 + * happens, handle that and return -EFAULT.
19525 + */
19526 +long notrace probe_kernel_write(void *dst, const void *src, size_t size)
19527 +{
19528 + long ret;
19529 + mm_segment_t old_fs = get_fs();
19530 +
19531 + set_fs(KERNEL_DS);
19532 + pagefault_disable();
19533 + pax_open_kernel();
19534 + ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
19535 + pax_close_kernel();
19536 + pagefault_enable();
19537 + set_fs(old_fs);
19538 +
19539 + return ret ? -EFAULT : 0;
19540 +}
19541 diff -urNp linux-3.0.4/arch/x86/mm/gup.c linux-3.0.4/arch/x86/mm/gup.c
19542 --- linux-3.0.4/arch/x86/mm/gup.c 2011-07-21 22:17:23.000000000 -0400
19543 +++ linux-3.0.4/arch/x86/mm/gup.c 2011-08-23 21:47:55.000000000 -0400
19544 @@ -263,7 +263,7 @@ int __get_user_pages_fast(unsigned long
19545 addr = start;
19546 len = (unsigned long) nr_pages << PAGE_SHIFT;
19547 end = start + len;
19548 - if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
19549 + if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
19550 (void __user *)start, len)))
19551 return 0;
19552
19553 diff -urNp linux-3.0.4/arch/x86/mm/highmem_32.c linux-3.0.4/arch/x86/mm/highmem_32.c
19554 --- linux-3.0.4/arch/x86/mm/highmem_32.c 2011-07-21 22:17:23.000000000 -0400
19555 +++ linux-3.0.4/arch/x86/mm/highmem_32.c 2011-08-23 21:47:55.000000000 -0400
19556 @@ -44,7 +44,10 @@ void *kmap_atomic_prot(struct page *page
19557 idx = type + KM_TYPE_NR*smp_processor_id();
19558 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
19559 BUG_ON(!pte_none(*(kmap_pte-idx)));
19560 +
19561 + pax_open_kernel();
19562 set_pte(kmap_pte-idx, mk_pte(page, prot));
19563 + pax_close_kernel();
19564
19565 return (void *)vaddr;
19566 }
19567 diff -urNp linux-3.0.4/arch/x86/mm/hugetlbpage.c linux-3.0.4/arch/x86/mm/hugetlbpage.c
19568 --- linux-3.0.4/arch/x86/mm/hugetlbpage.c 2011-07-21 22:17:23.000000000 -0400
19569 +++ linux-3.0.4/arch/x86/mm/hugetlbpage.c 2011-08-23 21:47:55.000000000 -0400
19570 @@ -266,13 +266,20 @@ static unsigned long hugetlb_get_unmappe
19571 struct hstate *h = hstate_file(file);
19572 struct mm_struct *mm = current->mm;
19573 struct vm_area_struct *vma;
19574 - unsigned long start_addr;
19575 + unsigned long start_addr, pax_task_size = TASK_SIZE;
19576 +
19577 +#ifdef CONFIG_PAX_SEGMEXEC
19578 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
19579 + pax_task_size = SEGMEXEC_TASK_SIZE;
19580 +#endif
19581 +
19582 + pax_task_size -= PAGE_SIZE;
19583
19584 if (len > mm->cached_hole_size) {
19585 - start_addr = mm->free_area_cache;
19586 + start_addr = mm->free_area_cache;
19587 } else {
19588 - start_addr = TASK_UNMAPPED_BASE;
19589 - mm->cached_hole_size = 0;
19590 + start_addr = mm->mmap_base;
19591 + mm->cached_hole_size = 0;
19592 }
19593
19594 full_search:
19595 @@ -280,26 +287,27 @@ full_search:
19596
19597 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
19598 /* At this point: (!vma || addr < vma->vm_end). */
19599 - if (TASK_SIZE - len < addr) {
19600 + if (pax_task_size - len < addr) {
19601 /*
19602 * Start a new search - just in case we missed
19603 * some holes.
19604 */
19605 - if (start_addr != TASK_UNMAPPED_BASE) {
19606 - start_addr = TASK_UNMAPPED_BASE;
19607 + if (start_addr != mm->mmap_base) {
19608 + start_addr = mm->mmap_base;
19609 mm->cached_hole_size = 0;
19610 goto full_search;
19611 }
19612 return -ENOMEM;
19613 }
19614 - if (!vma || addr + len <= vma->vm_start) {
19615 - mm->free_area_cache = addr + len;
19616 - return addr;
19617 - }
19618 + if (check_heap_stack_gap(vma, addr, len))
19619 + break;
19620 if (addr + mm->cached_hole_size < vma->vm_start)
19621 mm->cached_hole_size = vma->vm_start - addr;
19622 addr = ALIGN(vma->vm_end, huge_page_size(h));
19623 }
19624 +
19625 + mm->free_area_cache = addr + len;
19626 + return addr;
19627 }
19628
19629 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
19630 @@ -308,10 +316,9 @@ static unsigned long hugetlb_get_unmappe
19631 {
19632 struct hstate *h = hstate_file(file);
19633 struct mm_struct *mm = current->mm;
19634 - struct vm_area_struct *vma, *prev_vma;
19635 - unsigned long base = mm->mmap_base, addr = addr0;
19636 + struct vm_area_struct *vma;
19637 + unsigned long base = mm->mmap_base, addr;
19638 unsigned long largest_hole = mm->cached_hole_size;
19639 - int first_time = 1;
19640
19641 /* don't allow allocations above current base */
19642 if (mm->free_area_cache > base)
19643 @@ -321,64 +328,63 @@ static unsigned long hugetlb_get_unmappe
19644 largest_hole = 0;
19645 mm->free_area_cache = base;
19646 }
19647 -try_again:
19648 +
19649 /* make sure it can fit in the remaining address space */
19650 if (mm->free_area_cache < len)
19651 goto fail;
19652
19653 /* either no address requested or can't fit in requested address hole */
19654 - addr = (mm->free_area_cache - len) & huge_page_mask(h);
19655 + addr = (mm->free_area_cache - len);
19656 do {
19657 + addr &= huge_page_mask(h);
19658 + vma = find_vma(mm, addr);
19659 /*
19660 * Lookup failure means no vma is above this address,
19661 * i.e. return with success:
19662 - */
19663 - if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
19664 - return addr;
19665 -
19666 - /*
19667 * new region fits between prev_vma->vm_end and
19668 * vma->vm_start, use it:
19669 */
19670 - if (addr + len <= vma->vm_start &&
19671 - (!prev_vma || (addr >= prev_vma->vm_end))) {
19672 + if (check_heap_stack_gap(vma, addr, len)) {
19673 /* remember the address as a hint for next time */
19674 - mm->cached_hole_size = largest_hole;
19675 - return (mm->free_area_cache = addr);
19676 - } else {
19677 - /* pull free_area_cache down to the first hole */
19678 - if (mm->free_area_cache == vma->vm_end) {
19679 - mm->free_area_cache = vma->vm_start;
19680 - mm->cached_hole_size = largest_hole;
19681 - }
19682 + mm->cached_hole_size = largest_hole;
19683 + return (mm->free_area_cache = addr);
19684 + }
19685 + /* pull free_area_cache down to the first hole */
19686 + if (mm->free_area_cache == vma->vm_end) {
19687 + mm->free_area_cache = vma->vm_start;
19688 + mm->cached_hole_size = largest_hole;
19689 }
19690
19691 /* remember the largest hole we saw so far */
19692 if (addr + largest_hole < vma->vm_start)
19693 - largest_hole = vma->vm_start - addr;
19694 + largest_hole = vma->vm_start - addr;
19695
19696 /* try just below the current vma->vm_start */
19697 - addr = (vma->vm_start - len) & huge_page_mask(h);
19698 - } while (len <= vma->vm_start);
19699 + addr = skip_heap_stack_gap(vma, len);
19700 + } while (!IS_ERR_VALUE(addr));
19701
19702 fail:
19703 /*
19704 - * if hint left us with no space for the requested
19705 - * mapping then try again:
19706 - */
19707 - if (first_time) {
19708 - mm->free_area_cache = base;
19709 - largest_hole = 0;
19710 - first_time = 0;
19711 - goto try_again;
19712 - }
19713 - /*
19714 * A failed mmap() very likely causes application failure,
19715 * so fall back to the bottom-up function here. This scenario
19716 * can happen with large stack limits and large mmap()
19717 * allocations.
19718 */
19719 - mm->free_area_cache = TASK_UNMAPPED_BASE;
19720 +
19721 +#ifdef CONFIG_PAX_SEGMEXEC
19722 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
19723 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
19724 + else
19725 +#endif
19726 +
19727 + mm->mmap_base = TASK_UNMAPPED_BASE;
19728 +
19729 +#ifdef CONFIG_PAX_RANDMMAP
19730 + if (mm->pax_flags & MF_PAX_RANDMMAP)
19731 + mm->mmap_base += mm->delta_mmap;
19732 +#endif
19733 +
19734 + mm->free_area_cache = mm->mmap_base;
19735 mm->cached_hole_size = ~0UL;
19736 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
19737 len, pgoff, flags);
19738 @@ -386,6 +392,7 @@ fail:
19739 /*
19740 * Restore the topdown base:
19741 */
19742 + mm->mmap_base = base;
19743 mm->free_area_cache = base;
19744 mm->cached_hole_size = ~0UL;
19745
19746 @@ -399,10 +406,19 @@ hugetlb_get_unmapped_area(struct file *f
19747 struct hstate *h = hstate_file(file);
19748 struct mm_struct *mm = current->mm;
19749 struct vm_area_struct *vma;
19750 + unsigned long pax_task_size = TASK_SIZE;
19751
19752 if (len & ~huge_page_mask(h))
19753 return -EINVAL;
19754 - if (len > TASK_SIZE)
19755 +
19756 +#ifdef CONFIG_PAX_SEGMEXEC
19757 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
19758 + pax_task_size = SEGMEXEC_TASK_SIZE;
19759 +#endif
19760 +
19761 + pax_task_size -= PAGE_SIZE;
19762 +
19763 + if (len > pax_task_size)
19764 return -ENOMEM;
19765
19766 if (flags & MAP_FIXED) {
19767 @@ -414,8 +430,7 @@ hugetlb_get_unmapped_area(struct file *f
19768 if (addr) {
19769 addr = ALIGN(addr, huge_page_size(h));
19770 vma = find_vma(mm, addr);
19771 - if (TASK_SIZE - len >= addr &&
19772 - (!vma || addr + len <= vma->vm_start))
19773 + if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
19774 return addr;
19775 }
19776 if (mm->get_unmapped_area == arch_get_unmapped_area)
19777 diff -urNp linux-3.0.4/arch/x86/mm/init_32.c linux-3.0.4/arch/x86/mm/init_32.c
19778 --- linux-3.0.4/arch/x86/mm/init_32.c 2011-07-21 22:17:23.000000000 -0400
19779 +++ linux-3.0.4/arch/x86/mm/init_32.c 2011-08-23 21:47:55.000000000 -0400
19780 @@ -74,36 +74,6 @@ static __init void *alloc_low_page(void)
19781 }
19782
19783 /*
19784 - * Creates a middle page table and puts a pointer to it in the
19785 - * given global directory entry. This only returns the gd entry
19786 - * in non-PAE compilation mode, since the middle layer is folded.
19787 - */
19788 -static pmd_t * __init one_md_table_init(pgd_t *pgd)
19789 -{
19790 - pud_t *pud;
19791 - pmd_t *pmd_table;
19792 -
19793 -#ifdef CONFIG_X86_PAE
19794 - if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
19795 - if (after_bootmem)
19796 - pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
19797 - else
19798 - pmd_table = (pmd_t *)alloc_low_page();
19799 - paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
19800 - set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
19801 - pud = pud_offset(pgd, 0);
19802 - BUG_ON(pmd_table != pmd_offset(pud, 0));
19803 -
19804 - return pmd_table;
19805 - }
19806 -#endif
19807 - pud = pud_offset(pgd, 0);
19808 - pmd_table = pmd_offset(pud, 0);
19809 -
19810 - return pmd_table;
19811 -}
19812 -
19813 -/*
19814 * Create a page table and place a pointer to it in a middle page
19815 * directory entry:
19816 */
19817 @@ -123,13 +93,28 @@ static pte_t * __init one_page_table_ini
19818 page_table = (pte_t *)alloc_low_page();
19819
19820 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
19821 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
19822 + set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
19823 +#else
19824 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
19825 +#endif
19826 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
19827 }
19828
19829 return pte_offset_kernel(pmd, 0);
19830 }
19831
19832 +static pmd_t * __init one_md_table_init(pgd_t *pgd)
19833 +{
19834 + pud_t *pud;
19835 + pmd_t *pmd_table;
19836 +
19837 + pud = pud_offset(pgd, 0);
19838 + pmd_table = pmd_offset(pud, 0);
19839 +
19840 + return pmd_table;
19841 +}
19842 +
19843 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
19844 {
19845 int pgd_idx = pgd_index(vaddr);
19846 @@ -203,6 +188,7 @@ page_table_range_init(unsigned long star
19847 int pgd_idx, pmd_idx;
19848 unsigned long vaddr;
19849 pgd_t *pgd;
19850 + pud_t *pud;
19851 pmd_t *pmd;
19852 pte_t *pte = NULL;
19853
19854 @@ -212,8 +198,13 @@ page_table_range_init(unsigned long star
19855 pgd = pgd_base + pgd_idx;
19856
19857 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
19858 - pmd = one_md_table_init(pgd);
19859 - pmd = pmd + pmd_index(vaddr);
19860 + pud = pud_offset(pgd, vaddr);
19861 + pmd = pmd_offset(pud, vaddr);
19862 +
19863 +#ifdef CONFIG_X86_PAE
19864 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
19865 +#endif
19866 +
19867 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
19868 pmd++, pmd_idx++) {
19869 pte = page_table_kmap_check(one_page_table_init(pmd),
19870 @@ -225,11 +216,20 @@ page_table_range_init(unsigned long star
19871 }
19872 }
19873
19874 -static inline int is_kernel_text(unsigned long addr)
19875 +static inline int is_kernel_text(unsigned long start, unsigned long end)
19876 {
19877 - if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
19878 - return 1;
19879 - return 0;
19880 + if ((start > ktla_ktva((unsigned long)_etext) ||
19881 + end <= ktla_ktva((unsigned long)_stext)) &&
19882 + (start > ktla_ktva((unsigned long)_einittext) ||
19883 + end <= ktla_ktva((unsigned long)_sinittext)) &&
19884 +
19885 +#ifdef CONFIG_ACPI_SLEEP
19886 + (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
19887 +#endif
19888 +
19889 + (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
19890 + return 0;
19891 + return 1;
19892 }
19893
19894 /*
19895 @@ -246,9 +246,10 @@ kernel_physical_mapping_init(unsigned lo
19896 unsigned long last_map_addr = end;
19897 unsigned long start_pfn, end_pfn;
19898 pgd_t *pgd_base = swapper_pg_dir;
19899 - int pgd_idx, pmd_idx, pte_ofs;
19900 + unsigned int pgd_idx, pmd_idx, pte_ofs;
19901 unsigned long pfn;
19902 pgd_t *pgd;
19903 + pud_t *pud;
19904 pmd_t *pmd;
19905 pte_t *pte;
19906 unsigned pages_2m, pages_4k;
19907 @@ -281,8 +282,13 @@ repeat:
19908 pfn = start_pfn;
19909 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
19910 pgd = pgd_base + pgd_idx;
19911 - for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
19912 - pmd = one_md_table_init(pgd);
19913 + for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
19914 + pud = pud_offset(pgd, 0);
19915 + pmd = pmd_offset(pud, 0);
19916 +
19917 +#ifdef CONFIG_X86_PAE
19918 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
19919 +#endif
19920
19921 if (pfn >= end_pfn)
19922 continue;
19923 @@ -294,14 +300,13 @@ repeat:
19924 #endif
19925 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
19926 pmd++, pmd_idx++) {
19927 - unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
19928 + unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
19929
19930 /*
19931 * Map with big pages if possible, otherwise
19932 * create normal page tables:
19933 */
19934 if (use_pse) {
19935 - unsigned int addr2;
19936 pgprot_t prot = PAGE_KERNEL_LARGE;
19937 /*
19938 * first pass will use the same initial
19939 @@ -311,11 +316,7 @@ repeat:
19940 __pgprot(PTE_IDENT_ATTR |
19941 _PAGE_PSE);
19942
19943 - addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
19944 - PAGE_OFFSET + PAGE_SIZE-1;
19945 -
19946 - if (is_kernel_text(addr) ||
19947 - is_kernel_text(addr2))
19948 + if (is_kernel_text(address, address + PMD_SIZE))
19949 prot = PAGE_KERNEL_LARGE_EXEC;
19950
19951 pages_2m++;
19952 @@ -332,7 +333,7 @@ repeat:
19953 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
19954 pte += pte_ofs;
19955 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
19956 - pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
19957 + pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
19958 pgprot_t prot = PAGE_KERNEL;
19959 /*
19960 * first pass will use the same initial
19961 @@ -340,7 +341,7 @@ repeat:
19962 */
19963 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
19964
19965 - if (is_kernel_text(addr))
19966 + if (is_kernel_text(address, address + PAGE_SIZE))
19967 prot = PAGE_KERNEL_EXEC;
19968
19969 pages_4k++;
19970 @@ -472,7 +473,7 @@ void __init native_pagetable_setup_start
19971
19972 pud = pud_offset(pgd, va);
19973 pmd = pmd_offset(pud, va);
19974 - if (!pmd_present(*pmd))
19975 + if (!pmd_present(*pmd) || pmd_huge(*pmd))
19976 break;
19977
19978 pte = pte_offset_kernel(pmd, va);
19979 @@ -524,12 +525,10 @@ void __init early_ioremap_page_table_ran
19980
19981 static void __init pagetable_init(void)
19982 {
19983 - pgd_t *pgd_base = swapper_pg_dir;
19984 -
19985 - permanent_kmaps_init(pgd_base);
19986 + permanent_kmaps_init(swapper_pg_dir);
19987 }
19988
19989 -pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
19990 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
19991 EXPORT_SYMBOL_GPL(__supported_pte_mask);
19992
19993 /* user-defined highmem size */
19994 @@ -757,6 +756,12 @@ void __init mem_init(void)
19995
19996 pci_iommu_alloc();
19997
19998 +#ifdef CONFIG_PAX_PER_CPU_PGD
19999 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
20000 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
20001 + KERNEL_PGD_PTRS);
20002 +#endif
20003 +
20004 #ifdef CONFIG_FLATMEM
20005 BUG_ON(!mem_map);
20006 #endif
20007 @@ -774,7 +779,7 @@ void __init mem_init(void)
20008 set_highmem_pages_init();
20009
20010 codesize = (unsigned long) &_etext - (unsigned long) &_text;
20011 - datasize = (unsigned long) &_edata - (unsigned long) &_etext;
20012 + datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
20013 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
20014
20015 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
20016 @@ -815,10 +820,10 @@ void __init mem_init(void)
20017 ((unsigned long)&__init_end -
20018 (unsigned long)&__init_begin) >> 10,
20019
20020 - (unsigned long)&_etext, (unsigned long)&_edata,
20021 - ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
20022 + (unsigned long)&_sdata, (unsigned long)&_edata,
20023 + ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
20024
20025 - (unsigned long)&_text, (unsigned long)&_etext,
20026 + ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
20027 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
20028
20029 /*
20030 @@ -896,6 +901,7 @@ void set_kernel_text_rw(void)
20031 if (!kernel_set_to_readonly)
20032 return;
20033
20034 + start = ktla_ktva(start);
20035 pr_debug("Set kernel text: %lx - %lx for read write\n",
20036 start, start+size);
20037
20038 @@ -910,6 +916,7 @@ void set_kernel_text_ro(void)
20039 if (!kernel_set_to_readonly)
20040 return;
20041
20042 + start = ktla_ktva(start);
20043 pr_debug("Set kernel text: %lx - %lx for read only\n",
20044 start, start+size);
20045
20046 @@ -938,6 +945,7 @@ void mark_rodata_ro(void)
20047 unsigned long start = PFN_ALIGN(_text);
20048 unsigned long size = PFN_ALIGN(_etext) - start;
20049
20050 + start = ktla_ktva(start);
20051 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
20052 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
20053 size >> 10);
20054 diff -urNp linux-3.0.4/arch/x86/mm/init_64.c linux-3.0.4/arch/x86/mm/init_64.c
20055 --- linux-3.0.4/arch/x86/mm/init_64.c 2011-07-21 22:17:23.000000000 -0400
20056 +++ linux-3.0.4/arch/x86/mm/init_64.c 2011-08-23 21:47:55.000000000 -0400
20057 @@ -75,7 +75,7 @@ early_param("gbpages", parse_direct_gbpa
20058 * around without checking the pgd every time.
20059 */
20060
20061 -pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
20062 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
20063 EXPORT_SYMBOL_GPL(__supported_pte_mask);
20064
20065 int force_personality32;
20066 @@ -108,12 +108,22 @@ void sync_global_pgds(unsigned long star
20067
20068 for (address = start; address <= end; address += PGDIR_SIZE) {
20069 const pgd_t *pgd_ref = pgd_offset_k(address);
20070 +
20071 +#ifdef CONFIG_PAX_PER_CPU_PGD
20072 + unsigned long cpu;
20073 +#else
20074 struct page *page;
20075 +#endif
20076
20077 if (pgd_none(*pgd_ref))
20078 continue;
20079
20080 spin_lock(&pgd_lock);
20081 +
20082 +#ifdef CONFIG_PAX_PER_CPU_PGD
20083 + for (cpu = 0; cpu < NR_CPUS; ++cpu) {
20084 + pgd_t *pgd = pgd_offset_cpu(cpu, address);
20085 +#else
20086 list_for_each_entry(page, &pgd_list, lru) {
20087 pgd_t *pgd;
20088 spinlock_t *pgt_lock;
20089 @@ -122,6 +132,7 @@ void sync_global_pgds(unsigned long star
20090 /* the pgt_lock only for Xen */
20091 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
20092 spin_lock(pgt_lock);
20093 +#endif
20094
20095 if (pgd_none(*pgd))
20096 set_pgd(pgd, *pgd_ref);
20097 @@ -129,7 +140,10 @@ void sync_global_pgds(unsigned long star
20098 BUG_ON(pgd_page_vaddr(*pgd)
20099 != pgd_page_vaddr(*pgd_ref));
20100
20101 +#ifndef CONFIG_PAX_PER_CPU_PGD
20102 spin_unlock(pgt_lock);
20103 +#endif
20104 +
20105 }
20106 spin_unlock(&pgd_lock);
20107 }
20108 @@ -203,7 +217,9 @@ void set_pte_vaddr_pud(pud_t *pud_page,
20109 pmd = fill_pmd(pud, vaddr);
20110 pte = fill_pte(pmd, vaddr);
20111
20112 + pax_open_kernel();
20113 set_pte(pte, new_pte);
20114 + pax_close_kernel();
20115
20116 /*
20117 * It's enough to flush this one mapping.
20118 @@ -262,14 +278,12 @@ static void __init __init_extra_mapping(
20119 pgd = pgd_offset_k((unsigned long)__va(phys));
20120 if (pgd_none(*pgd)) {
20121 pud = (pud_t *) spp_getpage();
20122 - set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
20123 - _PAGE_USER));
20124 + set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
20125 }
20126 pud = pud_offset(pgd, (unsigned long)__va(phys));
20127 if (pud_none(*pud)) {
20128 pmd = (pmd_t *) spp_getpage();
20129 - set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
20130 - _PAGE_USER));
20131 + set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
20132 }
20133 pmd = pmd_offset(pud, phys);
20134 BUG_ON(!pmd_none(*pmd));
20135 @@ -693,6 +707,12 @@ void __init mem_init(void)
20136
20137 pci_iommu_alloc();
20138
20139 +#ifdef CONFIG_PAX_PER_CPU_PGD
20140 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
20141 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
20142 + KERNEL_PGD_PTRS);
20143 +#endif
20144 +
20145 /* clear_bss() already clear the empty_zero_page */
20146
20147 reservedpages = 0;
20148 @@ -853,8 +873,8 @@ int kern_addr_valid(unsigned long addr)
20149 static struct vm_area_struct gate_vma = {
20150 .vm_start = VSYSCALL_START,
20151 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
20152 - .vm_page_prot = PAGE_READONLY_EXEC,
20153 - .vm_flags = VM_READ | VM_EXEC
20154 + .vm_page_prot = PAGE_READONLY,
20155 + .vm_flags = VM_READ
20156 };
20157
20158 struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
20159 @@ -888,7 +908,7 @@ int in_gate_area_no_mm(unsigned long add
20160
20161 const char *arch_vma_name(struct vm_area_struct *vma)
20162 {
20163 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
20164 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
20165 return "[vdso]";
20166 if (vma == &gate_vma)
20167 return "[vsyscall]";
20168 diff -urNp linux-3.0.4/arch/x86/mm/init.c linux-3.0.4/arch/x86/mm/init.c
20169 --- linux-3.0.4/arch/x86/mm/init.c 2011-07-21 22:17:23.000000000 -0400
20170 +++ linux-3.0.4/arch/x86/mm/init.c 2011-08-23 21:48:14.000000000 -0400
20171 @@ -31,7 +31,7 @@ int direct_gbpages
20172 static void __init find_early_table_space(unsigned long end, int use_pse,
20173 int use_gbpages)
20174 {
20175 - unsigned long puds, pmds, ptes, tables, start = 0, good_end = end;
20176 + unsigned long puds, pmds, ptes, tables, start = 0x100000, good_end = end;
20177 phys_addr_t base;
20178
20179 puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
20180 @@ -313,12 +313,34 @@ unsigned long __init_refok init_memory_m
20181 */
20182 int devmem_is_allowed(unsigned long pagenr)
20183 {
20184 - if (pagenr <= 256)
20185 +#ifdef CONFIG_GRKERNSEC_KMEM
20186 + /* allow BDA */
20187 + if (!pagenr)
20188 + return 1;
20189 + /* allow EBDA */
20190 + if ((0x9f000 >> PAGE_SHIFT) == pagenr)
20191 + return 1;
20192 +#else
20193 + if (!pagenr)
20194 + return 1;
20195 +#ifdef CONFIG_VM86
20196 + if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
20197 + return 1;
20198 +#endif
20199 +#endif
20200 +
20201 + if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
20202 return 1;
20203 +#ifdef CONFIG_GRKERNSEC_KMEM
20204 + /* throw out everything else below 1MB */
20205 + if (pagenr <= 256)
20206 + return 0;
20207 +#endif
20208 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
20209 return 0;
20210 if (!page_is_ram(pagenr))
20211 return 1;
20212 +
20213 return 0;
20214 }
20215
20216 @@ -373,6 +395,86 @@ void free_init_pages(char *what, unsigne
20217
20218 void free_initmem(void)
20219 {
20220 +
20221 +#ifdef CONFIG_PAX_KERNEXEC
20222 +#ifdef CONFIG_X86_32
20223 + /* PaX: limit KERNEL_CS to actual size */
20224 + unsigned long addr, limit;
20225 + struct desc_struct d;
20226 + int cpu;
20227 +
20228 + limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
20229 + limit = (limit - 1UL) >> PAGE_SHIFT;
20230 +
20231 + memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
20232 + for (cpu = 0; cpu < NR_CPUS; cpu++) {
20233 + pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
20234 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
20235 + }
20236 +
20237 + /* PaX: make KERNEL_CS read-only */
20238 + addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
20239 + if (!paravirt_enabled())
20240 + set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
20241 +/*
20242 + for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
20243 + pgd = pgd_offset_k(addr);
20244 + pud = pud_offset(pgd, addr);
20245 + pmd = pmd_offset(pud, addr);
20246 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
20247 + }
20248 +*/
20249 +#ifdef CONFIG_X86_PAE
20250 + set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
20251 +/*
20252 + for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
20253 + pgd = pgd_offset_k(addr);
20254 + pud = pud_offset(pgd, addr);
20255 + pmd = pmd_offset(pud, addr);
20256 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
20257 + }
20258 +*/
20259 +#endif
20260 +
20261 +#ifdef CONFIG_MODULES
20262 + set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
20263 +#endif
20264 +
20265 +#else
20266 + pgd_t *pgd;
20267 + pud_t *pud;
20268 + pmd_t *pmd;
20269 + unsigned long addr, end;
20270 +
20271 + /* PaX: make kernel code/rodata read-only, rest non-executable */
20272 + for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
20273 + pgd = pgd_offset_k(addr);
20274 + pud = pud_offset(pgd, addr);
20275 + pmd = pmd_offset(pud, addr);
20276 + if (!pmd_present(*pmd))
20277 + continue;
20278 + if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
20279 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
20280 + else
20281 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
20282 + }
20283 +
20284 + addr = (unsigned long)__va(__pa(__START_KERNEL_map));
20285 + end = addr + KERNEL_IMAGE_SIZE;
20286 + for (; addr < end; addr += PMD_SIZE) {
20287 + pgd = pgd_offset_k(addr);
20288 + pud = pud_offset(pgd, addr);
20289 + pmd = pmd_offset(pud, addr);
20290 + if (!pmd_present(*pmd))
20291 + continue;
20292 + if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
20293 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
20294 + }
20295 +#endif
20296 +
20297 + flush_tlb_all();
20298 +#endif
20299 +
20300 free_init_pages("unused kernel memory",
20301 (unsigned long)(&__init_begin),
20302 (unsigned long)(&__init_end));
20303 diff -urNp linux-3.0.4/arch/x86/mm/iomap_32.c linux-3.0.4/arch/x86/mm/iomap_32.c
20304 --- linux-3.0.4/arch/x86/mm/iomap_32.c 2011-07-21 22:17:23.000000000 -0400
20305 +++ linux-3.0.4/arch/x86/mm/iomap_32.c 2011-08-23 21:47:55.000000000 -0400
20306 @@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long
20307 type = kmap_atomic_idx_push();
20308 idx = type + KM_TYPE_NR * smp_processor_id();
20309 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
20310 +
20311 + pax_open_kernel();
20312 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
20313 + pax_close_kernel();
20314 +
20315 arch_flush_lazy_mmu_mode();
20316
20317 return (void *)vaddr;
20318 diff -urNp linux-3.0.4/arch/x86/mm/ioremap.c linux-3.0.4/arch/x86/mm/ioremap.c
20319 --- linux-3.0.4/arch/x86/mm/ioremap.c 2011-07-21 22:17:23.000000000 -0400
20320 +++ linux-3.0.4/arch/x86/mm/ioremap.c 2011-08-23 21:47:55.000000000 -0400
20321 @@ -97,7 +97,7 @@ static void __iomem *__ioremap_caller(re
20322 for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
20323 int is_ram = page_is_ram(pfn);
20324
20325 - if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
20326 + if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
20327 return NULL;
20328 WARN_ON_ONCE(is_ram);
20329 }
20330 @@ -344,7 +344,7 @@ static int __init early_ioremap_debug_se
20331 early_param("early_ioremap_debug", early_ioremap_debug_setup);
20332
20333 static __initdata int after_paging_init;
20334 -static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
20335 +static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
20336
20337 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
20338 {
20339 @@ -381,8 +381,7 @@ void __init early_ioremap_init(void)
20340 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
20341
20342 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
20343 - memset(bm_pte, 0, sizeof(bm_pte));
20344 - pmd_populate_kernel(&init_mm, pmd, bm_pte);
20345 + pmd_populate_user(&init_mm, pmd, bm_pte);
20346
20347 /*
20348 * The boot-ioremap range spans multiple pmds, for which
20349 diff -urNp linux-3.0.4/arch/x86/mm/kmemcheck/kmemcheck.c linux-3.0.4/arch/x86/mm/kmemcheck/kmemcheck.c
20350 --- linux-3.0.4/arch/x86/mm/kmemcheck/kmemcheck.c 2011-07-21 22:17:23.000000000 -0400
20351 +++ linux-3.0.4/arch/x86/mm/kmemcheck/kmemcheck.c 2011-08-23 21:47:55.000000000 -0400
20352 @@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *reg
20353 * memory (e.g. tracked pages)? For now, we need this to avoid
20354 * invoking kmemcheck for PnP BIOS calls.
20355 */
20356 - if (regs->flags & X86_VM_MASK)
20357 + if (v8086_mode(regs))
20358 return false;
20359 - if (regs->cs != __KERNEL_CS)
20360 + if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
20361 return false;
20362
20363 pte = kmemcheck_pte_lookup(address);
20364 diff -urNp linux-3.0.4/arch/x86/mm/mmap.c linux-3.0.4/arch/x86/mm/mmap.c
20365 --- linux-3.0.4/arch/x86/mm/mmap.c 2011-07-21 22:17:23.000000000 -0400
20366 +++ linux-3.0.4/arch/x86/mm/mmap.c 2011-08-23 21:47:55.000000000 -0400
20367 @@ -49,7 +49,7 @@ static unsigned int stack_maxrandom_size
20368 * Leave an at least ~128 MB hole with possible stack randomization.
20369 */
20370 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
20371 -#define MAX_GAP (TASK_SIZE/6*5)
20372 +#define MAX_GAP (pax_task_size/6*5)
20373
20374 /*
20375 * True on X86_32 or when emulating IA32 on X86_64
20376 @@ -94,27 +94,40 @@ static unsigned long mmap_rnd(void)
20377 return rnd << PAGE_SHIFT;
20378 }
20379
20380 -static unsigned long mmap_base(void)
20381 +static unsigned long mmap_base(struct mm_struct *mm)
20382 {
20383 unsigned long gap = rlimit(RLIMIT_STACK);
20384 + unsigned long pax_task_size = TASK_SIZE;
20385 +
20386 +#ifdef CONFIG_PAX_SEGMEXEC
20387 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
20388 + pax_task_size = SEGMEXEC_TASK_SIZE;
20389 +#endif
20390
20391 if (gap < MIN_GAP)
20392 gap = MIN_GAP;
20393 else if (gap > MAX_GAP)
20394 gap = MAX_GAP;
20395
20396 - return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
20397 + return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
20398 }
20399
20400 /*
20401 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
20402 * does, but not when emulating X86_32
20403 */
20404 -static unsigned long mmap_legacy_base(void)
20405 +static unsigned long mmap_legacy_base(struct mm_struct *mm)
20406 {
20407 - if (mmap_is_ia32())
20408 + if (mmap_is_ia32()) {
20409 +
20410 +#ifdef CONFIG_PAX_SEGMEXEC
20411 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
20412 + return SEGMEXEC_TASK_UNMAPPED_BASE;
20413 + else
20414 +#endif
20415 +
20416 return TASK_UNMAPPED_BASE;
20417 - else
20418 + } else
20419 return TASK_UNMAPPED_BASE + mmap_rnd();
20420 }
20421
20422 @@ -125,11 +138,23 @@ static unsigned long mmap_legacy_base(vo
20423 void arch_pick_mmap_layout(struct mm_struct *mm)
20424 {
20425 if (mmap_is_legacy()) {
20426 - mm->mmap_base = mmap_legacy_base();
20427 + mm->mmap_base = mmap_legacy_base(mm);
20428 +
20429 +#ifdef CONFIG_PAX_RANDMMAP
20430 + if (mm->pax_flags & MF_PAX_RANDMMAP)
20431 + mm->mmap_base += mm->delta_mmap;
20432 +#endif
20433 +
20434 mm->get_unmapped_area = arch_get_unmapped_area;
20435 mm->unmap_area = arch_unmap_area;
20436 } else {
20437 - mm->mmap_base = mmap_base();
20438 + mm->mmap_base = mmap_base(mm);
20439 +
20440 +#ifdef CONFIG_PAX_RANDMMAP
20441 + if (mm->pax_flags & MF_PAX_RANDMMAP)
20442 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
20443 +#endif
20444 +
20445 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
20446 mm->unmap_area = arch_unmap_area_topdown;
20447 }
20448 diff -urNp linux-3.0.4/arch/x86/mm/mmio-mod.c linux-3.0.4/arch/x86/mm/mmio-mod.c
20449 --- linux-3.0.4/arch/x86/mm/mmio-mod.c 2011-07-21 22:17:23.000000000 -0400
20450 +++ linux-3.0.4/arch/x86/mm/mmio-mod.c 2011-08-23 21:47:55.000000000 -0400
20451 @@ -195,7 +195,7 @@ static void pre(struct kmmio_probe *p, s
20452 break;
20453 default:
20454 {
20455 - unsigned char *ip = (unsigned char *)instptr;
20456 + unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
20457 my_trace->opcode = MMIO_UNKNOWN_OP;
20458 my_trace->width = 0;
20459 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
20460 @@ -235,7 +235,7 @@ static void post(struct kmmio_probe *p,
20461 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
20462 void __iomem *addr)
20463 {
20464 - static atomic_t next_id;
20465 + static atomic_unchecked_t next_id;
20466 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
20467 /* These are page-unaligned. */
20468 struct mmiotrace_map map = {
20469 @@ -259,7 +259,7 @@ static void ioremap_trace_core(resource_
20470 .private = trace
20471 },
20472 .phys = offset,
20473 - .id = atomic_inc_return(&next_id)
20474 + .id = atomic_inc_return_unchecked(&next_id)
20475 };
20476 map.map_id = trace->id;
20477
20478 diff -urNp linux-3.0.4/arch/x86/mm/pageattr.c linux-3.0.4/arch/x86/mm/pageattr.c
20479 --- linux-3.0.4/arch/x86/mm/pageattr.c 2011-07-21 22:17:23.000000000 -0400
20480 +++ linux-3.0.4/arch/x86/mm/pageattr.c 2011-08-23 21:47:55.000000000 -0400
20481 @@ -261,7 +261,7 @@ static inline pgprot_t static_protection
20482 */
20483 #ifdef CONFIG_PCI_BIOS
20484 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
20485 - pgprot_val(forbidden) |= _PAGE_NX;
20486 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
20487 #endif
20488
20489 /*
20490 @@ -269,9 +269,10 @@ static inline pgprot_t static_protection
20491 * Does not cover __inittext since that is gone later on. On
20492 * 64bit we do not enforce !NX on the low mapping
20493 */
20494 - if (within(address, (unsigned long)_text, (unsigned long)_etext))
20495 - pgprot_val(forbidden) |= _PAGE_NX;
20496 + if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
20497 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
20498
20499 +#ifdef CONFIG_DEBUG_RODATA
20500 /*
20501 * The .rodata section needs to be read-only. Using the pfn
20502 * catches all aliases.
20503 @@ -279,6 +280,7 @@ static inline pgprot_t static_protection
20504 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
20505 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
20506 pgprot_val(forbidden) |= _PAGE_RW;
20507 +#endif
20508
20509 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
20510 /*
20511 @@ -317,6 +319,13 @@ static inline pgprot_t static_protection
20512 }
20513 #endif
20514
20515 +#ifdef CONFIG_PAX_KERNEXEC
20516 + if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
20517 + pgprot_val(forbidden) |= _PAGE_RW;
20518 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
20519 + }
20520 +#endif
20521 +
20522 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
20523
20524 return prot;
20525 @@ -369,23 +378,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
20526 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
20527 {
20528 /* change init_mm */
20529 + pax_open_kernel();
20530 set_pte_atomic(kpte, pte);
20531 +
20532 #ifdef CONFIG_X86_32
20533 if (!SHARED_KERNEL_PMD) {
20534 +
20535 +#ifdef CONFIG_PAX_PER_CPU_PGD
20536 + unsigned long cpu;
20537 +#else
20538 struct page *page;
20539 +#endif
20540
20541 +#ifdef CONFIG_PAX_PER_CPU_PGD
20542 + for (cpu = 0; cpu < NR_CPUS; ++cpu) {
20543 + pgd_t *pgd = get_cpu_pgd(cpu);
20544 +#else
20545 list_for_each_entry(page, &pgd_list, lru) {
20546 - pgd_t *pgd;
20547 + pgd_t *pgd = (pgd_t *)page_address(page);
20548 +#endif
20549 +
20550 pud_t *pud;
20551 pmd_t *pmd;
20552
20553 - pgd = (pgd_t *)page_address(page) + pgd_index(address);
20554 + pgd += pgd_index(address);
20555 pud = pud_offset(pgd, address);
20556 pmd = pmd_offset(pud, address);
20557 set_pte_atomic((pte_t *)pmd, pte);
20558 }
20559 }
20560 #endif
20561 + pax_close_kernel();
20562 }
20563
20564 static int
20565 diff -urNp linux-3.0.4/arch/x86/mm/pageattr-test.c linux-3.0.4/arch/x86/mm/pageattr-test.c
20566 --- linux-3.0.4/arch/x86/mm/pageattr-test.c 2011-07-21 22:17:23.000000000 -0400
20567 +++ linux-3.0.4/arch/x86/mm/pageattr-test.c 2011-08-23 21:47:55.000000000 -0400
20568 @@ -36,7 +36,7 @@ enum {
20569
20570 static int pte_testbit(pte_t pte)
20571 {
20572 - return pte_flags(pte) & _PAGE_UNUSED1;
20573 + return pte_flags(pte) & _PAGE_CPA_TEST;
20574 }
20575
20576 struct split_state {
20577 diff -urNp linux-3.0.4/arch/x86/mm/pat.c linux-3.0.4/arch/x86/mm/pat.c
20578 --- linux-3.0.4/arch/x86/mm/pat.c 2011-07-21 22:17:23.000000000 -0400
20579 +++ linux-3.0.4/arch/x86/mm/pat.c 2011-08-23 21:47:55.000000000 -0400
20580 @@ -361,7 +361,7 @@ int free_memtype(u64 start, u64 end)
20581
20582 if (!entry) {
20583 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
20584 - current->comm, current->pid, start, end);
20585 + current->comm, task_pid_nr(current), start, end);
20586 return -EINVAL;
20587 }
20588
20589 @@ -492,8 +492,8 @@ static inline int range_is_allowed(unsig
20590 while (cursor < to) {
20591 if (!devmem_is_allowed(pfn)) {
20592 printk(KERN_INFO
20593 - "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
20594 - current->comm, from, to);
20595 + "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
20596 + current->comm, from, to, cursor);
20597 return 0;
20598 }
20599 cursor += PAGE_SIZE;
20600 @@ -557,7 +557,7 @@ int kernel_map_sync_memtype(u64 base, un
20601 printk(KERN_INFO
20602 "%s:%d ioremap_change_attr failed %s "
20603 "for %Lx-%Lx\n",
20604 - current->comm, current->pid,
20605 + current->comm, task_pid_nr(current),
20606 cattr_name(flags),
20607 base, (unsigned long long)(base + size));
20608 return -EINVAL;
20609 @@ -593,7 +593,7 @@ static int reserve_pfn_range(u64 paddr,
20610 if (want_flags != flags) {
20611 printk(KERN_WARNING
20612 "%s:%d map pfn RAM range req %s for %Lx-%Lx, got %s\n",
20613 - current->comm, current->pid,
20614 + current->comm, task_pid_nr(current),
20615 cattr_name(want_flags),
20616 (unsigned long long)paddr,
20617 (unsigned long long)(paddr + size),
20618 @@ -615,7 +615,7 @@ static int reserve_pfn_range(u64 paddr,
20619 free_memtype(paddr, paddr + size);
20620 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
20621 " for %Lx-%Lx, got %s\n",
20622 - current->comm, current->pid,
20623 + current->comm, task_pid_nr(current),
20624 cattr_name(want_flags),
20625 (unsigned long long)paddr,
20626 (unsigned long long)(paddr + size),
20627 diff -urNp linux-3.0.4/arch/x86/mm/pf_in.c linux-3.0.4/arch/x86/mm/pf_in.c
20628 --- linux-3.0.4/arch/x86/mm/pf_in.c 2011-07-21 22:17:23.000000000 -0400
20629 +++ linux-3.0.4/arch/x86/mm/pf_in.c 2011-08-23 21:47:55.000000000 -0400
20630 @@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned l
20631 int i;
20632 enum reason_type rv = OTHERS;
20633
20634 - p = (unsigned char *)ins_addr;
20635 + p = (unsigned char *)ktla_ktva(ins_addr);
20636 p += skip_prefix(p, &prf);
20637 p += get_opcode(p, &opcode);
20638
20639 @@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(un
20640 struct prefix_bits prf;
20641 int i;
20642
20643 - p = (unsigned char *)ins_addr;
20644 + p = (unsigned char *)ktla_ktva(ins_addr);
20645 p += skip_prefix(p, &prf);
20646 p += get_opcode(p, &opcode);
20647
20648 @@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned
20649 struct prefix_bits prf;
20650 int i;
20651
20652 - p = (unsigned char *)ins_addr;
20653 + p = (unsigned char *)ktla_ktva(ins_addr);
20654 p += skip_prefix(p, &prf);
20655 p += get_opcode(p, &opcode);
20656
20657 @@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned l
20658 struct prefix_bits prf;
20659 int i;
20660
20661 - p = (unsigned char *)ins_addr;
20662 + p = (unsigned char *)ktla_ktva(ins_addr);
20663 p += skip_prefix(p, &prf);
20664 p += get_opcode(p, &opcode);
20665 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
20666 @@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned l
20667 struct prefix_bits prf;
20668 int i;
20669
20670 - p = (unsigned char *)ins_addr;
20671 + p = (unsigned char *)ktla_ktva(ins_addr);
20672 p += skip_prefix(p, &prf);
20673 p += get_opcode(p, &opcode);
20674 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
20675 diff -urNp linux-3.0.4/arch/x86/mm/pgtable_32.c linux-3.0.4/arch/x86/mm/pgtable_32.c
20676 --- linux-3.0.4/arch/x86/mm/pgtable_32.c 2011-07-21 22:17:23.000000000 -0400
20677 +++ linux-3.0.4/arch/x86/mm/pgtable_32.c 2011-08-23 21:47:55.000000000 -0400
20678 @@ -48,10 +48,13 @@ void set_pte_vaddr(unsigned long vaddr,
20679 return;
20680 }
20681 pte = pte_offset_kernel(pmd, vaddr);
20682 +
20683 + pax_open_kernel();
20684 if (pte_val(pteval))
20685 set_pte_at(&init_mm, vaddr, pte, pteval);
20686 else
20687 pte_clear(&init_mm, vaddr, pte);
20688 + pax_close_kernel();
20689
20690 /*
20691 * It's enough to flush this one mapping.
20692 diff -urNp linux-3.0.4/arch/x86/mm/pgtable.c linux-3.0.4/arch/x86/mm/pgtable.c
20693 --- linux-3.0.4/arch/x86/mm/pgtable.c 2011-07-21 22:17:23.000000000 -0400
20694 +++ linux-3.0.4/arch/x86/mm/pgtable.c 2011-08-23 21:47:55.000000000 -0400
20695 @@ -84,10 +84,52 @@ static inline void pgd_list_del(pgd_t *p
20696 list_del(&page->lru);
20697 }
20698
20699 -#define UNSHARED_PTRS_PER_PGD \
20700 - (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
20701 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20702 +pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
20703
20704 +void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count)
20705 +{
20706 + while (count--)
20707 + *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
20708 +}
20709 +#endif
20710 +
20711 +#ifdef CONFIG_PAX_PER_CPU_PGD
20712 +void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count)
20713 +{
20714 + while (count--)
20715 +
20716 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20717 + *dst++ = __pgd(pgd_val(*src++) & clone_pgd_mask);
20718 +#else
20719 + *dst++ = *src++;
20720 +#endif
20721
20722 +}
20723 +#endif
20724 +
20725 +#ifdef CONFIG_X86_64
20726 +#define pxd_t pud_t
20727 +#define pyd_t pgd_t
20728 +#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
20729 +#define pxd_free(mm, pud) pud_free((mm), (pud))
20730 +#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
20731 +#define pyd_offset(mm ,address) pgd_offset((mm), (address))
20732 +#define PYD_SIZE PGDIR_SIZE
20733 +#else
20734 +#define pxd_t pmd_t
20735 +#define pyd_t pud_t
20736 +#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
20737 +#define pxd_free(mm, pud) pmd_free((mm), (pud))
20738 +#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
20739 +#define pyd_offset(mm ,address) pud_offset((mm), (address))
20740 +#define PYD_SIZE PUD_SIZE
20741 +#endif
20742 +
20743 +#ifdef CONFIG_PAX_PER_CPU_PGD
20744 +static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
20745 +static inline void pgd_dtor(pgd_t *pgd) {}
20746 +#else
20747 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
20748 {
20749 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
20750 @@ -128,6 +170,7 @@ static void pgd_dtor(pgd_t *pgd)
20751 pgd_list_del(pgd);
20752 spin_unlock(&pgd_lock);
20753 }
20754 +#endif
20755
20756 /*
20757 * List of all pgd's needed for non-PAE so it can invalidate entries
20758 @@ -140,7 +183,7 @@ static void pgd_dtor(pgd_t *pgd)
20759 * -- wli
20760 */
20761
20762 -#ifdef CONFIG_X86_PAE
20763 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
20764 /*
20765 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
20766 * updating the top-level pagetable entries to guarantee the
20767 @@ -152,7 +195,7 @@ static void pgd_dtor(pgd_t *pgd)
20768 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
20769 * and initialize the kernel pmds here.
20770 */
20771 -#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
20772 +#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
20773
20774 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
20775 {
20776 @@ -170,36 +213,38 @@ void pud_populate(struct mm_struct *mm,
20777 */
20778 flush_tlb_mm(mm);
20779 }
20780 +#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
20781 +#define PREALLOCATED_PXDS USER_PGD_PTRS
20782 #else /* !CONFIG_X86_PAE */
20783
20784 /* No need to prepopulate any pagetable entries in non-PAE modes. */
20785 -#define PREALLOCATED_PMDS 0
20786 +#define PREALLOCATED_PXDS 0
20787
20788 #endif /* CONFIG_X86_PAE */
20789
20790 -static void free_pmds(pmd_t *pmds[])
20791 +static void free_pxds(pxd_t *pxds[])
20792 {
20793 int i;
20794
20795 - for(i = 0; i < PREALLOCATED_PMDS; i++)
20796 - if (pmds[i])
20797 - free_page((unsigned long)pmds[i]);
20798 + for(i = 0; i < PREALLOCATED_PXDS; i++)
20799 + if (pxds[i])
20800 + free_page((unsigned long)pxds[i]);
20801 }
20802
20803 -static int preallocate_pmds(pmd_t *pmds[])
20804 +static int preallocate_pxds(pxd_t *pxds[])
20805 {
20806 int i;
20807 bool failed = false;
20808
20809 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
20810 - pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
20811 - if (pmd == NULL)
20812 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
20813 + pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
20814 + if (pxd == NULL)
20815 failed = true;
20816 - pmds[i] = pmd;
20817 + pxds[i] = pxd;
20818 }
20819
20820 if (failed) {
20821 - free_pmds(pmds);
20822 + free_pxds(pxds);
20823 return -ENOMEM;
20824 }
20825
20826 @@ -212,51 +257,55 @@ static int preallocate_pmds(pmd_t *pmds[
20827 * preallocate which never got a corresponding vma will need to be
20828 * freed manually.
20829 */
20830 -static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
20831 +static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
20832 {
20833 int i;
20834
20835 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
20836 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
20837 pgd_t pgd = pgdp[i];
20838
20839 if (pgd_val(pgd) != 0) {
20840 - pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
20841 + pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
20842
20843 - pgdp[i] = native_make_pgd(0);
20844 + set_pgd(pgdp + i, native_make_pgd(0));
20845
20846 - paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
20847 - pmd_free(mm, pmd);
20848 + paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
20849 + pxd_free(mm, pxd);
20850 }
20851 }
20852 }
20853
20854 -static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
20855 +static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
20856 {
20857 - pud_t *pud;
20858 + pyd_t *pyd;
20859 unsigned long addr;
20860 int i;
20861
20862 - if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
20863 + if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
20864 return;
20865
20866 - pud = pud_offset(pgd, 0);
20867 +#ifdef CONFIG_X86_64
20868 + pyd = pyd_offset(mm, 0L);
20869 +#else
20870 + pyd = pyd_offset(pgd, 0L);
20871 +#endif
20872
20873 - for (addr = i = 0; i < PREALLOCATED_PMDS;
20874 - i++, pud++, addr += PUD_SIZE) {
20875 - pmd_t *pmd = pmds[i];
20876 + for (addr = i = 0; i < PREALLOCATED_PXDS;
20877 + i++, pyd++, addr += PYD_SIZE) {
20878 + pxd_t *pxd = pxds[i];
20879
20880 if (i >= KERNEL_PGD_BOUNDARY)
20881 - memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
20882 - sizeof(pmd_t) * PTRS_PER_PMD);
20883 + memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
20884 + sizeof(pxd_t) * PTRS_PER_PMD);
20885
20886 - pud_populate(mm, pud, pmd);
20887 + pyd_populate(mm, pyd, pxd);
20888 }
20889 }
20890
20891 pgd_t *pgd_alloc(struct mm_struct *mm)
20892 {
20893 pgd_t *pgd;
20894 - pmd_t *pmds[PREALLOCATED_PMDS];
20895 + pxd_t *pxds[PREALLOCATED_PXDS];
20896
20897 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
20898
20899 @@ -265,11 +314,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
20900
20901 mm->pgd = pgd;
20902
20903 - if (preallocate_pmds(pmds) != 0)
20904 + if (preallocate_pxds(pxds) != 0)
20905 goto out_free_pgd;
20906
20907 if (paravirt_pgd_alloc(mm) != 0)
20908 - goto out_free_pmds;
20909 + goto out_free_pxds;
20910
20911 /*
20912 * Make sure that pre-populating the pmds is atomic with
20913 @@ -279,14 +328,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
20914 spin_lock(&pgd_lock);
20915
20916 pgd_ctor(mm, pgd);
20917 - pgd_prepopulate_pmd(mm, pgd, pmds);
20918 + pgd_prepopulate_pxd(mm, pgd, pxds);
20919
20920 spin_unlock(&pgd_lock);
20921
20922 return pgd;
20923
20924 -out_free_pmds:
20925 - free_pmds(pmds);
20926 +out_free_pxds:
20927 + free_pxds(pxds);
20928 out_free_pgd:
20929 free_page((unsigned long)pgd);
20930 out:
20931 @@ -295,7 +344,7 @@ out:
20932
20933 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
20934 {
20935 - pgd_mop_up_pmds(mm, pgd);
20936 + pgd_mop_up_pxds(mm, pgd);
20937 pgd_dtor(pgd);
20938 paravirt_pgd_free(mm, pgd);
20939 free_page((unsigned long)pgd);
20940 diff -urNp linux-3.0.4/arch/x86/mm/setup_nx.c linux-3.0.4/arch/x86/mm/setup_nx.c
20941 --- linux-3.0.4/arch/x86/mm/setup_nx.c 2011-07-21 22:17:23.000000000 -0400
20942 +++ linux-3.0.4/arch/x86/mm/setup_nx.c 2011-08-23 21:47:55.000000000 -0400
20943 @@ -5,8 +5,10 @@
20944 #include <asm/pgtable.h>
20945 #include <asm/proto.h>
20946
20947 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
20948 static int disable_nx __cpuinitdata;
20949
20950 +#ifndef CONFIG_PAX_PAGEEXEC
20951 /*
20952 * noexec = on|off
20953 *
20954 @@ -28,12 +30,17 @@ static int __init noexec_setup(char *str
20955 return 0;
20956 }
20957 early_param("noexec", noexec_setup);
20958 +#endif
20959 +
20960 +#endif
20961
20962 void __cpuinit x86_configure_nx(void)
20963 {
20964 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
20965 if (cpu_has_nx && !disable_nx)
20966 __supported_pte_mask |= _PAGE_NX;
20967 else
20968 +#endif
20969 __supported_pte_mask &= ~_PAGE_NX;
20970 }
20971
20972 diff -urNp linux-3.0.4/arch/x86/mm/tlb.c linux-3.0.4/arch/x86/mm/tlb.c
20973 --- linux-3.0.4/arch/x86/mm/tlb.c 2011-07-21 22:17:23.000000000 -0400
20974 +++ linux-3.0.4/arch/x86/mm/tlb.c 2011-08-23 21:47:55.000000000 -0400
20975 @@ -65,7 +65,11 @@ void leave_mm(int cpu)
20976 BUG();
20977 cpumask_clear_cpu(cpu,
20978 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
20979 +
20980 +#ifndef CONFIG_PAX_PER_CPU_PGD
20981 load_cr3(swapper_pg_dir);
20982 +#endif
20983 +
20984 }
20985 EXPORT_SYMBOL_GPL(leave_mm);
20986
20987 diff -urNp linux-3.0.4/arch/x86/net/bpf_jit_comp.c linux-3.0.4/arch/x86/net/bpf_jit_comp.c
20988 --- linux-3.0.4/arch/x86/net/bpf_jit_comp.c 2011-07-21 22:17:23.000000000 -0400
20989 +++ linux-3.0.4/arch/x86/net/bpf_jit_comp.c 2011-08-23 21:47:55.000000000 -0400
20990 @@ -589,7 +589,9 @@ cond_branch: f_offset = addrs[i + filt
20991 module_free(NULL, image);
20992 return;
20993 }
20994 + pax_open_kernel();
20995 memcpy(image + proglen, temp, ilen);
20996 + pax_close_kernel();
20997 }
20998 proglen += ilen;
20999 addrs[i] = proglen;
21000 @@ -609,7 +611,7 @@ cond_branch: f_offset = addrs[i + filt
21001 break;
21002 }
21003 if (proglen == oldproglen) {
21004 - image = module_alloc(max_t(unsigned int,
21005 + image = module_alloc_exec(max_t(unsigned int,
21006 proglen,
21007 sizeof(struct work_struct)));
21008 if (!image)
21009 diff -urNp linux-3.0.4/arch/x86/oprofile/backtrace.c linux-3.0.4/arch/x86/oprofile/backtrace.c
21010 --- linux-3.0.4/arch/x86/oprofile/backtrace.c 2011-09-02 18:11:21.000000000 -0400
21011 +++ linux-3.0.4/arch/x86/oprofile/backtrace.c 2011-08-23 21:47:55.000000000 -0400
21012 @@ -148,7 +148,7 @@ x86_backtrace(struct pt_regs * const reg
21013 {
21014 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
21015
21016 - if (!user_mode_vm(regs)) {
21017 + if (!user_mode(regs)) {
21018 unsigned long stack = kernel_stack_pointer(regs);
21019 if (depth)
21020 dump_trace(NULL, regs, (unsigned long *)stack, 0,
21021 diff -urNp linux-3.0.4/arch/x86/pci/mrst.c linux-3.0.4/arch/x86/pci/mrst.c
21022 --- linux-3.0.4/arch/x86/pci/mrst.c 2011-07-21 22:17:23.000000000 -0400
21023 +++ linux-3.0.4/arch/x86/pci/mrst.c 2011-08-23 21:47:55.000000000 -0400
21024 @@ -234,7 +234,9 @@ int __init pci_mrst_init(void)
21025 printk(KERN_INFO "Moorestown platform detected, using MRST PCI ops\n");
21026 pci_mmcfg_late_init();
21027 pcibios_enable_irq = mrst_pci_irq_enable;
21028 - pci_root_ops = pci_mrst_ops;
21029 + pax_open_kernel();
21030 + memcpy((void *)&pci_root_ops, &pci_mrst_ops, sizeof(pci_mrst_ops));
21031 + pax_close_kernel();
21032 /* Continue with standard init */
21033 return 1;
21034 }
21035 diff -urNp linux-3.0.4/arch/x86/pci/pcbios.c linux-3.0.4/arch/x86/pci/pcbios.c
21036 --- linux-3.0.4/arch/x86/pci/pcbios.c 2011-07-21 22:17:23.000000000 -0400
21037 +++ linux-3.0.4/arch/x86/pci/pcbios.c 2011-08-23 21:47:55.000000000 -0400
21038 @@ -79,50 +79,93 @@ union bios32 {
21039 static struct {
21040 unsigned long address;
21041 unsigned short segment;
21042 -} bios32_indirect = { 0, __KERNEL_CS };
21043 +} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
21044
21045 /*
21046 * Returns the entry point for the given service, NULL on error
21047 */
21048
21049 -static unsigned long bios32_service(unsigned long service)
21050 +static unsigned long __devinit bios32_service(unsigned long service)
21051 {
21052 unsigned char return_code; /* %al */
21053 unsigned long address; /* %ebx */
21054 unsigned long length; /* %ecx */
21055 unsigned long entry; /* %edx */
21056 unsigned long flags;
21057 + struct desc_struct d, *gdt;
21058
21059 local_irq_save(flags);
21060 - __asm__("lcall *(%%edi); cld"
21061 +
21062 + gdt = get_cpu_gdt_table(smp_processor_id());
21063 +
21064 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
21065 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
21066 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
21067 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
21068 +
21069 + __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
21070 : "=a" (return_code),
21071 "=b" (address),
21072 "=c" (length),
21073 "=d" (entry)
21074 : "0" (service),
21075 "1" (0),
21076 - "D" (&bios32_indirect));
21077 + "D" (&bios32_indirect),
21078 + "r"(__PCIBIOS_DS)
21079 + : "memory");
21080 +
21081 + pax_open_kernel();
21082 + gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
21083 + gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
21084 + gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
21085 + gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
21086 + pax_close_kernel();
21087 +
21088 local_irq_restore(flags);
21089
21090 switch (return_code) {
21091 - case 0:
21092 - return address + entry;
21093 - case 0x80: /* Not present */
21094 - printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
21095 - return 0;
21096 - default: /* Shouldn't happen */
21097 - printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
21098 - service, return_code);
21099 + case 0: {
21100 + int cpu;
21101 + unsigned char flags;
21102 +
21103 + printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
21104 + if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
21105 + printk(KERN_WARNING "bios32_service: not valid\n");
21106 return 0;
21107 + }
21108 + address = address + PAGE_OFFSET;
21109 + length += 16UL; /* some BIOSs underreport this... */
21110 + flags = 4;
21111 + if (length >= 64*1024*1024) {
21112 + length >>= PAGE_SHIFT;
21113 + flags |= 8;
21114 + }
21115 +
21116 + for (cpu = 0; cpu < NR_CPUS; cpu++) {
21117 + gdt = get_cpu_gdt_table(cpu);
21118 + pack_descriptor(&d, address, length, 0x9b, flags);
21119 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
21120 + pack_descriptor(&d, address, length, 0x93, flags);
21121 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
21122 + }
21123 + return entry;
21124 + }
21125 + case 0x80: /* Not present */
21126 + printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
21127 + return 0;
21128 + default: /* Shouldn't happen */
21129 + printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
21130 + service, return_code);
21131 + return 0;
21132 }
21133 }
21134
21135 static struct {
21136 unsigned long address;
21137 unsigned short segment;
21138 -} pci_indirect = { 0, __KERNEL_CS };
21139 +} pci_indirect __read_only = { 0, __PCIBIOS_CS };
21140
21141 -static int pci_bios_present;
21142 +static int pci_bios_present __read_only;
21143
21144 static int __devinit check_pcibios(void)
21145 {
21146 @@ -131,11 +174,13 @@ static int __devinit check_pcibios(void)
21147 unsigned long flags, pcibios_entry;
21148
21149 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
21150 - pci_indirect.address = pcibios_entry + PAGE_OFFSET;
21151 + pci_indirect.address = pcibios_entry;
21152
21153 local_irq_save(flags);
21154 - __asm__(
21155 - "lcall *(%%edi); cld\n\t"
21156 + __asm__("movw %w6, %%ds\n\t"
21157 + "lcall *%%ss:(%%edi); cld\n\t"
21158 + "push %%ss\n\t"
21159 + "pop %%ds\n\t"
21160 "jc 1f\n\t"
21161 "xor %%ah, %%ah\n"
21162 "1:"
21163 @@ -144,7 +189,8 @@ static int __devinit check_pcibios(void)
21164 "=b" (ebx),
21165 "=c" (ecx)
21166 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
21167 - "D" (&pci_indirect)
21168 + "D" (&pci_indirect),
21169 + "r" (__PCIBIOS_DS)
21170 : "memory");
21171 local_irq_restore(flags);
21172
21173 @@ -188,7 +234,10 @@ static int pci_bios_read(unsigned int se
21174
21175 switch (len) {
21176 case 1:
21177 - __asm__("lcall *(%%esi); cld\n\t"
21178 + __asm__("movw %w6, %%ds\n\t"
21179 + "lcall *%%ss:(%%esi); cld\n\t"
21180 + "push %%ss\n\t"
21181 + "pop %%ds\n\t"
21182 "jc 1f\n\t"
21183 "xor %%ah, %%ah\n"
21184 "1:"
21185 @@ -197,7 +246,8 @@ static int pci_bios_read(unsigned int se
21186 : "1" (PCIBIOS_READ_CONFIG_BYTE),
21187 "b" (bx),
21188 "D" ((long)reg),
21189 - "S" (&pci_indirect));
21190 + "S" (&pci_indirect),
21191 + "r" (__PCIBIOS_DS));
21192 /*
21193 * Zero-extend the result beyond 8 bits, do not trust the
21194 * BIOS having done it:
21195 @@ -205,7 +255,10 @@ static int pci_bios_read(unsigned int se
21196 *value &= 0xff;
21197 break;
21198 case 2:
21199 - __asm__("lcall *(%%esi); cld\n\t"
21200 + __asm__("movw %w6, %%ds\n\t"
21201 + "lcall *%%ss:(%%esi); cld\n\t"
21202 + "push %%ss\n\t"
21203 + "pop %%ds\n\t"
21204 "jc 1f\n\t"
21205 "xor %%ah, %%ah\n"
21206 "1:"
21207 @@ -214,7 +267,8 @@ static int pci_bios_read(unsigned int se
21208 : "1" (PCIBIOS_READ_CONFIG_WORD),
21209 "b" (bx),
21210 "D" ((long)reg),
21211 - "S" (&pci_indirect));
21212 + "S" (&pci_indirect),
21213 + "r" (__PCIBIOS_DS));
21214 /*
21215 * Zero-extend the result beyond 16 bits, do not trust the
21216 * BIOS having done it:
21217 @@ -222,7 +276,10 @@ static int pci_bios_read(unsigned int se
21218 *value &= 0xffff;
21219 break;
21220 case 4:
21221 - __asm__("lcall *(%%esi); cld\n\t"
21222 + __asm__("movw %w6, %%ds\n\t"
21223 + "lcall *%%ss:(%%esi); cld\n\t"
21224 + "push %%ss\n\t"
21225 + "pop %%ds\n\t"
21226 "jc 1f\n\t"
21227 "xor %%ah, %%ah\n"
21228 "1:"
21229 @@ -231,7 +288,8 @@ static int pci_bios_read(unsigned int se
21230 : "1" (PCIBIOS_READ_CONFIG_DWORD),
21231 "b" (bx),
21232 "D" ((long)reg),
21233 - "S" (&pci_indirect));
21234 + "S" (&pci_indirect),
21235 + "r" (__PCIBIOS_DS));
21236 break;
21237 }
21238
21239 @@ -254,7 +312,10 @@ static int pci_bios_write(unsigned int s
21240
21241 switch (len) {
21242 case 1:
21243 - __asm__("lcall *(%%esi); cld\n\t"
21244 + __asm__("movw %w6, %%ds\n\t"
21245 + "lcall *%%ss:(%%esi); cld\n\t"
21246 + "push %%ss\n\t"
21247 + "pop %%ds\n\t"
21248 "jc 1f\n\t"
21249 "xor %%ah, %%ah\n"
21250 "1:"
21251 @@ -263,10 +324,14 @@ static int pci_bios_write(unsigned int s
21252 "c" (value),
21253 "b" (bx),
21254 "D" ((long)reg),
21255 - "S" (&pci_indirect));
21256 + "S" (&pci_indirect),
21257 + "r" (__PCIBIOS_DS));
21258 break;
21259 case 2:
21260 - __asm__("lcall *(%%esi); cld\n\t"
21261 + __asm__("movw %w6, %%ds\n\t"
21262 + "lcall *%%ss:(%%esi); cld\n\t"
21263 + "push %%ss\n\t"
21264 + "pop %%ds\n\t"
21265 "jc 1f\n\t"
21266 "xor %%ah, %%ah\n"
21267 "1:"
21268 @@ -275,10 +340,14 @@ static int pci_bios_write(unsigned int s
21269 "c" (value),
21270 "b" (bx),
21271 "D" ((long)reg),
21272 - "S" (&pci_indirect));
21273 + "S" (&pci_indirect),
21274 + "r" (__PCIBIOS_DS));
21275 break;
21276 case 4:
21277 - __asm__("lcall *(%%esi); cld\n\t"
21278 + __asm__("movw %w6, %%ds\n\t"
21279 + "lcall *%%ss:(%%esi); cld\n\t"
21280 + "push %%ss\n\t"
21281 + "pop %%ds\n\t"
21282 "jc 1f\n\t"
21283 "xor %%ah, %%ah\n"
21284 "1:"
21285 @@ -287,7 +356,8 @@ static int pci_bios_write(unsigned int s
21286 "c" (value),
21287 "b" (bx),
21288 "D" ((long)reg),
21289 - "S" (&pci_indirect));
21290 + "S" (&pci_indirect),
21291 + "r" (__PCIBIOS_DS));
21292 break;
21293 }
21294
21295 @@ -392,10 +462,13 @@ struct irq_routing_table * pcibios_get_i
21296
21297 DBG("PCI: Fetching IRQ routing table... ");
21298 __asm__("push %%es\n\t"
21299 + "movw %w8, %%ds\n\t"
21300 "push %%ds\n\t"
21301 "pop %%es\n\t"
21302 - "lcall *(%%esi); cld\n\t"
21303 + "lcall *%%ss:(%%esi); cld\n\t"
21304 "pop %%es\n\t"
21305 + "push %%ss\n\t"
21306 + "pop %%ds\n"
21307 "jc 1f\n\t"
21308 "xor %%ah, %%ah\n"
21309 "1:"
21310 @@ -406,7 +479,8 @@ struct irq_routing_table * pcibios_get_i
21311 "1" (0),
21312 "D" ((long) &opt),
21313 "S" (&pci_indirect),
21314 - "m" (opt)
21315 + "m" (opt),
21316 + "r" (__PCIBIOS_DS)
21317 : "memory");
21318 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
21319 if (ret & 0xff00)
21320 @@ -430,7 +504,10 @@ int pcibios_set_irq_routing(struct pci_d
21321 {
21322 int ret;
21323
21324 - __asm__("lcall *(%%esi); cld\n\t"
21325 + __asm__("movw %w5, %%ds\n\t"
21326 + "lcall *%%ss:(%%esi); cld\n\t"
21327 + "push %%ss\n\t"
21328 + "pop %%ds\n"
21329 "jc 1f\n\t"
21330 "xor %%ah, %%ah\n"
21331 "1:"
21332 @@ -438,7 +515,8 @@ int pcibios_set_irq_routing(struct pci_d
21333 : "0" (PCIBIOS_SET_PCI_HW_INT),
21334 "b" ((dev->bus->number << 8) | dev->devfn),
21335 "c" ((irq << 8) | (pin + 10)),
21336 - "S" (&pci_indirect));
21337 + "S" (&pci_indirect),
21338 + "r" (__PCIBIOS_DS));
21339 return !(ret & 0xff00);
21340 }
21341 EXPORT_SYMBOL(pcibios_set_irq_routing);
21342 diff -urNp linux-3.0.4/arch/x86/platform/efi/efi_32.c linux-3.0.4/arch/x86/platform/efi/efi_32.c
21343 --- linux-3.0.4/arch/x86/platform/efi/efi_32.c 2011-07-21 22:17:23.000000000 -0400
21344 +++ linux-3.0.4/arch/x86/platform/efi/efi_32.c 2011-09-19 09:16:58.000000000 -0400
21345 @@ -38,70 +38,56 @@
21346 */
21347
21348 static unsigned long efi_rt_eflags;
21349 -static pgd_t efi_bak_pg_dir_pointer[2];
21350 +static pgd_t __initdata efi_bak_pg_dir_pointer[KERNEL_PGD_PTRS];
21351
21352 -void efi_call_phys_prelog(void)
21353 +void __init efi_call_phys_prelog(void)
21354 {
21355 - unsigned long cr4;
21356 - unsigned long temp;
21357 struct desc_ptr gdt_descr;
21358
21359 - local_irq_save(efi_rt_eflags);
21360 +#ifdef CONFIG_PAX_KERNEXEC
21361 + struct desc_struct d;
21362 +#endif
21363
21364 - /*
21365 - * If I don't have PAE, I should just duplicate two entries in page
21366 - * directory. If I have PAE, I just need to duplicate one entry in
21367 - * page directory.
21368 - */
21369 - cr4 = read_cr4_safe();
21370 + local_irq_save(efi_rt_eflags);
21371
21372 - if (cr4 & X86_CR4_PAE) {
21373 - efi_bak_pg_dir_pointer[0].pgd =
21374 - swapper_pg_dir[pgd_index(0)].pgd;
21375 - swapper_pg_dir[0].pgd =
21376 - swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
21377 - } else {
21378 - efi_bak_pg_dir_pointer[0].pgd =
21379 - swapper_pg_dir[pgd_index(0)].pgd;
21380 - efi_bak_pg_dir_pointer[1].pgd =
21381 - swapper_pg_dir[pgd_index(0x400000)].pgd;
21382 - swapper_pg_dir[pgd_index(0)].pgd =
21383 - swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
21384 - temp = PAGE_OFFSET + 0x400000;
21385 - swapper_pg_dir[pgd_index(0x400000)].pgd =
21386 - swapper_pg_dir[pgd_index(temp)].pgd;
21387 - }
21388 + clone_pgd_range(efi_bak_pg_dir_pointer, swapper_pg_dir, KERNEL_PGD_PTRS);
21389 + clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
21390 + min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
21391
21392 /*
21393 * After the lock is released, the original page table is restored.
21394 */
21395 __flush_tlb_all();
21396
21397 +#ifdef CONFIG_PAX_KERNEXEC
21398 + pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
21399 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_EFI_CS, &d, DESCTYPE_S);
21400 + pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
21401 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_EFI_DS, &d, DESCTYPE_S);
21402 +#endif
21403 +
21404 gdt_descr.address = __pa(get_cpu_gdt_table(0));
21405 gdt_descr.size = GDT_SIZE - 1;
21406 load_gdt(&gdt_descr);
21407 }
21408
21409 -void efi_call_phys_epilog(void)
21410 +void __init efi_call_phys_epilog(void)
21411 {
21412 - unsigned long cr4;
21413 struct desc_ptr gdt_descr;
21414
21415 +#ifdef CONFIG_PAX_KERNEXEC
21416 + struct desc_struct d;
21417 +
21418 + memset(&d, 0, sizeof d);
21419 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_EFI_CS, &d, DESCTYPE_S);
21420 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_EFI_DS, &d, DESCTYPE_S);
21421 +#endif
21422 +
21423 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
21424 gdt_descr.size = GDT_SIZE - 1;
21425 load_gdt(&gdt_descr);
21426
21427 - cr4 = read_cr4_safe();
21428 -
21429 - if (cr4 & X86_CR4_PAE) {
21430 - swapper_pg_dir[pgd_index(0)].pgd =
21431 - efi_bak_pg_dir_pointer[0].pgd;
21432 - } else {
21433 - swapper_pg_dir[pgd_index(0)].pgd =
21434 - efi_bak_pg_dir_pointer[0].pgd;
21435 - swapper_pg_dir[pgd_index(0x400000)].pgd =
21436 - efi_bak_pg_dir_pointer[1].pgd;
21437 - }
21438 + clone_pgd_range(swapper_pg_dir, efi_bak_pg_dir_pointer, KERNEL_PGD_PTRS);
21439
21440 /*
21441 * After the lock is released, the original page table is restored.
21442 diff -urNp linux-3.0.4/arch/x86/platform/efi/efi_stub_32.S linux-3.0.4/arch/x86/platform/efi/efi_stub_32.S
21443 --- linux-3.0.4/arch/x86/platform/efi/efi_stub_32.S 2011-07-21 22:17:23.000000000 -0400
21444 +++ linux-3.0.4/arch/x86/platform/efi/efi_stub_32.S 2011-09-19 09:16:58.000000000 -0400
21445 @@ -6,7 +6,9 @@
21446 */
21447
21448 #include <linux/linkage.h>
21449 +#include <linux/init.h>
21450 #include <asm/page_types.h>
21451 +#include <asm/segment.h>
21452
21453 /*
21454 * efi_call_phys(void *, ...) is a function with variable parameters.
21455 @@ -20,7 +22,7 @@
21456 * service functions will comply with gcc calling convention, too.
21457 */
21458
21459 -.text
21460 +__INIT
21461 ENTRY(efi_call_phys)
21462 /*
21463 * 0. The function can only be called in Linux kernel. So CS has been
21464 @@ -36,9 +38,11 @@ ENTRY(efi_call_phys)
21465 * The mapping of lower virtual memory has been created in prelog and
21466 * epilog.
21467 */
21468 - movl $1f, %edx
21469 - subl $__PAGE_OFFSET, %edx
21470 - jmp *%edx
21471 + movl $(__KERNEXEC_EFI_DS), %edx
21472 + mov %edx, %ds
21473 + mov %edx, %es
21474 + mov %edx, %ss
21475 + ljmp $(__KERNEXEC_EFI_CS),$1f-__PAGE_OFFSET
21476 1:
21477
21478 /*
21479 @@ -47,14 +51,8 @@ ENTRY(efi_call_phys)
21480 * parameter 2, ..., param n. To make things easy, we save the return
21481 * address of efi_call_phys in a global variable.
21482 */
21483 - popl %edx
21484 - movl %edx, saved_return_addr
21485 - /* get the function pointer into ECX*/
21486 - popl %ecx
21487 - movl %ecx, efi_rt_function_ptr
21488 - movl $2f, %edx
21489 - subl $__PAGE_OFFSET, %edx
21490 - pushl %edx
21491 + popl (saved_return_addr)
21492 + popl (efi_rt_function_ptr)
21493
21494 /*
21495 * 3. Clear PG bit in %CR0.
21496 @@ -73,9 +71,8 @@ ENTRY(efi_call_phys)
21497 /*
21498 * 5. Call the physical function.
21499 */
21500 - jmp *%ecx
21501 + call *(efi_rt_function_ptr-__PAGE_OFFSET)
21502
21503 -2:
21504 /*
21505 * 6. After EFI runtime service returns, control will return to
21506 * following instruction. We'd better readjust stack pointer first.
21507 @@ -88,35 +85,32 @@ ENTRY(efi_call_phys)
21508 movl %cr0, %edx
21509 orl $0x80000000, %edx
21510 movl %edx, %cr0
21511 - jmp 1f
21512 -1:
21513 +
21514 /*
21515 * 8. Now restore the virtual mode from flat mode by
21516 * adding EIP with PAGE_OFFSET.
21517 */
21518 - movl $1f, %edx
21519 - jmp *%edx
21520 + ljmp $(__KERNEL_CS),$1f+__PAGE_OFFSET
21521 1:
21522 + movl $(__KERNEL_DS), %edx
21523 + mov %edx, %ds
21524 + mov %edx, %es
21525 + mov %edx, %ss
21526
21527 /*
21528 * 9. Balance the stack. And because EAX contain the return value,
21529 * we'd better not clobber it.
21530 */
21531 - leal efi_rt_function_ptr, %edx
21532 - movl (%edx), %ecx
21533 - pushl %ecx
21534 + pushl (efi_rt_function_ptr)
21535
21536 /*
21537 - * 10. Push the saved return address onto the stack and return.
21538 + * 10. Return to the saved return address.
21539 */
21540 - leal saved_return_addr, %edx
21541 - movl (%edx), %ecx
21542 - pushl %ecx
21543 - ret
21544 + jmpl *(saved_return_addr)
21545 ENDPROC(efi_call_phys)
21546 .previous
21547
21548 -.data
21549 +__INITDATA
21550 saved_return_addr:
21551 .long 0
21552 efi_rt_function_ptr:
21553 diff -urNp linux-3.0.4/arch/x86/platform/efi/efi_stub_64.S linux-3.0.4/arch/x86/platform/efi/efi_stub_64.S
21554 --- linux-3.0.4/arch/x86/platform/efi/efi_stub_64.S 2011-07-21 22:17:23.000000000 -0400
21555 +++ linux-3.0.4/arch/x86/platform/efi/efi_stub_64.S 2011-09-17 18:31:51.000000000 -0400
21556 @@ -40,6 +40,9 @@ ENTRY(efi_call0)
21557 call *%rdi
21558 addq $32, %rsp
21559 RESTORE_XMM
21560 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
21561 + orb $0x80, 0x7(%rsp)
21562 +#endif
21563 ret
21564 ENDPROC(efi_call0)
21565
21566 @@ -50,6 +53,9 @@ ENTRY(efi_call1)
21567 call *%rdi
21568 addq $32, %rsp
21569 RESTORE_XMM
21570 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
21571 + orb $0x80, 0x7(%rsp)
21572 +#endif
21573 ret
21574 ENDPROC(efi_call1)
21575
21576 @@ -60,6 +66,9 @@ ENTRY(efi_call2)
21577 call *%rdi
21578 addq $32, %rsp
21579 RESTORE_XMM
21580 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
21581 + orb $0x80, 0x7(%rsp)
21582 +#endif
21583 ret
21584 ENDPROC(efi_call2)
21585
21586 @@ -71,6 +80,9 @@ ENTRY(efi_call3)
21587 call *%rdi
21588 addq $32, %rsp
21589 RESTORE_XMM
21590 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
21591 + orb $0x80, 0x7(%rsp)
21592 +#endif
21593 ret
21594 ENDPROC(efi_call3)
21595
21596 @@ -83,6 +95,9 @@ ENTRY(efi_call4)
21597 call *%rdi
21598 addq $32, %rsp
21599 RESTORE_XMM
21600 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
21601 + orb $0x80, 0x7(%rsp)
21602 +#endif
21603 ret
21604 ENDPROC(efi_call4)
21605
21606 @@ -96,6 +111,9 @@ ENTRY(efi_call5)
21607 call *%rdi
21608 addq $48, %rsp
21609 RESTORE_XMM
21610 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
21611 + orb $0x80, 0x7(%rsp)
21612 +#endif
21613 ret
21614 ENDPROC(efi_call5)
21615
21616 @@ -112,5 +130,8 @@ ENTRY(efi_call6)
21617 call *%rdi
21618 addq $48, %rsp
21619 RESTORE_XMM
21620 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
21621 + orb $0x80, 0x7(%rsp)
21622 +#endif
21623 ret
21624 ENDPROC(efi_call6)
21625 diff -urNp linux-3.0.4/arch/x86/platform/mrst/mrst.c linux-3.0.4/arch/x86/platform/mrst/mrst.c
21626 --- linux-3.0.4/arch/x86/platform/mrst/mrst.c 2011-07-21 22:17:23.000000000 -0400
21627 +++ linux-3.0.4/arch/x86/platform/mrst/mrst.c 2011-08-23 21:47:55.000000000 -0400
21628 @@ -239,14 +239,16 @@ static int mrst_i8042_detect(void)
21629 }
21630
21631 /* Reboot and power off are handled by the SCU on a MID device */
21632 -static void mrst_power_off(void)
21633 +static __noreturn void mrst_power_off(void)
21634 {
21635 intel_scu_ipc_simple_command(0xf1, 1);
21636 + BUG();
21637 }
21638
21639 -static void mrst_reboot(void)
21640 +static __noreturn void mrst_reboot(void)
21641 {
21642 intel_scu_ipc_simple_command(0xf1, 0);
21643 + BUG();
21644 }
21645
21646 /*
21647 diff -urNp linux-3.0.4/arch/x86/platform/uv/tlb_uv.c linux-3.0.4/arch/x86/platform/uv/tlb_uv.c
21648 --- linux-3.0.4/arch/x86/platform/uv/tlb_uv.c 2011-07-21 22:17:23.000000000 -0400
21649 +++ linux-3.0.4/arch/x86/platform/uv/tlb_uv.c 2011-08-23 21:48:14.000000000 -0400
21650 @@ -373,6 +373,8 @@ static void reset_with_ipi(struct bau_ta
21651 cpumask_t mask;
21652 struct reset_args reset_args;
21653
21654 + pax_track_stack();
21655 +
21656 reset_args.sender = sender;
21657 cpus_clear(mask);
21658 /* find a single cpu for each uvhub in this distribution mask */
21659 diff -urNp linux-3.0.4/arch/x86/power/cpu.c linux-3.0.4/arch/x86/power/cpu.c
21660 --- linux-3.0.4/arch/x86/power/cpu.c 2011-07-21 22:17:23.000000000 -0400
21661 +++ linux-3.0.4/arch/x86/power/cpu.c 2011-08-23 21:47:55.000000000 -0400
21662 @@ -130,7 +130,7 @@ static void do_fpu_end(void)
21663 static void fix_processor_context(void)
21664 {
21665 int cpu = smp_processor_id();
21666 - struct tss_struct *t = &per_cpu(init_tss, cpu);
21667 + struct tss_struct *t = init_tss + cpu;
21668
21669 set_tss_desc(cpu, t); /*
21670 * This just modifies memory; should not be
21671 @@ -140,7 +140,9 @@ static void fix_processor_context(void)
21672 */
21673
21674 #ifdef CONFIG_X86_64
21675 + pax_open_kernel();
21676 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
21677 + pax_close_kernel();
21678
21679 syscall_init(); /* This sets MSR_*STAR and related */
21680 #endif
21681 diff -urNp linux-3.0.4/arch/x86/vdso/Makefile linux-3.0.4/arch/x86/vdso/Makefile
21682 --- linux-3.0.4/arch/x86/vdso/Makefile 2011-07-21 22:17:23.000000000 -0400
21683 +++ linux-3.0.4/arch/x86/vdso/Makefile 2011-08-23 21:47:55.000000000 -0400
21684 @@ -136,7 +136,7 @@ quiet_cmd_vdso = VDSO $@
21685 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
21686 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
21687
21688 -VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
21689 +VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
21690 GCOV_PROFILE := n
21691
21692 #
21693 diff -urNp linux-3.0.4/arch/x86/vdso/vdso32-setup.c linux-3.0.4/arch/x86/vdso/vdso32-setup.c
21694 --- linux-3.0.4/arch/x86/vdso/vdso32-setup.c 2011-07-21 22:17:23.000000000 -0400
21695 +++ linux-3.0.4/arch/x86/vdso/vdso32-setup.c 2011-08-23 21:47:55.000000000 -0400
21696 @@ -25,6 +25,7 @@
21697 #include <asm/tlbflush.h>
21698 #include <asm/vdso.h>
21699 #include <asm/proto.h>
21700 +#include <asm/mman.h>
21701
21702 enum {
21703 VDSO_DISABLED = 0,
21704 @@ -226,7 +227,7 @@ static inline void map_compat_vdso(int m
21705 void enable_sep_cpu(void)
21706 {
21707 int cpu = get_cpu();
21708 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
21709 + struct tss_struct *tss = init_tss + cpu;
21710
21711 if (!boot_cpu_has(X86_FEATURE_SEP)) {
21712 put_cpu();
21713 @@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
21714 gate_vma.vm_start = FIXADDR_USER_START;
21715 gate_vma.vm_end = FIXADDR_USER_END;
21716 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
21717 - gate_vma.vm_page_prot = __P101;
21718 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
21719 /*
21720 * Make sure the vDSO gets into every core dump.
21721 * Dumping its contents makes post-mortem fully interpretable later
21722 @@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct l
21723 if (compat)
21724 addr = VDSO_HIGH_BASE;
21725 else {
21726 - addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
21727 + addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
21728 if (IS_ERR_VALUE(addr)) {
21729 ret = addr;
21730 goto up_fail;
21731 }
21732 }
21733
21734 - current->mm->context.vdso = (void *)addr;
21735 + current->mm->context.vdso = addr;
21736
21737 if (compat_uses_vma || !compat) {
21738 /*
21739 @@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct l
21740 }
21741
21742 current_thread_info()->sysenter_return =
21743 - VDSO32_SYMBOL(addr, SYSENTER_RETURN);
21744 + (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
21745
21746 up_fail:
21747 if (ret)
21748 - current->mm->context.vdso = NULL;
21749 + current->mm->context.vdso = 0;
21750
21751 up_write(&mm->mmap_sem);
21752
21753 @@ -412,8 +413,14 @@ __initcall(ia32_binfmt_init);
21754
21755 const char *arch_vma_name(struct vm_area_struct *vma)
21756 {
21757 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
21758 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
21759 return "[vdso]";
21760 +
21761 +#ifdef CONFIG_PAX_SEGMEXEC
21762 + if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
21763 + return "[vdso]";
21764 +#endif
21765 +
21766 return NULL;
21767 }
21768
21769 @@ -423,7 +430,7 @@ struct vm_area_struct *get_gate_vma(stru
21770 * Check to see if the corresponding task was created in compat vdso
21771 * mode.
21772 */
21773 - if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
21774 + if (mm && mm->context.vdso == VDSO_HIGH_BASE)
21775 return &gate_vma;
21776 return NULL;
21777 }
21778 diff -urNp linux-3.0.4/arch/x86/vdso/vma.c linux-3.0.4/arch/x86/vdso/vma.c
21779 --- linux-3.0.4/arch/x86/vdso/vma.c 2011-07-21 22:17:23.000000000 -0400
21780 +++ linux-3.0.4/arch/x86/vdso/vma.c 2011-08-23 21:47:55.000000000 -0400
21781 @@ -15,18 +15,19 @@
21782 #include <asm/proto.h>
21783 #include <asm/vdso.h>
21784
21785 -unsigned int __read_mostly vdso_enabled = 1;
21786 -
21787 extern char vdso_start[], vdso_end[];
21788 extern unsigned short vdso_sync_cpuid;
21789 +extern char __vsyscall_0;
21790
21791 static struct page **vdso_pages;
21792 +static struct page *vsyscall_page;
21793 static unsigned vdso_size;
21794
21795 static int __init init_vdso_vars(void)
21796 {
21797 - int npages = (vdso_end - vdso_start + PAGE_SIZE - 1) / PAGE_SIZE;
21798 - int i;
21799 + size_t nbytes = vdso_end - vdso_start;
21800 + size_t npages = (nbytes + PAGE_SIZE - 1) / PAGE_SIZE;
21801 + size_t i;
21802
21803 vdso_size = npages << PAGE_SHIFT;
21804 vdso_pages = kmalloc(sizeof(struct page *) * npages, GFP_KERNEL);
21805 @@ -34,19 +35,19 @@ static int __init init_vdso_vars(void)
21806 goto oom;
21807 for (i = 0; i < npages; i++) {
21808 struct page *p;
21809 - p = alloc_page(GFP_KERNEL);
21810 + p = alloc_page(GFP_KERNEL | __GFP_ZERO);
21811 if (!p)
21812 goto oom;
21813 vdso_pages[i] = p;
21814 - copy_page(page_address(p), vdso_start + i*PAGE_SIZE);
21815 + memcpy(page_address(p), vdso_start + i*PAGE_SIZE, nbytes > PAGE_SIZE ? PAGE_SIZE : nbytes);
21816 + nbytes -= PAGE_SIZE;
21817 }
21818 + vsyscall_page = pfn_to_page((__pa_symbol(&__vsyscall_0)) >> PAGE_SHIFT);
21819
21820 return 0;
21821
21822 oom:
21823 - printk("Cannot allocate vdso\n");
21824 - vdso_enabled = 0;
21825 - return -ENOMEM;
21826 + panic("Cannot allocate vdso\n");
21827 }
21828 subsys_initcall(init_vdso_vars);
21829
21830 @@ -80,37 +81,35 @@ int arch_setup_additional_pages(struct l
21831 unsigned long addr;
21832 int ret;
21833
21834 - if (!vdso_enabled)
21835 - return 0;
21836 -
21837 down_write(&mm->mmap_sem);
21838 - addr = vdso_addr(mm->start_stack, vdso_size);
21839 - addr = get_unmapped_area(NULL, addr, vdso_size, 0, 0);
21840 + addr = vdso_addr(mm->start_stack, vdso_size + PAGE_SIZE);
21841 + addr = get_unmapped_area(NULL, addr, vdso_size + PAGE_SIZE, 0, 0);
21842 if (IS_ERR_VALUE(addr)) {
21843 ret = addr;
21844 goto up_fail;
21845 }
21846
21847 - current->mm->context.vdso = (void *)addr;
21848 + mm->context.vdso = addr + PAGE_SIZE;
21849
21850 - ret = install_special_mapping(mm, addr, vdso_size,
21851 + ret = install_special_mapping(mm, addr, PAGE_SIZE,
21852 VM_READ|VM_EXEC|
21853 - VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
21854 + VM_MAYREAD|VM_MAYEXEC|
21855 VM_ALWAYSDUMP,
21856 - vdso_pages);
21857 + &vsyscall_page);
21858 if (ret) {
21859 - current->mm->context.vdso = NULL;
21860 + mm->context.vdso = 0;
21861 goto up_fail;
21862 }
21863
21864 + ret = install_special_mapping(mm, addr + PAGE_SIZE, vdso_size,
21865 + VM_READ|VM_EXEC|
21866 + VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
21867 + VM_ALWAYSDUMP,
21868 + vdso_pages);
21869 + if (ret)
21870 + mm->context.vdso = 0;
21871 +
21872 up_fail:
21873 up_write(&mm->mmap_sem);
21874 return ret;
21875 }
21876 -
21877 -static __init int vdso_setup(char *s)
21878 -{
21879 - vdso_enabled = simple_strtoul(s, NULL, 0);
21880 - return 0;
21881 -}
21882 -__setup("vdso=", vdso_setup);
21883 diff -urNp linux-3.0.4/arch/x86/xen/enlighten.c linux-3.0.4/arch/x86/xen/enlighten.c
21884 --- linux-3.0.4/arch/x86/xen/enlighten.c 2011-09-02 18:11:26.000000000 -0400
21885 +++ linux-3.0.4/arch/x86/xen/enlighten.c 2011-08-29 23:26:21.000000000 -0400
21886 @@ -85,8 +85,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
21887
21888 struct shared_info xen_dummy_shared_info;
21889
21890 -void *xen_initial_gdt;
21891 -
21892 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
21893 __read_mostly int xen_have_vector_callback;
21894 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
21895 @@ -1010,7 +1008,7 @@ static const struct pv_apic_ops xen_apic
21896 #endif
21897 };
21898
21899 -static void xen_reboot(int reason)
21900 +static __noreturn void xen_reboot(int reason)
21901 {
21902 struct sched_shutdown r = { .reason = reason };
21903
21904 @@ -1018,17 +1016,17 @@ static void xen_reboot(int reason)
21905 BUG();
21906 }
21907
21908 -static void xen_restart(char *msg)
21909 +static __noreturn void xen_restart(char *msg)
21910 {
21911 xen_reboot(SHUTDOWN_reboot);
21912 }
21913
21914 -static void xen_emergency_restart(void)
21915 +static __noreturn void xen_emergency_restart(void)
21916 {
21917 xen_reboot(SHUTDOWN_reboot);
21918 }
21919
21920 -static void xen_machine_halt(void)
21921 +static __noreturn void xen_machine_halt(void)
21922 {
21923 xen_reboot(SHUTDOWN_poweroff);
21924 }
21925 @@ -1134,7 +1132,17 @@ asmlinkage void __init xen_start_kernel(
21926 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
21927
21928 /* Work out if we support NX */
21929 - x86_configure_nx();
21930 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
21931 + if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
21932 + (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
21933 + unsigned l, h;
21934 +
21935 + __supported_pte_mask |= _PAGE_NX;
21936 + rdmsr(MSR_EFER, l, h);
21937 + l |= EFER_NX;
21938 + wrmsr(MSR_EFER, l, h);
21939 + }
21940 +#endif
21941
21942 xen_setup_features();
21943
21944 @@ -1165,13 +1173,6 @@ asmlinkage void __init xen_start_kernel(
21945
21946 machine_ops = xen_machine_ops;
21947
21948 - /*
21949 - * The only reliable way to retain the initial address of the
21950 - * percpu gdt_page is to remember it here, so we can go and
21951 - * mark it RW later, when the initial percpu area is freed.
21952 - */
21953 - xen_initial_gdt = &per_cpu(gdt_page, 0);
21954 -
21955 xen_smp_init();
21956
21957 #ifdef CONFIG_ACPI_NUMA
21958 diff -urNp linux-3.0.4/arch/x86/xen/mmu.c linux-3.0.4/arch/x86/xen/mmu.c
21959 --- linux-3.0.4/arch/x86/xen/mmu.c 2011-09-02 18:11:26.000000000 -0400
21960 +++ linux-3.0.4/arch/x86/xen/mmu.c 2011-08-29 23:26:21.000000000 -0400
21961 @@ -1683,6 +1683,8 @@ pgd_t * __init xen_setup_kernel_pagetabl
21962 convert_pfn_mfn(init_level4_pgt);
21963 convert_pfn_mfn(level3_ident_pgt);
21964 convert_pfn_mfn(level3_kernel_pgt);
21965 + convert_pfn_mfn(level3_vmalloc_pgt);
21966 + convert_pfn_mfn(level3_vmemmap_pgt);
21967
21968 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
21969 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
21970 @@ -1701,7 +1703,10 @@ pgd_t * __init xen_setup_kernel_pagetabl
21971 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
21972 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
21973 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
21974 + set_page_prot(level3_vmalloc_pgt, PAGE_KERNEL_RO);
21975 + set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
21976 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
21977 + set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
21978 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
21979 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
21980
21981 @@ -1913,6 +1918,7 @@ static void __init xen_post_allocator_in
21982 pv_mmu_ops.set_pud = xen_set_pud;
21983 #if PAGETABLE_LEVELS == 4
21984 pv_mmu_ops.set_pgd = xen_set_pgd;
21985 + pv_mmu_ops.set_pgd_batched = xen_set_pgd;
21986 #endif
21987
21988 /* This will work as long as patching hasn't happened yet
21989 @@ -1994,6 +2000,7 @@ static const struct pv_mmu_ops xen_mmu_o
21990 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
21991 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
21992 .set_pgd = xen_set_pgd_hyper,
21993 + .set_pgd_batched = xen_set_pgd_hyper,
21994
21995 .alloc_pud = xen_alloc_pmd_init,
21996 .release_pud = xen_release_pmd_init,
21997 diff -urNp linux-3.0.4/arch/x86/xen/smp.c linux-3.0.4/arch/x86/xen/smp.c
21998 --- linux-3.0.4/arch/x86/xen/smp.c 2011-09-02 18:11:26.000000000 -0400
21999 +++ linux-3.0.4/arch/x86/xen/smp.c 2011-08-29 23:26:21.000000000 -0400
22000 @@ -193,11 +193,6 @@ static void __init xen_smp_prepare_boot_
22001 {
22002 BUG_ON(smp_processor_id() != 0);
22003 native_smp_prepare_boot_cpu();
22004 -
22005 - /* We've switched to the "real" per-cpu gdt, so make sure the
22006 - old memory can be recycled */
22007 - make_lowmem_page_readwrite(xen_initial_gdt);
22008 -
22009 xen_filter_cpu_maps();
22010 xen_setup_vcpu_info_placement();
22011 }
22012 @@ -265,12 +260,12 @@ cpu_initialize_context(unsigned int cpu,
22013 gdt = get_cpu_gdt_table(cpu);
22014
22015 ctxt->flags = VGCF_IN_KERNEL;
22016 - ctxt->user_regs.ds = __USER_DS;
22017 - ctxt->user_regs.es = __USER_DS;
22018 + ctxt->user_regs.ds = __KERNEL_DS;
22019 + ctxt->user_regs.es = __KERNEL_DS;
22020 ctxt->user_regs.ss = __KERNEL_DS;
22021 #ifdef CONFIG_X86_32
22022 ctxt->user_regs.fs = __KERNEL_PERCPU;
22023 - ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
22024 + savesegment(gs, ctxt->user_regs.gs);
22025 #else
22026 ctxt->gs_base_kernel = per_cpu_offset(cpu);
22027 #endif
22028 @@ -321,13 +316,12 @@ static int __cpuinit xen_cpu_up(unsigned
22029 int rc;
22030
22031 per_cpu(current_task, cpu) = idle;
22032 + per_cpu(current_tinfo, cpu) = &idle->tinfo;
22033 #ifdef CONFIG_X86_32
22034 irq_ctx_init(cpu);
22035 #else
22036 clear_tsk_thread_flag(idle, TIF_FORK);
22037 - per_cpu(kernel_stack, cpu) =
22038 - (unsigned long)task_stack_page(idle) -
22039 - KERNEL_STACK_OFFSET + THREAD_SIZE;
22040 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
22041 #endif
22042 xen_setup_runstate_info(cpu);
22043 xen_setup_timer(cpu);
22044 diff -urNp linux-3.0.4/arch/x86/xen/xen-asm_32.S linux-3.0.4/arch/x86/xen/xen-asm_32.S
22045 --- linux-3.0.4/arch/x86/xen/xen-asm_32.S 2011-07-21 22:17:23.000000000 -0400
22046 +++ linux-3.0.4/arch/x86/xen/xen-asm_32.S 2011-08-23 21:47:55.000000000 -0400
22047 @@ -83,14 +83,14 @@ ENTRY(xen_iret)
22048 ESP_OFFSET=4 # bytes pushed onto stack
22049
22050 /*
22051 - * Store vcpu_info pointer for easy access. Do it this way to
22052 - * avoid having to reload %fs
22053 + * Store vcpu_info pointer for easy access.
22054 */
22055 #ifdef CONFIG_SMP
22056 - GET_THREAD_INFO(%eax)
22057 - movl TI_cpu(%eax), %eax
22058 - movl __per_cpu_offset(,%eax,4), %eax
22059 - mov xen_vcpu(%eax), %eax
22060 + push %fs
22061 + mov $(__KERNEL_PERCPU), %eax
22062 + mov %eax, %fs
22063 + mov PER_CPU_VAR(xen_vcpu), %eax
22064 + pop %fs
22065 #else
22066 movl xen_vcpu, %eax
22067 #endif
22068 diff -urNp linux-3.0.4/arch/x86/xen/xen-head.S linux-3.0.4/arch/x86/xen/xen-head.S
22069 --- linux-3.0.4/arch/x86/xen/xen-head.S 2011-07-21 22:17:23.000000000 -0400
22070 +++ linux-3.0.4/arch/x86/xen/xen-head.S 2011-08-23 21:47:55.000000000 -0400
22071 @@ -19,6 +19,17 @@ ENTRY(startup_xen)
22072 #ifdef CONFIG_X86_32
22073 mov %esi,xen_start_info
22074 mov $init_thread_union+THREAD_SIZE,%esp
22075 +#ifdef CONFIG_SMP
22076 + movl $cpu_gdt_table,%edi
22077 + movl $__per_cpu_load,%eax
22078 + movw %ax,__KERNEL_PERCPU + 2(%edi)
22079 + rorl $16,%eax
22080 + movb %al,__KERNEL_PERCPU + 4(%edi)
22081 + movb %ah,__KERNEL_PERCPU + 7(%edi)
22082 + movl $__per_cpu_end - 1,%eax
22083 + subl $__per_cpu_start,%eax
22084 + movw %ax,__KERNEL_PERCPU + 0(%edi)
22085 +#endif
22086 #else
22087 mov %rsi,xen_start_info
22088 mov $init_thread_union+THREAD_SIZE,%rsp
22089 diff -urNp linux-3.0.4/arch/x86/xen/xen-ops.h linux-3.0.4/arch/x86/xen/xen-ops.h
22090 --- linux-3.0.4/arch/x86/xen/xen-ops.h 2011-09-02 18:11:21.000000000 -0400
22091 +++ linux-3.0.4/arch/x86/xen/xen-ops.h 2011-08-23 21:47:55.000000000 -0400
22092 @@ -10,8 +10,6 @@
22093 extern const char xen_hypervisor_callback[];
22094 extern const char xen_failsafe_callback[];
22095
22096 -extern void *xen_initial_gdt;
22097 -
22098 struct trap_info;
22099 void xen_copy_trap_info(struct trap_info *traps);
22100
22101 diff -urNp linux-3.0.4/block/blk-iopoll.c linux-3.0.4/block/blk-iopoll.c
22102 --- linux-3.0.4/block/blk-iopoll.c 2011-07-21 22:17:23.000000000 -0400
22103 +++ linux-3.0.4/block/blk-iopoll.c 2011-08-23 21:47:55.000000000 -0400
22104 @@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopo
22105 }
22106 EXPORT_SYMBOL(blk_iopoll_complete);
22107
22108 -static void blk_iopoll_softirq(struct softirq_action *h)
22109 +static void blk_iopoll_softirq(void)
22110 {
22111 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
22112 int rearm = 0, budget = blk_iopoll_budget;
22113 diff -urNp linux-3.0.4/block/blk-map.c linux-3.0.4/block/blk-map.c
22114 --- linux-3.0.4/block/blk-map.c 2011-07-21 22:17:23.000000000 -0400
22115 +++ linux-3.0.4/block/blk-map.c 2011-08-23 21:47:55.000000000 -0400
22116 @@ -301,7 +301,7 @@ int blk_rq_map_kern(struct request_queue
22117 if (!len || !kbuf)
22118 return -EINVAL;
22119
22120 - do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
22121 + do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
22122 if (do_copy)
22123 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
22124 else
22125 diff -urNp linux-3.0.4/block/blk-softirq.c linux-3.0.4/block/blk-softirq.c
22126 --- linux-3.0.4/block/blk-softirq.c 2011-07-21 22:17:23.000000000 -0400
22127 +++ linux-3.0.4/block/blk-softirq.c 2011-08-23 21:47:55.000000000 -0400
22128 @@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head,
22129 * Softirq action handler - move entries to local list and loop over them
22130 * while passing them to the queue registered handler.
22131 */
22132 -static void blk_done_softirq(struct softirq_action *h)
22133 +static void blk_done_softirq(void)
22134 {
22135 struct list_head *cpu_list, local_list;
22136
22137 diff -urNp linux-3.0.4/block/bsg.c linux-3.0.4/block/bsg.c
22138 --- linux-3.0.4/block/bsg.c 2011-07-21 22:17:23.000000000 -0400
22139 +++ linux-3.0.4/block/bsg.c 2011-08-23 21:47:55.000000000 -0400
22140 @@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct r
22141 struct sg_io_v4 *hdr, struct bsg_device *bd,
22142 fmode_t has_write_perm)
22143 {
22144 + unsigned char tmpcmd[sizeof(rq->__cmd)];
22145 + unsigned char *cmdptr;
22146 +
22147 if (hdr->request_len > BLK_MAX_CDB) {
22148 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
22149 if (!rq->cmd)
22150 return -ENOMEM;
22151 - }
22152 + cmdptr = rq->cmd;
22153 + } else
22154 + cmdptr = tmpcmd;
22155
22156 - if (copy_from_user(rq->cmd, (void *)(unsigned long)hdr->request,
22157 + if (copy_from_user(cmdptr, (void *)(unsigned long)hdr->request,
22158 hdr->request_len))
22159 return -EFAULT;
22160
22161 + if (cmdptr != rq->cmd)
22162 + memcpy(rq->cmd, cmdptr, hdr->request_len);
22163 +
22164 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
22165 if (blk_verify_command(rq->cmd, has_write_perm))
22166 return -EPERM;
22167 diff -urNp linux-3.0.4/block/scsi_ioctl.c linux-3.0.4/block/scsi_ioctl.c
22168 --- linux-3.0.4/block/scsi_ioctl.c 2011-07-21 22:17:23.000000000 -0400
22169 +++ linux-3.0.4/block/scsi_ioctl.c 2011-08-23 21:47:55.000000000 -0400
22170 @@ -222,8 +222,20 @@ EXPORT_SYMBOL(blk_verify_command);
22171 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
22172 struct sg_io_hdr *hdr, fmode_t mode)
22173 {
22174 - if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
22175 + unsigned char tmpcmd[sizeof(rq->__cmd)];
22176 + unsigned char *cmdptr;
22177 +
22178 + if (rq->cmd != rq->__cmd)
22179 + cmdptr = rq->cmd;
22180 + else
22181 + cmdptr = tmpcmd;
22182 +
22183 + if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
22184 return -EFAULT;
22185 +
22186 + if (cmdptr != rq->cmd)
22187 + memcpy(rq->cmd, cmdptr, hdr->cmd_len);
22188 +
22189 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
22190 return -EPERM;
22191
22192 @@ -432,6 +444,8 @@ int sg_scsi_ioctl(struct request_queue *
22193 int err;
22194 unsigned int in_len, out_len, bytes, opcode, cmdlen;
22195 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
22196 + unsigned char tmpcmd[sizeof(rq->__cmd)];
22197 + unsigned char *cmdptr;
22198
22199 if (!sic)
22200 return -EINVAL;
22201 @@ -465,9 +479,18 @@ int sg_scsi_ioctl(struct request_queue *
22202 */
22203 err = -EFAULT;
22204 rq->cmd_len = cmdlen;
22205 - if (copy_from_user(rq->cmd, sic->data, cmdlen))
22206 +
22207 + if (rq->cmd != rq->__cmd)
22208 + cmdptr = rq->cmd;
22209 + else
22210 + cmdptr = tmpcmd;
22211 +
22212 + if (copy_from_user(cmdptr, sic->data, cmdlen))
22213 goto error;
22214
22215 + if (rq->cmd != cmdptr)
22216 + memcpy(rq->cmd, cmdptr, cmdlen);
22217 +
22218 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
22219 goto error;
22220
22221 diff -urNp linux-3.0.4/crypto/cryptd.c linux-3.0.4/crypto/cryptd.c
22222 --- linux-3.0.4/crypto/cryptd.c 2011-07-21 22:17:23.000000000 -0400
22223 +++ linux-3.0.4/crypto/cryptd.c 2011-08-23 21:47:55.000000000 -0400
22224 @@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
22225
22226 struct cryptd_blkcipher_request_ctx {
22227 crypto_completion_t complete;
22228 -};
22229 +} __no_const;
22230
22231 struct cryptd_hash_ctx {
22232 struct crypto_shash *child;
22233 @@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
22234
22235 struct cryptd_aead_request_ctx {
22236 crypto_completion_t complete;
22237 -};
22238 +} __no_const;
22239
22240 static void cryptd_queue_worker(struct work_struct *work);
22241
22242 diff -urNp linux-3.0.4/crypto/gf128mul.c linux-3.0.4/crypto/gf128mul.c
22243 --- linux-3.0.4/crypto/gf128mul.c 2011-07-21 22:17:23.000000000 -0400
22244 +++ linux-3.0.4/crypto/gf128mul.c 2011-08-23 21:47:55.000000000 -0400
22245 @@ -182,7 +182,7 @@ void gf128mul_lle(be128 *r, const be128
22246 for (i = 0; i < 7; ++i)
22247 gf128mul_x_lle(&p[i + 1], &p[i]);
22248
22249 - memset(r, 0, sizeof(r));
22250 + memset(r, 0, sizeof(*r));
22251 for (i = 0;;) {
22252 u8 ch = ((u8 *)b)[15 - i];
22253
22254 @@ -220,7 +220,7 @@ void gf128mul_bbe(be128 *r, const be128
22255 for (i = 0; i < 7; ++i)
22256 gf128mul_x_bbe(&p[i + 1], &p[i]);
22257
22258 - memset(r, 0, sizeof(r));
22259 + memset(r, 0, sizeof(*r));
22260 for (i = 0;;) {
22261 u8 ch = ((u8 *)b)[i];
22262
22263 diff -urNp linux-3.0.4/crypto/serpent.c linux-3.0.4/crypto/serpent.c
22264 --- linux-3.0.4/crypto/serpent.c 2011-07-21 22:17:23.000000000 -0400
22265 +++ linux-3.0.4/crypto/serpent.c 2011-08-23 21:48:14.000000000 -0400
22266 @@ -224,6 +224,8 @@ static int serpent_setkey(struct crypto_
22267 u32 r0,r1,r2,r3,r4;
22268 int i;
22269
22270 + pax_track_stack();
22271 +
22272 /* Copy key, add padding */
22273
22274 for (i = 0; i < keylen; ++i)
22275 diff -urNp linux-3.0.4/Documentation/dontdiff linux-3.0.4/Documentation/dontdiff
22276 --- linux-3.0.4/Documentation/dontdiff 2011-07-21 22:17:23.000000000 -0400
22277 +++ linux-3.0.4/Documentation/dontdiff 2011-08-23 21:47:55.000000000 -0400
22278 @@ -5,6 +5,7 @@
22279 *.cis
22280 *.cpio
22281 *.csp
22282 +*.dbg
22283 *.dsp
22284 *.dvi
22285 *.elf
22286 @@ -48,9 +49,11 @@
22287 *.tab.h
22288 *.tex
22289 *.ver
22290 +*.vim
22291 *.xml
22292 *.xz
22293 *_MODULES
22294 +*_reg_safe.h
22295 *_vga16.c
22296 *~
22297 \#*#
22298 @@ -70,6 +73,7 @@ Kerntypes
22299 Module.markers
22300 Module.symvers
22301 PENDING
22302 +PERF*
22303 SCCS
22304 System.map*
22305 TAGS
22306 @@ -98,6 +102,8 @@ bzImage*
22307 capability_names.h
22308 capflags.c
22309 classlist.h*
22310 +clut_vga16.c
22311 +common-cmds.h
22312 comp*.log
22313 compile.h*
22314 conf
22315 @@ -126,12 +132,14 @@ fore200e_pca_fw.c*
22316 gconf
22317 gconf.glade.h
22318 gen-devlist
22319 +gen-kdb_cmds.c
22320 gen_crc32table
22321 gen_init_cpio
22322 generated
22323 genheaders
22324 genksyms
22325 *_gray256.c
22326 +hash
22327 hpet_example
22328 hugepage-mmap
22329 hugepage-shm
22330 @@ -146,7 +154,6 @@ int32.c
22331 int4.c
22332 int8.c
22333 kallsyms
22334 -kconfig
22335 keywords.c
22336 ksym.c*
22337 ksym.h*
22338 @@ -154,7 +161,6 @@ kxgettext
22339 lkc_defs.h
22340 lex.c
22341 lex.*.c
22342 -linux
22343 logo_*.c
22344 logo_*_clut224.c
22345 logo_*_mono.c
22346 @@ -174,6 +180,7 @@ mkboot
22347 mkbugboot
22348 mkcpustr
22349 mkdep
22350 +mkpiggy
22351 mkprep
22352 mkregtable
22353 mktables
22354 @@ -209,6 +216,7 @@ r300_reg_safe.h
22355 r420_reg_safe.h
22356 r600_reg_safe.h
22357 recordmcount
22358 +regdb.c
22359 relocs
22360 rlim_names.h
22361 rn50_reg_safe.h
22362 @@ -219,6 +227,7 @@ setup
22363 setup.bin
22364 setup.elf
22365 sImage
22366 +slabinfo
22367 sm_tbl*
22368 split-include
22369 syscalltab.h
22370 @@ -246,7 +255,9 @@ vmlinux
22371 vmlinux-*
22372 vmlinux.aout
22373 vmlinux.bin.all
22374 +vmlinux.bin.bz2
22375 vmlinux.lds
22376 +vmlinux.relocs
22377 vmlinuz
22378 voffset.h
22379 vsyscall.lds
22380 @@ -254,6 +265,7 @@ vsyscall_32.lds
22381 wanxlfw.inc
22382 uImage
22383 unifdef
22384 +utsrelease.h
22385 wakeup.bin
22386 wakeup.elf
22387 wakeup.lds
22388 diff -urNp linux-3.0.4/Documentation/kernel-parameters.txt linux-3.0.4/Documentation/kernel-parameters.txt
22389 --- linux-3.0.4/Documentation/kernel-parameters.txt 2011-07-21 22:17:23.000000000 -0400
22390 +++ linux-3.0.4/Documentation/kernel-parameters.txt 2011-08-23 21:47:55.000000000 -0400
22391 @@ -1883,6 +1883,13 @@ bytes respectively. Such letter suffixes
22392 the specified number of seconds. This is to be used if
22393 your oopses keep scrolling off the screen.
22394
22395 + pax_nouderef [X86] disables UDEREF. Most likely needed under certain
22396 + virtualization environments that don't cope well with the
22397 + expand down segment used by UDEREF on X86-32 or the frequent
22398 + page table updates on X86-64.
22399 +
22400 + pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
22401 +
22402 pcbit= [HW,ISDN]
22403
22404 pcd. [PARIDE]
22405 diff -urNp linux-3.0.4/drivers/acpi/apei/cper.c linux-3.0.4/drivers/acpi/apei/cper.c
22406 --- linux-3.0.4/drivers/acpi/apei/cper.c 2011-07-21 22:17:23.000000000 -0400
22407 +++ linux-3.0.4/drivers/acpi/apei/cper.c 2011-08-23 21:47:55.000000000 -0400
22408 @@ -38,12 +38,12 @@
22409 */
22410 u64 cper_next_record_id(void)
22411 {
22412 - static atomic64_t seq;
22413 + static atomic64_unchecked_t seq;
22414
22415 - if (!atomic64_read(&seq))
22416 - atomic64_set(&seq, ((u64)get_seconds()) << 32);
22417 + if (!atomic64_read_unchecked(&seq))
22418 + atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
22419
22420 - return atomic64_inc_return(&seq);
22421 + return atomic64_inc_return_unchecked(&seq);
22422 }
22423 EXPORT_SYMBOL_GPL(cper_next_record_id);
22424
22425 diff -urNp linux-3.0.4/drivers/acpi/ec_sys.c linux-3.0.4/drivers/acpi/ec_sys.c
22426 --- linux-3.0.4/drivers/acpi/ec_sys.c 2011-07-21 22:17:23.000000000 -0400
22427 +++ linux-3.0.4/drivers/acpi/ec_sys.c 2011-08-24 19:06:55.000000000 -0400
22428 @@ -11,6 +11,7 @@
22429 #include <linux/kernel.h>
22430 #include <linux/acpi.h>
22431 #include <linux/debugfs.h>
22432 +#include <asm/uaccess.h>
22433 #include "internal.h"
22434
22435 MODULE_AUTHOR("Thomas Renninger <trenn@suse.de>");
22436 @@ -39,7 +40,7 @@ static ssize_t acpi_ec_read_io(struct fi
22437 * struct acpi_ec *ec = ((struct seq_file *)f->private_data)->private;
22438 */
22439 unsigned int size = EC_SPACE_SIZE;
22440 - u8 *data = (u8 *) buf;
22441 + u8 data;
22442 loff_t init_off = *off;
22443 int err = 0;
22444
22445 @@ -52,9 +53,11 @@ static ssize_t acpi_ec_read_io(struct fi
22446 size = count;
22447
22448 while (size) {
22449 - err = ec_read(*off, &data[*off - init_off]);
22450 + err = ec_read(*off, &data);
22451 if (err)
22452 return err;
22453 + if (put_user(data, &buf[*off - init_off]))
22454 + return -EFAULT;
22455 *off += 1;
22456 size--;
22457 }
22458 @@ -70,7 +73,6 @@ static ssize_t acpi_ec_write_io(struct f
22459
22460 unsigned int size = count;
22461 loff_t init_off = *off;
22462 - u8 *data = (u8 *) buf;
22463 int err = 0;
22464
22465 if (*off >= EC_SPACE_SIZE)
22466 @@ -81,7 +83,9 @@ static ssize_t acpi_ec_write_io(struct f
22467 }
22468
22469 while (size) {
22470 - u8 byte_write = data[*off - init_off];
22471 + u8 byte_write;
22472 + if (get_user(byte_write, &buf[*off - init_off]))
22473 + return -EFAULT;
22474 err = ec_write(*off, byte_write);
22475 if (err)
22476 return err;
22477 diff -urNp linux-3.0.4/drivers/acpi/proc.c linux-3.0.4/drivers/acpi/proc.c
22478 --- linux-3.0.4/drivers/acpi/proc.c 2011-07-21 22:17:23.000000000 -0400
22479 +++ linux-3.0.4/drivers/acpi/proc.c 2011-08-23 21:47:55.000000000 -0400
22480 @@ -342,19 +342,13 @@ acpi_system_write_wakeup_device(struct f
22481 size_t count, loff_t * ppos)
22482 {
22483 struct list_head *node, *next;
22484 - char strbuf[5];
22485 - char str[5] = "";
22486 - unsigned int len = count;
22487 -
22488 - if (len > 4)
22489 - len = 4;
22490 - if (len < 0)
22491 - return -EFAULT;
22492 + char strbuf[5] = {0};
22493
22494 - if (copy_from_user(strbuf, buffer, len))
22495 + if (count > 4)
22496 + count = 4;
22497 + if (copy_from_user(strbuf, buffer, count))
22498 return -EFAULT;
22499 - strbuf[len] = '\0';
22500 - sscanf(strbuf, "%s", str);
22501 + strbuf[count] = '\0';
22502
22503 mutex_lock(&acpi_device_lock);
22504 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
22505 @@ -363,7 +357,7 @@ acpi_system_write_wakeup_device(struct f
22506 if (!dev->wakeup.flags.valid)
22507 continue;
22508
22509 - if (!strncmp(dev->pnp.bus_id, str, 4)) {
22510 + if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
22511 if (device_can_wakeup(&dev->dev)) {
22512 bool enable = !device_may_wakeup(&dev->dev);
22513 device_set_wakeup_enable(&dev->dev, enable);
22514 diff -urNp linux-3.0.4/drivers/acpi/processor_driver.c linux-3.0.4/drivers/acpi/processor_driver.c
22515 --- linux-3.0.4/drivers/acpi/processor_driver.c 2011-07-21 22:17:23.000000000 -0400
22516 +++ linux-3.0.4/drivers/acpi/processor_driver.c 2011-08-23 21:47:55.000000000 -0400
22517 @@ -473,7 +473,7 @@ static int __cpuinit acpi_processor_add(
22518 return 0;
22519 #endif
22520
22521 - BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
22522 + BUG_ON(pr->id >= nr_cpu_ids);
22523
22524 /*
22525 * Buggy BIOS check
22526 diff -urNp linux-3.0.4/drivers/ata/libata-core.c linux-3.0.4/drivers/ata/libata-core.c
22527 --- linux-3.0.4/drivers/ata/libata-core.c 2011-07-21 22:17:23.000000000 -0400
22528 +++ linux-3.0.4/drivers/ata/libata-core.c 2011-08-23 21:47:55.000000000 -0400
22529 @@ -4753,7 +4753,7 @@ void ata_qc_free(struct ata_queued_cmd *
22530 struct ata_port *ap;
22531 unsigned int tag;
22532
22533 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
22534 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
22535 ap = qc->ap;
22536
22537 qc->flags = 0;
22538 @@ -4769,7 +4769,7 @@ void __ata_qc_complete(struct ata_queued
22539 struct ata_port *ap;
22540 struct ata_link *link;
22541
22542 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
22543 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
22544 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
22545 ap = qc->ap;
22546 link = qc->dev->link;
22547 @@ -5774,6 +5774,7 @@ static void ata_finalize_port_ops(struct
22548 return;
22549
22550 spin_lock(&lock);
22551 + pax_open_kernel();
22552
22553 for (cur = ops->inherits; cur; cur = cur->inherits) {
22554 void **inherit = (void **)cur;
22555 @@ -5787,8 +5788,9 @@ static void ata_finalize_port_ops(struct
22556 if (IS_ERR(*pp))
22557 *pp = NULL;
22558
22559 - ops->inherits = NULL;
22560 + *(struct ata_port_operations **)&ops->inherits = NULL;
22561
22562 + pax_close_kernel();
22563 spin_unlock(&lock);
22564 }
22565
22566 diff -urNp linux-3.0.4/drivers/ata/libata-eh.c linux-3.0.4/drivers/ata/libata-eh.c
22567 --- linux-3.0.4/drivers/ata/libata-eh.c 2011-07-21 22:17:23.000000000 -0400
22568 +++ linux-3.0.4/drivers/ata/libata-eh.c 2011-08-23 21:48:14.000000000 -0400
22569 @@ -2518,6 +2518,8 @@ void ata_eh_report(struct ata_port *ap)
22570 {
22571 struct ata_link *link;
22572
22573 + pax_track_stack();
22574 +
22575 ata_for_each_link(link, ap, HOST_FIRST)
22576 ata_eh_link_report(link);
22577 }
22578 diff -urNp linux-3.0.4/drivers/ata/pata_arasan_cf.c linux-3.0.4/drivers/ata/pata_arasan_cf.c
22579 --- linux-3.0.4/drivers/ata/pata_arasan_cf.c 2011-07-21 22:17:23.000000000 -0400
22580 +++ linux-3.0.4/drivers/ata/pata_arasan_cf.c 2011-08-23 21:47:55.000000000 -0400
22581 @@ -862,7 +862,9 @@ static int __devinit arasan_cf_probe(str
22582 /* Handle platform specific quirks */
22583 if (pdata->quirk) {
22584 if (pdata->quirk & CF_BROKEN_PIO) {
22585 - ap->ops->set_piomode = NULL;
22586 + pax_open_kernel();
22587 + *(void **)&ap->ops->set_piomode = NULL;
22588 + pax_close_kernel();
22589 ap->pio_mask = 0;
22590 }
22591 if (pdata->quirk & CF_BROKEN_MWDMA)
22592 diff -urNp linux-3.0.4/drivers/atm/adummy.c linux-3.0.4/drivers/atm/adummy.c
22593 --- linux-3.0.4/drivers/atm/adummy.c 2011-07-21 22:17:23.000000000 -0400
22594 +++ linux-3.0.4/drivers/atm/adummy.c 2011-08-23 21:47:55.000000000 -0400
22595 @@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct
22596 vcc->pop(vcc, skb);
22597 else
22598 dev_kfree_skb_any(skb);
22599 - atomic_inc(&vcc->stats->tx);
22600 + atomic_inc_unchecked(&vcc->stats->tx);
22601
22602 return 0;
22603 }
22604 diff -urNp linux-3.0.4/drivers/atm/ambassador.c linux-3.0.4/drivers/atm/ambassador.c
22605 --- linux-3.0.4/drivers/atm/ambassador.c 2011-07-21 22:17:23.000000000 -0400
22606 +++ linux-3.0.4/drivers/atm/ambassador.c 2011-08-23 21:47:55.000000000 -0400
22607 @@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev,
22608 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
22609
22610 // VC layer stats
22611 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
22612 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
22613
22614 // free the descriptor
22615 kfree (tx_descr);
22616 @@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev,
22617 dump_skb ("<<<", vc, skb);
22618
22619 // VC layer stats
22620 - atomic_inc(&atm_vcc->stats->rx);
22621 + atomic_inc_unchecked(&atm_vcc->stats->rx);
22622 __net_timestamp(skb);
22623 // end of our responsibility
22624 atm_vcc->push (atm_vcc, skb);
22625 @@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev,
22626 } else {
22627 PRINTK (KERN_INFO, "dropped over-size frame");
22628 // should we count this?
22629 - atomic_inc(&atm_vcc->stats->rx_drop);
22630 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
22631 }
22632
22633 } else {
22634 @@ -1342,7 +1342,7 @@ static int amb_send (struct atm_vcc * at
22635 }
22636
22637 if (check_area (skb->data, skb->len)) {
22638 - atomic_inc(&atm_vcc->stats->tx_err);
22639 + atomic_inc_unchecked(&atm_vcc->stats->tx_err);
22640 return -ENOMEM; // ?
22641 }
22642
22643 diff -urNp linux-3.0.4/drivers/atm/atmtcp.c linux-3.0.4/drivers/atm/atmtcp.c
22644 --- linux-3.0.4/drivers/atm/atmtcp.c 2011-07-21 22:17:23.000000000 -0400
22645 +++ linux-3.0.4/drivers/atm/atmtcp.c 2011-08-23 21:47:55.000000000 -0400
22646 @@ -207,7 +207,7 @@ static int atmtcp_v_send(struct atm_vcc
22647 if (vcc->pop) vcc->pop(vcc,skb);
22648 else dev_kfree_skb(skb);
22649 if (dev_data) return 0;
22650 - atomic_inc(&vcc->stats->tx_err);
22651 + atomic_inc_unchecked(&vcc->stats->tx_err);
22652 return -ENOLINK;
22653 }
22654 size = skb->len+sizeof(struct atmtcp_hdr);
22655 @@ -215,7 +215,7 @@ static int atmtcp_v_send(struct atm_vcc
22656 if (!new_skb) {
22657 if (vcc->pop) vcc->pop(vcc,skb);
22658 else dev_kfree_skb(skb);
22659 - atomic_inc(&vcc->stats->tx_err);
22660 + atomic_inc_unchecked(&vcc->stats->tx_err);
22661 return -ENOBUFS;
22662 }
22663 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
22664 @@ -226,8 +226,8 @@ static int atmtcp_v_send(struct atm_vcc
22665 if (vcc->pop) vcc->pop(vcc,skb);
22666 else dev_kfree_skb(skb);
22667 out_vcc->push(out_vcc,new_skb);
22668 - atomic_inc(&vcc->stats->tx);
22669 - atomic_inc(&out_vcc->stats->rx);
22670 + atomic_inc_unchecked(&vcc->stats->tx);
22671 + atomic_inc_unchecked(&out_vcc->stats->rx);
22672 return 0;
22673 }
22674
22675 @@ -301,7 +301,7 @@ static int atmtcp_c_send(struct atm_vcc
22676 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
22677 read_unlock(&vcc_sklist_lock);
22678 if (!out_vcc) {
22679 - atomic_inc(&vcc->stats->tx_err);
22680 + atomic_inc_unchecked(&vcc->stats->tx_err);
22681 goto done;
22682 }
22683 skb_pull(skb,sizeof(struct atmtcp_hdr));
22684 @@ -313,8 +313,8 @@ static int atmtcp_c_send(struct atm_vcc
22685 __net_timestamp(new_skb);
22686 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
22687 out_vcc->push(out_vcc,new_skb);
22688 - atomic_inc(&vcc->stats->tx);
22689 - atomic_inc(&out_vcc->stats->rx);
22690 + atomic_inc_unchecked(&vcc->stats->tx);
22691 + atomic_inc_unchecked(&out_vcc->stats->rx);
22692 done:
22693 if (vcc->pop) vcc->pop(vcc,skb);
22694 else dev_kfree_skb(skb);
22695 diff -urNp linux-3.0.4/drivers/atm/eni.c linux-3.0.4/drivers/atm/eni.c
22696 --- linux-3.0.4/drivers/atm/eni.c 2011-07-21 22:17:23.000000000 -0400
22697 +++ linux-3.0.4/drivers/atm/eni.c 2011-08-23 21:47:55.000000000 -0400
22698 @@ -526,7 +526,7 @@ static int rx_aal0(struct atm_vcc *vcc)
22699 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
22700 vcc->dev->number);
22701 length = 0;
22702 - atomic_inc(&vcc->stats->rx_err);
22703 + atomic_inc_unchecked(&vcc->stats->rx_err);
22704 }
22705 else {
22706 length = ATM_CELL_SIZE-1; /* no HEC */
22707 @@ -581,7 +581,7 @@ static int rx_aal5(struct atm_vcc *vcc)
22708 size);
22709 }
22710 eff = length = 0;
22711 - atomic_inc(&vcc->stats->rx_err);
22712 + atomic_inc_unchecked(&vcc->stats->rx_err);
22713 }
22714 else {
22715 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
22716 @@ -598,7 +598,7 @@ static int rx_aal5(struct atm_vcc *vcc)
22717 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
22718 vcc->dev->number,vcc->vci,length,size << 2,descr);
22719 length = eff = 0;
22720 - atomic_inc(&vcc->stats->rx_err);
22721 + atomic_inc_unchecked(&vcc->stats->rx_err);
22722 }
22723 }
22724 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
22725 @@ -771,7 +771,7 @@ rx_dequeued++;
22726 vcc->push(vcc,skb);
22727 pushed++;
22728 }
22729 - atomic_inc(&vcc->stats->rx);
22730 + atomic_inc_unchecked(&vcc->stats->rx);
22731 }
22732 wake_up(&eni_dev->rx_wait);
22733 }
22734 @@ -1228,7 +1228,7 @@ static void dequeue_tx(struct atm_dev *d
22735 PCI_DMA_TODEVICE);
22736 if (vcc->pop) vcc->pop(vcc,skb);
22737 else dev_kfree_skb_irq(skb);
22738 - atomic_inc(&vcc->stats->tx);
22739 + atomic_inc_unchecked(&vcc->stats->tx);
22740 wake_up(&eni_dev->tx_wait);
22741 dma_complete++;
22742 }
22743 diff -urNp linux-3.0.4/drivers/atm/firestream.c linux-3.0.4/drivers/atm/firestream.c
22744 --- linux-3.0.4/drivers/atm/firestream.c 2011-07-21 22:17:23.000000000 -0400
22745 +++ linux-3.0.4/drivers/atm/firestream.c 2011-08-23 21:47:55.000000000 -0400
22746 @@ -749,7 +749,7 @@ static void process_txdone_queue (struct
22747 }
22748 }
22749
22750 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
22751 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
22752
22753 fs_dprintk (FS_DEBUG_TXMEM, "i");
22754 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
22755 @@ -816,7 +816,7 @@ static void process_incoming (struct fs_
22756 #endif
22757 skb_put (skb, qe->p1 & 0xffff);
22758 ATM_SKB(skb)->vcc = atm_vcc;
22759 - atomic_inc(&atm_vcc->stats->rx);
22760 + atomic_inc_unchecked(&atm_vcc->stats->rx);
22761 __net_timestamp(skb);
22762 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
22763 atm_vcc->push (atm_vcc, skb);
22764 @@ -837,12 +837,12 @@ static void process_incoming (struct fs_
22765 kfree (pe);
22766 }
22767 if (atm_vcc)
22768 - atomic_inc(&atm_vcc->stats->rx_drop);
22769 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
22770 break;
22771 case 0x1f: /* Reassembly abort: no buffers. */
22772 /* Silently increment error counter. */
22773 if (atm_vcc)
22774 - atomic_inc(&atm_vcc->stats->rx_drop);
22775 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
22776 break;
22777 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
22778 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
22779 diff -urNp linux-3.0.4/drivers/atm/fore200e.c linux-3.0.4/drivers/atm/fore200e.c
22780 --- linux-3.0.4/drivers/atm/fore200e.c 2011-07-21 22:17:23.000000000 -0400
22781 +++ linux-3.0.4/drivers/atm/fore200e.c 2011-08-23 21:47:55.000000000 -0400
22782 @@ -933,9 +933,9 @@ fore200e_tx_irq(struct fore200e* fore200
22783 #endif
22784 /* check error condition */
22785 if (*entry->status & STATUS_ERROR)
22786 - atomic_inc(&vcc->stats->tx_err);
22787 + atomic_inc_unchecked(&vcc->stats->tx_err);
22788 else
22789 - atomic_inc(&vcc->stats->tx);
22790 + atomic_inc_unchecked(&vcc->stats->tx);
22791 }
22792 }
22793
22794 @@ -1084,7 +1084,7 @@ fore200e_push_rpd(struct fore200e* fore2
22795 if (skb == NULL) {
22796 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
22797
22798 - atomic_inc(&vcc->stats->rx_drop);
22799 + atomic_inc_unchecked(&vcc->stats->rx_drop);
22800 return -ENOMEM;
22801 }
22802
22803 @@ -1127,14 +1127,14 @@ fore200e_push_rpd(struct fore200e* fore2
22804
22805 dev_kfree_skb_any(skb);
22806
22807 - atomic_inc(&vcc->stats->rx_drop);
22808 + atomic_inc_unchecked(&vcc->stats->rx_drop);
22809 return -ENOMEM;
22810 }
22811
22812 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
22813
22814 vcc->push(vcc, skb);
22815 - atomic_inc(&vcc->stats->rx);
22816 + atomic_inc_unchecked(&vcc->stats->rx);
22817
22818 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
22819
22820 @@ -1212,7 +1212,7 @@ fore200e_rx_irq(struct fore200e* fore200
22821 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
22822 fore200e->atm_dev->number,
22823 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
22824 - atomic_inc(&vcc->stats->rx_err);
22825 + atomic_inc_unchecked(&vcc->stats->rx_err);
22826 }
22827 }
22828
22829 @@ -1657,7 +1657,7 @@ fore200e_send(struct atm_vcc *vcc, struc
22830 goto retry_here;
22831 }
22832
22833 - atomic_inc(&vcc->stats->tx_err);
22834 + atomic_inc_unchecked(&vcc->stats->tx_err);
22835
22836 fore200e->tx_sat++;
22837 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
22838 diff -urNp linux-3.0.4/drivers/atm/he.c linux-3.0.4/drivers/atm/he.c
22839 --- linux-3.0.4/drivers/atm/he.c 2011-07-21 22:17:23.000000000 -0400
22840 +++ linux-3.0.4/drivers/atm/he.c 2011-08-23 21:47:55.000000000 -0400
22841 @@ -1709,7 +1709,7 @@ he_service_rbrq(struct he_dev *he_dev, i
22842
22843 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
22844 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
22845 - atomic_inc(&vcc->stats->rx_drop);
22846 + atomic_inc_unchecked(&vcc->stats->rx_drop);
22847 goto return_host_buffers;
22848 }
22849
22850 @@ -1736,7 +1736,7 @@ he_service_rbrq(struct he_dev *he_dev, i
22851 RBRQ_LEN_ERR(he_dev->rbrq_head)
22852 ? "LEN_ERR" : "",
22853 vcc->vpi, vcc->vci);
22854 - atomic_inc(&vcc->stats->rx_err);
22855 + atomic_inc_unchecked(&vcc->stats->rx_err);
22856 goto return_host_buffers;
22857 }
22858
22859 @@ -1788,7 +1788,7 @@ he_service_rbrq(struct he_dev *he_dev, i
22860 vcc->push(vcc, skb);
22861 spin_lock(&he_dev->global_lock);
22862
22863 - atomic_inc(&vcc->stats->rx);
22864 + atomic_inc_unchecked(&vcc->stats->rx);
22865
22866 return_host_buffers:
22867 ++pdus_assembled;
22868 @@ -2114,7 +2114,7 @@ __enqueue_tpd(struct he_dev *he_dev, str
22869 tpd->vcc->pop(tpd->vcc, tpd->skb);
22870 else
22871 dev_kfree_skb_any(tpd->skb);
22872 - atomic_inc(&tpd->vcc->stats->tx_err);
22873 + atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
22874 }
22875 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
22876 return;
22877 @@ -2526,7 +2526,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
22878 vcc->pop(vcc, skb);
22879 else
22880 dev_kfree_skb_any(skb);
22881 - atomic_inc(&vcc->stats->tx_err);
22882 + atomic_inc_unchecked(&vcc->stats->tx_err);
22883 return -EINVAL;
22884 }
22885
22886 @@ -2537,7 +2537,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
22887 vcc->pop(vcc, skb);
22888 else
22889 dev_kfree_skb_any(skb);
22890 - atomic_inc(&vcc->stats->tx_err);
22891 + atomic_inc_unchecked(&vcc->stats->tx_err);
22892 return -EINVAL;
22893 }
22894 #endif
22895 @@ -2549,7 +2549,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
22896 vcc->pop(vcc, skb);
22897 else
22898 dev_kfree_skb_any(skb);
22899 - atomic_inc(&vcc->stats->tx_err);
22900 + atomic_inc_unchecked(&vcc->stats->tx_err);
22901 spin_unlock_irqrestore(&he_dev->global_lock, flags);
22902 return -ENOMEM;
22903 }
22904 @@ -2591,7 +2591,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
22905 vcc->pop(vcc, skb);
22906 else
22907 dev_kfree_skb_any(skb);
22908 - atomic_inc(&vcc->stats->tx_err);
22909 + atomic_inc_unchecked(&vcc->stats->tx_err);
22910 spin_unlock_irqrestore(&he_dev->global_lock, flags);
22911 return -ENOMEM;
22912 }
22913 @@ -2622,7 +2622,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
22914 __enqueue_tpd(he_dev, tpd, cid);
22915 spin_unlock_irqrestore(&he_dev->global_lock, flags);
22916
22917 - atomic_inc(&vcc->stats->tx);
22918 + atomic_inc_unchecked(&vcc->stats->tx);
22919
22920 return 0;
22921 }
22922 diff -urNp linux-3.0.4/drivers/atm/horizon.c linux-3.0.4/drivers/atm/horizon.c
22923 --- linux-3.0.4/drivers/atm/horizon.c 2011-07-21 22:17:23.000000000 -0400
22924 +++ linux-3.0.4/drivers/atm/horizon.c 2011-08-23 21:47:55.000000000 -0400
22925 @@ -1034,7 +1034,7 @@ static void rx_schedule (hrz_dev * dev,
22926 {
22927 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
22928 // VC layer stats
22929 - atomic_inc(&vcc->stats->rx);
22930 + atomic_inc_unchecked(&vcc->stats->rx);
22931 __net_timestamp(skb);
22932 // end of our responsibility
22933 vcc->push (vcc, skb);
22934 @@ -1186,7 +1186,7 @@ static void tx_schedule (hrz_dev * const
22935 dev->tx_iovec = NULL;
22936
22937 // VC layer stats
22938 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
22939 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
22940
22941 // free the skb
22942 hrz_kfree_skb (skb);
22943 diff -urNp linux-3.0.4/drivers/atm/idt77252.c linux-3.0.4/drivers/atm/idt77252.c
22944 --- linux-3.0.4/drivers/atm/idt77252.c 2011-07-21 22:17:23.000000000 -0400
22945 +++ linux-3.0.4/drivers/atm/idt77252.c 2011-08-23 21:47:55.000000000 -0400
22946 @@ -811,7 +811,7 @@ drain_scq(struct idt77252_dev *card, str
22947 else
22948 dev_kfree_skb(skb);
22949
22950 - atomic_inc(&vcc->stats->tx);
22951 + atomic_inc_unchecked(&vcc->stats->tx);
22952 }
22953
22954 atomic_dec(&scq->used);
22955 @@ -1074,13 +1074,13 @@ dequeue_rx(struct idt77252_dev *card, st
22956 if ((sb = dev_alloc_skb(64)) == NULL) {
22957 printk("%s: Can't allocate buffers for aal0.\n",
22958 card->name);
22959 - atomic_add(i, &vcc->stats->rx_drop);
22960 + atomic_add_unchecked(i, &vcc->stats->rx_drop);
22961 break;
22962 }
22963 if (!atm_charge(vcc, sb->truesize)) {
22964 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
22965 card->name);
22966 - atomic_add(i - 1, &vcc->stats->rx_drop);
22967 + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
22968 dev_kfree_skb(sb);
22969 break;
22970 }
22971 @@ -1097,7 +1097,7 @@ dequeue_rx(struct idt77252_dev *card, st
22972 ATM_SKB(sb)->vcc = vcc;
22973 __net_timestamp(sb);
22974 vcc->push(vcc, sb);
22975 - atomic_inc(&vcc->stats->rx);
22976 + atomic_inc_unchecked(&vcc->stats->rx);
22977
22978 cell += ATM_CELL_PAYLOAD;
22979 }
22980 @@ -1134,13 +1134,13 @@ dequeue_rx(struct idt77252_dev *card, st
22981 "(CDC: %08x)\n",
22982 card->name, len, rpp->len, readl(SAR_REG_CDC));
22983 recycle_rx_pool_skb(card, rpp);
22984 - atomic_inc(&vcc->stats->rx_err);
22985 + atomic_inc_unchecked(&vcc->stats->rx_err);
22986 return;
22987 }
22988 if (stat & SAR_RSQE_CRC) {
22989 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
22990 recycle_rx_pool_skb(card, rpp);
22991 - atomic_inc(&vcc->stats->rx_err);
22992 + atomic_inc_unchecked(&vcc->stats->rx_err);
22993 return;
22994 }
22995 if (skb_queue_len(&rpp->queue) > 1) {
22996 @@ -1151,7 +1151,7 @@ dequeue_rx(struct idt77252_dev *card, st
22997 RXPRINTK("%s: Can't alloc RX skb.\n",
22998 card->name);
22999 recycle_rx_pool_skb(card, rpp);
23000 - atomic_inc(&vcc->stats->rx_err);
23001 + atomic_inc_unchecked(&vcc->stats->rx_err);
23002 return;
23003 }
23004 if (!atm_charge(vcc, skb->truesize)) {
23005 @@ -1170,7 +1170,7 @@ dequeue_rx(struct idt77252_dev *card, st
23006 __net_timestamp(skb);
23007
23008 vcc->push(vcc, skb);
23009 - atomic_inc(&vcc->stats->rx);
23010 + atomic_inc_unchecked(&vcc->stats->rx);
23011
23012 return;
23013 }
23014 @@ -1192,7 +1192,7 @@ dequeue_rx(struct idt77252_dev *card, st
23015 __net_timestamp(skb);
23016
23017 vcc->push(vcc, skb);
23018 - atomic_inc(&vcc->stats->rx);
23019 + atomic_inc_unchecked(&vcc->stats->rx);
23020
23021 if (skb->truesize > SAR_FB_SIZE_3)
23022 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
23023 @@ -1303,14 +1303,14 @@ idt77252_rx_raw(struct idt77252_dev *car
23024 if (vcc->qos.aal != ATM_AAL0) {
23025 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
23026 card->name, vpi, vci);
23027 - atomic_inc(&vcc->stats->rx_drop);
23028 + atomic_inc_unchecked(&vcc->stats->rx_drop);
23029 goto drop;
23030 }
23031
23032 if ((sb = dev_alloc_skb(64)) == NULL) {
23033 printk("%s: Can't allocate buffers for AAL0.\n",
23034 card->name);
23035 - atomic_inc(&vcc->stats->rx_err);
23036 + atomic_inc_unchecked(&vcc->stats->rx_err);
23037 goto drop;
23038 }
23039
23040 @@ -1329,7 +1329,7 @@ idt77252_rx_raw(struct idt77252_dev *car
23041 ATM_SKB(sb)->vcc = vcc;
23042 __net_timestamp(sb);
23043 vcc->push(vcc, sb);
23044 - atomic_inc(&vcc->stats->rx);
23045 + atomic_inc_unchecked(&vcc->stats->rx);
23046
23047 drop:
23048 skb_pull(queue, 64);
23049 @@ -1954,13 +1954,13 @@ idt77252_send_skb(struct atm_vcc *vcc, s
23050
23051 if (vc == NULL) {
23052 printk("%s: NULL connection in send().\n", card->name);
23053 - atomic_inc(&vcc->stats->tx_err);
23054 + atomic_inc_unchecked(&vcc->stats->tx_err);
23055 dev_kfree_skb(skb);
23056 return -EINVAL;
23057 }
23058 if (!test_bit(VCF_TX, &vc->flags)) {
23059 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
23060 - atomic_inc(&vcc->stats->tx_err);
23061 + atomic_inc_unchecked(&vcc->stats->tx_err);
23062 dev_kfree_skb(skb);
23063 return -EINVAL;
23064 }
23065 @@ -1972,14 +1972,14 @@ idt77252_send_skb(struct atm_vcc *vcc, s
23066 break;
23067 default:
23068 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
23069 - atomic_inc(&vcc->stats->tx_err);
23070 + atomic_inc_unchecked(&vcc->stats->tx_err);
23071 dev_kfree_skb(skb);
23072 return -EINVAL;
23073 }
23074
23075 if (skb_shinfo(skb)->nr_frags != 0) {
23076 printk("%s: No scatter-gather yet.\n", card->name);
23077 - atomic_inc(&vcc->stats->tx_err);
23078 + atomic_inc_unchecked(&vcc->stats->tx_err);
23079 dev_kfree_skb(skb);
23080 return -EINVAL;
23081 }
23082 @@ -1987,7 +1987,7 @@ idt77252_send_skb(struct atm_vcc *vcc, s
23083
23084 err = queue_skb(card, vc, skb, oam);
23085 if (err) {
23086 - atomic_inc(&vcc->stats->tx_err);
23087 + atomic_inc_unchecked(&vcc->stats->tx_err);
23088 dev_kfree_skb(skb);
23089 return err;
23090 }
23091 @@ -2010,7 +2010,7 @@ idt77252_send_oam(struct atm_vcc *vcc, v
23092 skb = dev_alloc_skb(64);
23093 if (!skb) {
23094 printk("%s: Out of memory in send_oam().\n", card->name);
23095 - atomic_inc(&vcc->stats->tx_err);
23096 + atomic_inc_unchecked(&vcc->stats->tx_err);
23097 return -ENOMEM;
23098 }
23099 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
23100 diff -urNp linux-3.0.4/drivers/atm/iphase.c linux-3.0.4/drivers/atm/iphase.c
23101 --- linux-3.0.4/drivers/atm/iphase.c 2011-07-21 22:17:23.000000000 -0400
23102 +++ linux-3.0.4/drivers/atm/iphase.c 2011-08-23 21:47:55.000000000 -0400
23103 @@ -1120,7 +1120,7 @@ static int rx_pkt(struct atm_dev *dev)
23104 status = (u_short) (buf_desc_ptr->desc_mode);
23105 if (status & (RX_CER | RX_PTE | RX_OFL))
23106 {
23107 - atomic_inc(&vcc->stats->rx_err);
23108 + atomic_inc_unchecked(&vcc->stats->rx_err);
23109 IF_ERR(printk("IA: bad packet, dropping it");)
23110 if (status & RX_CER) {
23111 IF_ERR(printk(" cause: packet CRC error\n");)
23112 @@ -1143,7 +1143,7 @@ static int rx_pkt(struct atm_dev *dev)
23113 len = dma_addr - buf_addr;
23114 if (len > iadev->rx_buf_sz) {
23115 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
23116 - atomic_inc(&vcc->stats->rx_err);
23117 + atomic_inc_unchecked(&vcc->stats->rx_err);
23118 goto out_free_desc;
23119 }
23120
23121 @@ -1293,7 +1293,7 @@ static void rx_dle_intr(struct atm_dev *
23122 ia_vcc = INPH_IA_VCC(vcc);
23123 if (ia_vcc == NULL)
23124 {
23125 - atomic_inc(&vcc->stats->rx_err);
23126 + atomic_inc_unchecked(&vcc->stats->rx_err);
23127 dev_kfree_skb_any(skb);
23128 atm_return(vcc, atm_guess_pdu2truesize(len));
23129 goto INCR_DLE;
23130 @@ -1305,7 +1305,7 @@ static void rx_dle_intr(struct atm_dev *
23131 if ((length > iadev->rx_buf_sz) || (length >
23132 (skb->len - sizeof(struct cpcs_trailer))))
23133 {
23134 - atomic_inc(&vcc->stats->rx_err);
23135 + atomic_inc_unchecked(&vcc->stats->rx_err);
23136 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
23137 length, skb->len);)
23138 dev_kfree_skb_any(skb);
23139 @@ -1321,7 +1321,7 @@ static void rx_dle_intr(struct atm_dev *
23140
23141 IF_RX(printk("rx_dle_intr: skb push");)
23142 vcc->push(vcc,skb);
23143 - atomic_inc(&vcc->stats->rx);
23144 + atomic_inc_unchecked(&vcc->stats->rx);
23145 iadev->rx_pkt_cnt++;
23146 }
23147 INCR_DLE:
23148 @@ -2801,15 +2801,15 @@ static int ia_ioctl(struct atm_dev *dev,
23149 {
23150 struct k_sonet_stats *stats;
23151 stats = &PRIV(_ia_dev[board])->sonet_stats;
23152 - printk("section_bip: %d\n", atomic_read(&stats->section_bip));
23153 - printk("line_bip : %d\n", atomic_read(&stats->line_bip));
23154 - printk("path_bip : %d\n", atomic_read(&stats->path_bip));
23155 - printk("line_febe : %d\n", atomic_read(&stats->line_febe));
23156 - printk("path_febe : %d\n", atomic_read(&stats->path_febe));
23157 - printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
23158 - printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
23159 - printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
23160 - printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
23161 + printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
23162 + printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
23163 + printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
23164 + printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
23165 + printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
23166 + printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
23167 + printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
23168 + printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
23169 + printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
23170 }
23171 ia_cmds.status = 0;
23172 break;
23173 @@ -2914,7 +2914,7 @@ static int ia_pkt_tx (struct atm_vcc *vc
23174 if ((desc == 0) || (desc > iadev->num_tx_desc))
23175 {
23176 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
23177 - atomic_inc(&vcc->stats->tx);
23178 + atomic_inc_unchecked(&vcc->stats->tx);
23179 if (vcc->pop)
23180 vcc->pop(vcc, skb);
23181 else
23182 @@ -3019,14 +3019,14 @@ static int ia_pkt_tx (struct atm_vcc *vc
23183 ATM_DESC(skb) = vcc->vci;
23184 skb_queue_tail(&iadev->tx_dma_q, skb);
23185
23186 - atomic_inc(&vcc->stats->tx);
23187 + atomic_inc_unchecked(&vcc->stats->tx);
23188 iadev->tx_pkt_cnt++;
23189 /* Increment transaction counter */
23190 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
23191
23192 #if 0
23193 /* add flow control logic */
23194 - if (atomic_read(&vcc->stats->tx) % 20 == 0) {
23195 + if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
23196 if (iavcc->vc_desc_cnt > 10) {
23197 vcc->tx_quota = vcc->tx_quota * 3 / 4;
23198 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
23199 diff -urNp linux-3.0.4/drivers/atm/lanai.c linux-3.0.4/drivers/atm/lanai.c
23200 --- linux-3.0.4/drivers/atm/lanai.c 2011-07-21 22:17:23.000000000 -0400
23201 +++ linux-3.0.4/drivers/atm/lanai.c 2011-08-23 21:47:55.000000000 -0400
23202 @@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct l
23203 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
23204 lanai_endtx(lanai, lvcc);
23205 lanai_free_skb(lvcc->tx.atmvcc, skb);
23206 - atomic_inc(&lvcc->tx.atmvcc->stats->tx);
23207 + atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
23208 }
23209
23210 /* Try to fill the buffer - don't call unless there is backlog */
23211 @@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc
23212 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
23213 __net_timestamp(skb);
23214 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
23215 - atomic_inc(&lvcc->rx.atmvcc->stats->rx);
23216 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
23217 out:
23218 lvcc->rx.buf.ptr = end;
23219 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
23220 @@ -1668,7 +1668,7 @@ static int handle_service(struct lanai_d
23221 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
23222 "vcc %d\n", lanai->number, (unsigned int) s, vci);
23223 lanai->stats.service_rxnotaal5++;
23224 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
23225 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
23226 return 0;
23227 }
23228 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
23229 @@ -1680,7 +1680,7 @@ static int handle_service(struct lanai_d
23230 int bytes;
23231 read_unlock(&vcc_sklist_lock);
23232 DPRINTK("got trashed rx pdu on vci %d\n", vci);
23233 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
23234 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
23235 lvcc->stats.x.aal5.service_trash++;
23236 bytes = (SERVICE_GET_END(s) * 16) -
23237 (((unsigned long) lvcc->rx.buf.ptr) -
23238 @@ -1692,7 +1692,7 @@ static int handle_service(struct lanai_d
23239 }
23240 if (s & SERVICE_STREAM) {
23241 read_unlock(&vcc_sklist_lock);
23242 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
23243 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
23244 lvcc->stats.x.aal5.service_stream++;
23245 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
23246 "PDU on VCI %d!\n", lanai->number, vci);
23247 @@ -1700,7 +1700,7 @@ static int handle_service(struct lanai_d
23248 return 0;
23249 }
23250 DPRINTK("got rx crc error on vci %d\n", vci);
23251 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
23252 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
23253 lvcc->stats.x.aal5.service_rxcrc++;
23254 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
23255 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
23256 diff -urNp linux-3.0.4/drivers/atm/nicstar.c linux-3.0.4/drivers/atm/nicstar.c
23257 --- linux-3.0.4/drivers/atm/nicstar.c 2011-07-21 22:17:23.000000000 -0400
23258 +++ linux-3.0.4/drivers/atm/nicstar.c 2011-08-23 21:47:55.000000000 -0400
23259 @@ -1654,7 +1654,7 @@ static int ns_send(struct atm_vcc *vcc,
23260 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
23261 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
23262 card->index);
23263 - atomic_inc(&vcc->stats->tx_err);
23264 + atomic_inc_unchecked(&vcc->stats->tx_err);
23265 dev_kfree_skb_any(skb);
23266 return -EINVAL;
23267 }
23268 @@ -1662,7 +1662,7 @@ static int ns_send(struct atm_vcc *vcc,
23269 if (!vc->tx) {
23270 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
23271 card->index);
23272 - atomic_inc(&vcc->stats->tx_err);
23273 + atomic_inc_unchecked(&vcc->stats->tx_err);
23274 dev_kfree_skb_any(skb);
23275 return -EINVAL;
23276 }
23277 @@ -1670,14 +1670,14 @@ static int ns_send(struct atm_vcc *vcc,
23278 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
23279 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
23280 card->index);
23281 - atomic_inc(&vcc->stats->tx_err);
23282 + atomic_inc_unchecked(&vcc->stats->tx_err);
23283 dev_kfree_skb_any(skb);
23284 return -EINVAL;
23285 }
23286
23287 if (skb_shinfo(skb)->nr_frags != 0) {
23288 printk("nicstar%d: No scatter-gather yet.\n", card->index);
23289 - atomic_inc(&vcc->stats->tx_err);
23290 + atomic_inc_unchecked(&vcc->stats->tx_err);
23291 dev_kfree_skb_any(skb);
23292 return -EINVAL;
23293 }
23294 @@ -1725,11 +1725,11 @@ static int ns_send(struct atm_vcc *vcc,
23295 }
23296
23297 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
23298 - atomic_inc(&vcc->stats->tx_err);
23299 + atomic_inc_unchecked(&vcc->stats->tx_err);
23300 dev_kfree_skb_any(skb);
23301 return -EIO;
23302 }
23303 - atomic_inc(&vcc->stats->tx);
23304 + atomic_inc_unchecked(&vcc->stats->tx);
23305
23306 return 0;
23307 }
23308 @@ -2046,14 +2046,14 @@ static void dequeue_rx(ns_dev * card, ns
23309 printk
23310 ("nicstar%d: Can't allocate buffers for aal0.\n",
23311 card->index);
23312 - atomic_add(i, &vcc->stats->rx_drop);
23313 + atomic_add_unchecked(i, &vcc->stats->rx_drop);
23314 break;
23315 }
23316 if (!atm_charge(vcc, sb->truesize)) {
23317 RXPRINTK
23318 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
23319 card->index);
23320 - atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
23321 + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
23322 dev_kfree_skb_any(sb);
23323 break;
23324 }
23325 @@ -2068,7 +2068,7 @@ static void dequeue_rx(ns_dev * card, ns
23326 ATM_SKB(sb)->vcc = vcc;
23327 __net_timestamp(sb);
23328 vcc->push(vcc, sb);
23329 - atomic_inc(&vcc->stats->rx);
23330 + atomic_inc_unchecked(&vcc->stats->rx);
23331 cell += ATM_CELL_PAYLOAD;
23332 }
23333
23334 @@ -2085,7 +2085,7 @@ static void dequeue_rx(ns_dev * card, ns
23335 if (iovb == NULL) {
23336 printk("nicstar%d: Out of iovec buffers.\n",
23337 card->index);
23338 - atomic_inc(&vcc->stats->rx_drop);
23339 + atomic_inc_unchecked(&vcc->stats->rx_drop);
23340 recycle_rx_buf(card, skb);
23341 return;
23342 }
23343 @@ -2109,7 +2109,7 @@ static void dequeue_rx(ns_dev * card, ns
23344 small or large buffer itself. */
23345 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
23346 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
23347 - atomic_inc(&vcc->stats->rx_err);
23348 + atomic_inc_unchecked(&vcc->stats->rx_err);
23349 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
23350 NS_MAX_IOVECS);
23351 NS_PRV_IOVCNT(iovb) = 0;
23352 @@ -2129,7 +2129,7 @@ static void dequeue_rx(ns_dev * card, ns
23353 ("nicstar%d: Expected a small buffer, and this is not one.\n",
23354 card->index);
23355 which_list(card, skb);
23356 - atomic_inc(&vcc->stats->rx_err);
23357 + atomic_inc_unchecked(&vcc->stats->rx_err);
23358 recycle_rx_buf(card, skb);
23359 vc->rx_iov = NULL;
23360 recycle_iov_buf(card, iovb);
23361 @@ -2142,7 +2142,7 @@ static void dequeue_rx(ns_dev * card, ns
23362 ("nicstar%d: Expected a large buffer, and this is not one.\n",
23363 card->index);
23364 which_list(card, skb);
23365 - atomic_inc(&vcc->stats->rx_err);
23366 + atomic_inc_unchecked(&vcc->stats->rx_err);
23367 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
23368 NS_PRV_IOVCNT(iovb));
23369 vc->rx_iov = NULL;
23370 @@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns
23371 printk(" - PDU size mismatch.\n");
23372 else
23373 printk(".\n");
23374 - atomic_inc(&vcc->stats->rx_err);
23375 + atomic_inc_unchecked(&vcc->stats->rx_err);
23376 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
23377 NS_PRV_IOVCNT(iovb));
23378 vc->rx_iov = NULL;
23379 @@ -2179,7 +2179,7 @@ static void dequeue_rx(ns_dev * card, ns
23380 /* skb points to a small buffer */
23381 if (!atm_charge(vcc, skb->truesize)) {
23382 push_rxbufs(card, skb);
23383 - atomic_inc(&vcc->stats->rx_drop);
23384 + atomic_inc_unchecked(&vcc->stats->rx_drop);
23385 } else {
23386 skb_put(skb, len);
23387 dequeue_sm_buf(card, skb);
23388 @@ -2189,7 +2189,7 @@ static void dequeue_rx(ns_dev * card, ns
23389 ATM_SKB(skb)->vcc = vcc;
23390 __net_timestamp(skb);
23391 vcc->push(vcc, skb);
23392 - atomic_inc(&vcc->stats->rx);
23393 + atomic_inc_unchecked(&vcc->stats->rx);
23394 }
23395 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
23396 struct sk_buff *sb;
23397 @@ -2200,7 +2200,7 @@ static void dequeue_rx(ns_dev * card, ns
23398 if (len <= NS_SMBUFSIZE) {
23399 if (!atm_charge(vcc, sb->truesize)) {
23400 push_rxbufs(card, sb);
23401 - atomic_inc(&vcc->stats->rx_drop);
23402 + atomic_inc_unchecked(&vcc->stats->rx_drop);
23403 } else {
23404 skb_put(sb, len);
23405 dequeue_sm_buf(card, sb);
23406 @@ -2210,7 +2210,7 @@ static void dequeue_rx(ns_dev * card, ns
23407 ATM_SKB(sb)->vcc = vcc;
23408 __net_timestamp(sb);
23409 vcc->push(vcc, sb);
23410 - atomic_inc(&vcc->stats->rx);
23411 + atomic_inc_unchecked(&vcc->stats->rx);
23412 }
23413
23414 push_rxbufs(card, skb);
23415 @@ -2219,7 +2219,7 @@ static void dequeue_rx(ns_dev * card, ns
23416
23417 if (!atm_charge(vcc, skb->truesize)) {
23418 push_rxbufs(card, skb);
23419 - atomic_inc(&vcc->stats->rx_drop);
23420 + atomic_inc_unchecked(&vcc->stats->rx_drop);
23421 } else {
23422 dequeue_lg_buf(card, skb);
23423 #ifdef NS_USE_DESTRUCTORS
23424 @@ -2232,7 +2232,7 @@ static void dequeue_rx(ns_dev * card, ns
23425 ATM_SKB(skb)->vcc = vcc;
23426 __net_timestamp(skb);
23427 vcc->push(vcc, skb);
23428 - atomic_inc(&vcc->stats->rx);
23429 + atomic_inc_unchecked(&vcc->stats->rx);
23430 }
23431
23432 push_rxbufs(card, sb);
23433 @@ -2253,7 +2253,7 @@ static void dequeue_rx(ns_dev * card, ns
23434 printk
23435 ("nicstar%d: Out of huge buffers.\n",
23436 card->index);
23437 - atomic_inc(&vcc->stats->rx_drop);
23438 + atomic_inc_unchecked(&vcc->stats->rx_drop);
23439 recycle_iovec_rx_bufs(card,
23440 (struct iovec *)
23441 iovb->data,
23442 @@ -2304,7 +2304,7 @@ static void dequeue_rx(ns_dev * card, ns
23443 card->hbpool.count++;
23444 } else
23445 dev_kfree_skb_any(hb);
23446 - atomic_inc(&vcc->stats->rx_drop);
23447 + atomic_inc_unchecked(&vcc->stats->rx_drop);
23448 } else {
23449 /* Copy the small buffer to the huge buffer */
23450 sb = (struct sk_buff *)iov->iov_base;
23451 @@ -2341,7 +2341,7 @@ static void dequeue_rx(ns_dev * card, ns
23452 #endif /* NS_USE_DESTRUCTORS */
23453 __net_timestamp(hb);
23454 vcc->push(vcc, hb);
23455 - atomic_inc(&vcc->stats->rx);
23456 + atomic_inc_unchecked(&vcc->stats->rx);
23457 }
23458 }
23459
23460 diff -urNp linux-3.0.4/drivers/atm/solos-pci.c linux-3.0.4/drivers/atm/solos-pci.c
23461 --- linux-3.0.4/drivers/atm/solos-pci.c 2011-07-21 22:17:23.000000000 -0400
23462 +++ linux-3.0.4/drivers/atm/solos-pci.c 2011-08-23 21:48:14.000000000 -0400
23463 @@ -714,7 +714,7 @@ void solos_bh(unsigned long card_arg)
23464 }
23465 atm_charge(vcc, skb->truesize);
23466 vcc->push(vcc, skb);
23467 - atomic_inc(&vcc->stats->rx);
23468 + atomic_inc_unchecked(&vcc->stats->rx);
23469 break;
23470
23471 case PKT_STATUS:
23472 @@ -899,6 +899,8 @@ static int print_buffer(struct sk_buff *
23473 char msg[500];
23474 char item[10];
23475
23476 + pax_track_stack();
23477 +
23478 len = buf->len;
23479 for (i = 0; i < len; i++){
23480 if(i % 8 == 0)
23481 @@ -1008,7 +1010,7 @@ static uint32_t fpga_tx(struct solos_car
23482 vcc = SKB_CB(oldskb)->vcc;
23483
23484 if (vcc) {
23485 - atomic_inc(&vcc->stats->tx);
23486 + atomic_inc_unchecked(&vcc->stats->tx);
23487 solos_pop(vcc, oldskb);
23488 } else
23489 dev_kfree_skb_irq(oldskb);
23490 diff -urNp linux-3.0.4/drivers/atm/suni.c linux-3.0.4/drivers/atm/suni.c
23491 --- linux-3.0.4/drivers/atm/suni.c 2011-07-21 22:17:23.000000000 -0400
23492 +++ linux-3.0.4/drivers/atm/suni.c 2011-08-23 21:47:55.000000000 -0400
23493 @@ -50,8 +50,8 @@ static DEFINE_SPINLOCK(sunis_lock);
23494
23495
23496 #define ADD_LIMITED(s,v) \
23497 - atomic_add((v),&stats->s); \
23498 - if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
23499 + atomic_add_unchecked((v),&stats->s); \
23500 + if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
23501
23502
23503 static void suni_hz(unsigned long from_timer)
23504 diff -urNp linux-3.0.4/drivers/atm/uPD98402.c linux-3.0.4/drivers/atm/uPD98402.c
23505 --- linux-3.0.4/drivers/atm/uPD98402.c 2011-07-21 22:17:23.000000000 -0400
23506 +++ linux-3.0.4/drivers/atm/uPD98402.c 2011-08-23 21:47:55.000000000 -0400
23507 @@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *d
23508 struct sonet_stats tmp;
23509 int error = 0;
23510
23511 - atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
23512 + atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
23513 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
23514 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
23515 if (zero && !error) {
23516 @@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev
23517
23518
23519 #define ADD_LIMITED(s,v) \
23520 - { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
23521 - if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
23522 - atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
23523 + { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
23524 + if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
23525 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
23526
23527
23528 static void stat_event(struct atm_dev *dev)
23529 @@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev
23530 if (reason & uPD98402_INT_PFM) stat_event(dev);
23531 if (reason & uPD98402_INT_PCO) {
23532 (void) GET(PCOCR); /* clear interrupt cause */
23533 - atomic_add(GET(HECCT),
23534 + atomic_add_unchecked(GET(HECCT),
23535 &PRIV(dev)->sonet_stats.uncorr_hcs);
23536 }
23537 if ((reason & uPD98402_INT_RFO) &&
23538 @@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev
23539 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
23540 uPD98402_INT_LOS),PIMR); /* enable them */
23541 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
23542 - atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
23543 - atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
23544 - atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
23545 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
23546 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
23547 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
23548 return 0;
23549 }
23550
23551 diff -urNp linux-3.0.4/drivers/atm/zatm.c linux-3.0.4/drivers/atm/zatm.c
23552 --- linux-3.0.4/drivers/atm/zatm.c 2011-07-21 22:17:23.000000000 -0400
23553 +++ linux-3.0.4/drivers/atm/zatm.c 2011-08-23 21:47:55.000000000 -0400
23554 @@ -459,7 +459,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
23555 }
23556 if (!size) {
23557 dev_kfree_skb_irq(skb);
23558 - if (vcc) atomic_inc(&vcc->stats->rx_err);
23559 + if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
23560 continue;
23561 }
23562 if (!atm_charge(vcc,skb->truesize)) {
23563 @@ -469,7 +469,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
23564 skb->len = size;
23565 ATM_SKB(skb)->vcc = vcc;
23566 vcc->push(vcc,skb);
23567 - atomic_inc(&vcc->stats->rx);
23568 + atomic_inc_unchecked(&vcc->stats->rx);
23569 }
23570 zout(pos & 0xffff,MTA(mbx));
23571 #if 0 /* probably a stupid idea */
23572 @@ -733,7 +733,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD
23573 skb_queue_head(&zatm_vcc->backlog,skb);
23574 break;
23575 }
23576 - atomic_inc(&vcc->stats->tx);
23577 + atomic_inc_unchecked(&vcc->stats->tx);
23578 wake_up(&zatm_vcc->tx_wait);
23579 }
23580
23581 diff -urNp linux-3.0.4/drivers/base/power/wakeup.c linux-3.0.4/drivers/base/power/wakeup.c
23582 --- linux-3.0.4/drivers/base/power/wakeup.c 2011-07-21 22:17:23.000000000 -0400
23583 +++ linux-3.0.4/drivers/base/power/wakeup.c 2011-08-23 21:47:55.000000000 -0400
23584 @@ -29,14 +29,14 @@ bool events_check_enabled;
23585 * They need to be modified together atomically, so it's better to use one
23586 * atomic variable to hold them both.
23587 */
23588 -static atomic_t combined_event_count = ATOMIC_INIT(0);
23589 +static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
23590
23591 #define IN_PROGRESS_BITS (sizeof(int) * 4)
23592 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
23593
23594 static void split_counters(unsigned int *cnt, unsigned int *inpr)
23595 {
23596 - unsigned int comb = atomic_read(&combined_event_count);
23597 + unsigned int comb = atomic_read_unchecked(&combined_event_count);
23598
23599 *cnt = (comb >> IN_PROGRESS_BITS);
23600 *inpr = comb & MAX_IN_PROGRESS;
23601 @@ -350,7 +350,7 @@ static void wakeup_source_activate(struc
23602 ws->last_time = ktime_get();
23603
23604 /* Increment the counter of events in progress. */
23605 - atomic_inc(&combined_event_count);
23606 + atomic_inc_unchecked(&combined_event_count);
23607 }
23608
23609 /**
23610 @@ -440,7 +440,7 @@ static void wakeup_source_deactivate(str
23611 * Increment the counter of registered wakeup events and decrement the
23612 * couter of wakeup events in progress simultaneously.
23613 */
23614 - atomic_add(MAX_IN_PROGRESS, &combined_event_count);
23615 + atomic_add_unchecked(MAX_IN_PROGRESS, &combined_event_count);
23616 }
23617
23618 /**
23619 diff -urNp linux-3.0.4/drivers/block/cciss.c linux-3.0.4/drivers/block/cciss.c
23620 --- linux-3.0.4/drivers/block/cciss.c 2011-07-21 22:17:23.000000000 -0400
23621 +++ linux-3.0.4/drivers/block/cciss.c 2011-08-23 21:48:14.000000000 -0400
23622 @@ -1179,6 +1179,8 @@ static int cciss_ioctl32_passthru(struct
23623 int err;
23624 u32 cp;
23625
23626 + memset(&arg64, 0, sizeof(arg64));
23627 +
23628 err = 0;
23629 err |=
23630 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
23631 @@ -2986,7 +2988,7 @@ static void start_io(ctlr_info_t *h)
23632 while (!list_empty(&h->reqQ)) {
23633 c = list_entry(h->reqQ.next, CommandList_struct, list);
23634 /* can't do anything if fifo is full */
23635 - if ((h->access.fifo_full(h))) {
23636 + if ((h->access->fifo_full(h))) {
23637 dev_warn(&h->pdev->dev, "fifo full\n");
23638 break;
23639 }
23640 @@ -2996,7 +2998,7 @@ static void start_io(ctlr_info_t *h)
23641 h->Qdepth--;
23642
23643 /* Tell the controller execute command */
23644 - h->access.submit_command(h, c);
23645 + h->access->submit_command(h, c);
23646
23647 /* Put job onto the completed Q */
23648 addQ(&h->cmpQ, c);
23649 @@ -3422,17 +3424,17 @@ startio:
23650
23651 static inline unsigned long get_next_completion(ctlr_info_t *h)
23652 {
23653 - return h->access.command_completed(h);
23654 + return h->access->command_completed(h);
23655 }
23656
23657 static inline int interrupt_pending(ctlr_info_t *h)
23658 {
23659 - return h->access.intr_pending(h);
23660 + return h->access->intr_pending(h);
23661 }
23662
23663 static inline long interrupt_not_for_us(ctlr_info_t *h)
23664 {
23665 - return ((h->access.intr_pending(h) == 0) ||
23666 + return ((h->access->intr_pending(h) == 0) ||
23667 (h->interrupts_enabled == 0));
23668 }
23669
23670 @@ -3465,7 +3467,7 @@ static inline u32 next_command(ctlr_info
23671 u32 a;
23672
23673 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
23674 - return h->access.command_completed(h);
23675 + return h->access->command_completed(h);
23676
23677 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
23678 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
23679 @@ -4020,7 +4022,7 @@ static void __devinit cciss_put_controll
23680 trans_support & CFGTBL_Trans_use_short_tags);
23681
23682 /* Change the access methods to the performant access methods */
23683 - h->access = SA5_performant_access;
23684 + h->access = &SA5_performant_access;
23685 h->transMethod = CFGTBL_Trans_Performant;
23686
23687 return;
23688 @@ -4292,7 +4294,7 @@ static int __devinit cciss_pci_init(ctlr
23689 if (prod_index < 0)
23690 return -ENODEV;
23691 h->product_name = products[prod_index].product_name;
23692 - h->access = *(products[prod_index].access);
23693 + h->access = products[prod_index].access;
23694
23695 if (cciss_board_disabled(h)) {
23696 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
23697 @@ -5002,7 +5004,7 @@ reinit_after_soft_reset:
23698 }
23699
23700 /* make sure the board interrupts are off */
23701 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
23702 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
23703 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
23704 if (rc)
23705 goto clean2;
23706 @@ -5054,7 +5056,7 @@ reinit_after_soft_reset:
23707 * fake ones to scoop up any residual completions.
23708 */
23709 spin_lock_irqsave(&h->lock, flags);
23710 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
23711 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
23712 spin_unlock_irqrestore(&h->lock, flags);
23713 free_irq(h->intr[PERF_MODE_INT], h);
23714 rc = cciss_request_irq(h, cciss_msix_discard_completions,
23715 @@ -5074,9 +5076,9 @@ reinit_after_soft_reset:
23716 dev_info(&h->pdev->dev, "Board READY.\n");
23717 dev_info(&h->pdev->dev,
23718 "Waiting for stale completions to drain.\n");
23719 - h->access.set_intr_mask(h, CCISS_INTR_ON);
23720 + h->access->set_intr_mask(h, CCISS_INTR_ON);
23721 msleep(10000);
23722 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
23723 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
23724
23725 rc = controller_reset_failed(h->cfgtable);
23726 if (rc)
23727 @@ -5099,7 +5101,7 @@ reinit_after_soft_reset:
23728 cciss_scsi_setup(h);
23729
23730 /* Turn the interrupts on so we can service requests */
23731 - h->access.set_intr_mask(h, CCISS_INTR_ON);
23732 + h->access->set_intr_mask(h, CCISS_INTR_ON);
23733
23734 /* Get the firmware version */
23735 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
23736 @@ -5171,7 +5173,7 @@ static void cciss_shutdown(struct pci_de
23737 kfree(flush_buf);
23738 if (return_code != IO_OK)
23739 dev_warn(&h->pdev->dev, "Error flushing cache\n");
23740 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
23741 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
23742 free_irq(h->intr[PERF_MODE_INT], h);
23743 }
23744
23745 diff -urNp linux-3.0.4/drivers/block/cciss.h linux-3.0.4/drivers/block/cciss.h
23746 --- linux-3.0.4/drivers/block/cciss.h 2011-09-02 18:11:21.000000000 -0400
23747 +++ linux-3.0.4/drivers/block/cciss.h 2011-08-23 21:47:55.000000000 -0400
23748 @@ -100,7 +100,7 @@ struct ctlr_info
23749 /* information about each logical volume */
23750 drive_info_struct *drv[CISS_MAX_LUN];
23751
23752 - struct access_method access;
23753 + struct access_method *access;
23754
23755 /* queue and queue Info */
23756 struct list_head reqQ;
23757 diff -urNp linux-3.0.4/drivers/block/cpqarray.c linux-3.0.4/drivers/block/cpqarray.c
23758 --- linux-3.0.4/drivers/block/cpqarray.c 2011-07-21 22:17:23.000000000 -0400
23759 +++ linux-3.0.4/drivers/block/cpqarray.c 2011-08-23 21:48:14.000000000 -0400
23760 @@ -404,7 +404,7 @@ static int __devinit cpqarray_register_c
23761 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
23762 goto Enomem4;
23763 }
23764 - hba[i]->access.set_intr_mask(hba[i], 0);
23765 + hba[i]->access->set_intr_mask(hba[i], 0);
23766 if (request_irq(hba[i]->intr, do_ida_intr,
23767 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
23768 {
23769 @@ -459,7 +459,7 @@ static int __devinit cpqarray_register_c
23770 add_timer(&hba[i]->timer);
23771
23772 /* Enable IRQ now that spinlock and rate limit timer are set up */
23773 - hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
23774 + hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
23775
23776 for(j=0; j<NWD; j++) {
23777 struct gendisk *disk = ida_gendisk[i][j];
23778 @@ -694,7 +694,7 @@ DBGINFO(
23779 for(i=0; i<NR_PRODUCTS; i++) {
23780 if (board_id == products[i].board_id) {
23781 c->product_name = products[i].product_name;
23782 - c->access = *(products[i].access);
23783 + c->access = products[i].access;
23784 break;
23785 }
23786 }
23787 @@ -792,7 +792,7 @@ static int __devinit cpqarray_eisa_detec
23788 hba[ctlr]->intr = intr;
23789 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
23790 hba[ctlr]->product_name = products[j].product_name;
23791 - hba[ctlr]->access = *(products[j].access);
23792 + hba[ctlr]->access = products[j].access;
23793 hba[ctlr]->ctlr = ctlr;
23794 hba[ctlr]->board_id = board_id;
23795 hba[ctlr]->pci_dev = NULL; /* not PCI */
23796 @@ -911,6 +911,8 @@ static void do_ida_request(struct reques
23797 struct scatterlist tmp_sg[SG_MAX];
23798 int i, dir, seg;
23799
23800 + pax_track_stack();
23801 +
23802 queue_next:
23803 creq = blk_peek_request(q);
23804 if (!creq)
23805 @@ -980,7 +982,7 @@ static void start_io(ctlr_info_t *h)
23806
23807 while((c = h->reqQ) != NULL) {
23808 /* Can't do anything if we're busy */
23809 - if (h->access.fifo_full(h) == 0)
23810 + if (h->access->fifo_full(h) == 0)
23811 return;
23812
23813 /* Get the first entry from the request Q */
23814 @@ -988,7 +990,7 @@ static void start_io(ctlr_info_t *h)
23815 h->Qdepth--;
23816
23817 /* Tell the controller to do our bidding */
23818 - h->access.submit_command(h, c);
23819 + h->access->submit_command(h, c);
23820
23821 /* Get onto the completion Q */
23822 addQ(&h->cmpQ, c);
23823 @@ -1050,7 +1052,7 @@ static irqreturn_t do_ida_intr(int irq,
23824 unsigned long flags;
23825 __u32 a,a1;
23826
23827 - istat = h->access.intr_pending(h);
23828 + istat = h->access->intr_pending(h);
23829 /* Is this interrupt for us? */
23830 if (istat == 0)
23831 return IRQ_NONE;
23832 @@ -1061,7 +1063,7 @@ static irqreturn_t do_ida_intr(int irq,
23833 */
23834 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
23835 if (istat & FIFO_NOT_EMPTY) {
23836 - while((a = h->access.command_completed(h))) {
23837 + while((a = h->access->command_completed(h))) {
23838 a1 = a; a &= ~3;
23839 if ((c = h->cmpQ) == NULL)
23840 {
23841 @@ -1449,11 +1451,11 @@ static int sendcmd(
23842 /*
23843 * Disable interrupt
23844 */
23845 - info_p->access.set_intr_mask(info_p, 0);
23846 + info_p->access->set_intr_mask(info_p, 0);
23847 /* Make sure there is room in the command FIFO */
23848 /* Actually it should be completely empty at this time. */
23849 for (i = 200000; i > 0; i--) {
23850 - temp = info_p->access.fifo_full(info_p);
23851 + temp = info_p->access->fifo_full(info_p);
23852 if (temp != 0) {
23853 break;
23854 }
23855 @@ -1466,7 +1468,7 @@ DBG(
23856 /*
23857 * Send the cmd
23858 */
23859 - info_p->access.submit_command(info_p, c);
23860 + info_p->access->submit_command(info_p, c);
23861 complete = pollcomplete(ctlr);
23862
23863 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
23864 @@ -1549,9 +1551,9 @@ static int revalidate_allvol(ctlr_info_t
23865 * we check the new geometry. Then turn interrupts back on when
23866 * we're done.
23867 */
23868 - host->access.set_intr_mask(host, 0);
23869 + host->access->set_intr_mask(host, 0);
23870 getgeometry(ctlr);
23871 - host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
23872 + host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
23873
23874 for(i=0; i<NWD; i++) {
23875 struct gendisk *disk = ida_gendisk[ctlr][i];
23876 @@ -1591,7 +1593,7 @@ static int pollcomplete(int ctlr)
23877 /* Wait (up to 2 seconds) for a command to complete */
23878
23879 for (i = 200000; i > 0; i--) {
23880 - done = hba[ctlr]->access.command_completed(hba[ctlr]);
23881 + done = hba[ctlr]->access->command_completed(hba[ctlr]);
23882 if (done == 0) {
23883 udelay(10); /* a short fixed delay */
23884 } else
23885 diff -urNp linux-3.0.4/drivers/block/cpqarray.h linux-3.0.4/drivers/block/cpqarray.h
23886 --- linux-3.0.4/drivers/block/cpqarray.h 2011-07-21 22:17:23.000000000 -0400
23887 +++ linux-3.0.4/drivers/block/cpqarray.h 2011-08-23 21:47:55.000000000 -0400
23888 @@ -99,7 +99,7 @@ struct ctlr_info {
23889 drv_info_t drv[NWD];
23890 struct proc_dir_entry *proc;
23891
23892 - struct access_method access;
23893 + struct access_method *access;
23894
23895 cmdlist_t *reqQ;
23896 cmdlist_t *cmpQ;
23897 diff -urNp linux-3.0.4/drivers/block/DAC960.c linux-3.0.4/drivers/block/DAC960.c
23898 --- linux-3.0.4/drivers/block/DAC960.c 2011-07-21 22:17:23.000000000 -0400
23899 +++ linux-3.0.4/drivers/block/DAC960.c 2011-08-23 21:48:14.000000000 -0400
23900 @@ -1980,6 +1980,8 @@ static bool DAC960_V1_ReadDeviceConfigur
23901 unsigned long flags;
23902 int Channel, TargetID;
23903
23904 + pax_track_stack();
23905 +
23906 if (!init_dma_loaf(Controller->PCIDevice, &local_dma,
23907 DAC960_V1_MaxChannels*(sizeof(DAC960_V1_DCDB_T) +
23908 sizeof(DAC960_SCSI_Inquiry_T) +
23909 diff -urNp linux-3.0.4/drivers/block/drbd/drbd_int.h linux-3.0.4/drivers/block/drbd/drbd_int.h
23910 --- linux-3.0.4/drivers/block/drbd/drbd_int.h 2011-07-21 22:17:23.000000000 -0400
23911 +++ linux-3.0.4/drivers/block/drbd/drbd_int.h 2011-08-23 21:47:55.000000000 -0400
23912 @@ -737,7 +737,7 @@ struct drbd_request;
23913 struct drbd_epoch {
23914 struct list_head list;
23915 unsigned int barrier_nr;
23916 - atomic_t epoch_size; /* increased on every request added. */
23917 + atomic_unchecked_t epoch_size; /* increased on every request added. */
23918 atomic_t active; /* increased on every req. added, and dec on every finished. */
23919 unsigned long flags;
23920 };
23921 @@ -1109,7 +1109,7 @@ struct drbd_conf {
23922 void *int_dig_in;
23923 void *int_dig_vv;
23924 wait_queue_head_t seq_wait;
23925 - atomic_t packet_seq;
23926 + atomic_unchecked_t packet_seq;
23927 unsigned int peer_seq;
23928 spinlock_t peer_seq_lock;
23929 unsigned int minor;
23930 diff -urNp linux-3.0.4/drivers/block/drbd/drbd_main.c linux-3.0.4/drivers/block/drbd/drbd_main.c
23931 --- linux-3.0.4/drivers/block/drbd/drbd_main.c 2011-07-21 22:17:23.000000000 -0400
23932 +++ linux-3.0.4/drivers/block/drbd/drbd_main.c 2011-08-23 21:47:55.000000000 -0400
23933 @@ -2397,7 +2397,7 @@ static int _drbd_send_ack(struct drbd_co
23934 p.sector = sector;
23935 p.block_id = block_id;
23936 p.blksize = blksize;
23937 - p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
23938 + p.seq_num = cpu_to_be32(atomic_add_return_unchecked(1, &mdev->packet_seq));
23939
23940 if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
23941 return false;
23942 @@ -2696,7 +2696,7 @@ int drbd_send_dblock(struct drbd_conf *m
23943 p.sector = cpu_to_be64(req->sector);
23944 p.block_id = (unsigned long)req;
23945 p.seq_num = cpu_to_be32(req->seq_num =
23946 - atomic_add_return(1, &mdev->packet_seq));
23947 + atomic_add_return_unchecked(1, &mdev->packet_seq));
23948
23949 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
23950
23951 @@ -2981,7 +2981,7 @@ void drbd_init_set_defaults(struct drbd_
23952 atomic_set(&mdev->unacked_cnt, 0);
23953 atomic_set(&mdev->local_cnt, 0);
23954 atomic_set(&mdev->net_cnt, 0);
23955 - atomic_set(&mdev->packet_seq, 0);
23956 + atomic_set_unchecked(&mdev->packet_seq, 0);
23957 atomic_set(&mdev->pp_in_use, 0);
23958 atomic_set(&mdev->pp_in_use_by_net, 0);
23959 atomic_set(&mdev->rs_sect_in, 0);
23960 @@ -3063,8 +3063,8 @@ void drbd_mdev_cleanup(struct drbd_conf
23961 mdev->receiver.t_state);
23962
23963 /* no need to lock it, I'm the only thread alive */
23964 - if (atomic_read(&mdev->current_epoch->epoch_size) != 0)
23965 - dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
23966 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size) != 0)
23967 + dev_err(DEV, "epoch_size:%d\n", atomic_read_unchecked(&mdev->current_epoch->epoch_size));
23968 mdev->al_writ_cnt =
23969 mdev->bm_writ_cnt =
23970 mdev->read_cnt =
23971 diff -urNp linux-3.0.4/drivers/block/drbd/drbd_nl.c linux-3.0.4/drivers/block/drbd/drbd_nl.c
23972 --- linux-3.0.4/drivers/block/drbd/drbd_nl.c 2011-07-21 22:17:23.000000000 -0400
23973 +++ linux-3.0.4/drivers/block/drbd/drbd_nl.c 2011-08-23 21:47:55.000000000 -0400
23974 @@ -2359,7 +2359,7 @@ static void drbd_connector_callback(stru
23975 module_put(THIS_MODULE);
23976 }
23977
23978 -static atomic_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
23979 +static atomic_unchecked_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
23980
23981 static unsigned short *
23982 __tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data,
23983 @@ -2430,7 +2430,7 @@ void drbd_bcast_state(struct drbd_conf *
23984 cn_reply->id.idx = CN_IDX_DRBD;
23985 cn_reply->id.val = CN_VAL_DRBD;
23986
23987 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
23988 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
23989 cn_reply->ack = 0; /* not used here. */
23990 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
23991 (int)((char *)tl - (char *)reply->tag_list);
23992 @@ -2462,7 +2462,7 @@ void drbd_bcast_ev_helper(struct drbd_co
23993 cn_reply->id.idx = CN_IDX_DRBD;
23994 cn_reply->id.val = CN_VAL_DRBD;
23995
23996 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
23997 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
23998 cn_reply->ack = 0; /* not used here. */
23999 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
24000 (int)((char *)tl - (char *)reply->tag_list);
24001 @@ -2540,7 +2540,7 @@ void drbd_bcast_ee(struct drbd_conf *mde
24002 cn_reply->id.idx = CN_IDX_DRBD;
24003 cn_reply->id.val = CN_VAL_DRBD;
24004
24005 - cn_reply->seq = atomic_add_return(1,&drbd_nl_seq);
24006 + cn_reply->seq = atomic_add_return_unchecked(1,&drbd_nl_seq);
24007 cn_reply->ack = 0; // not used here.
24008 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
24009 (int)((char*)tl - (char*)reply->tag_list);
24010 @@ -2579,7 +2579,7 @@ void drbd_bcast_sync_progress(struct drb
24011 cn_reply->id.idx = CN_IDX_DRBD;
24012 cn_reply->id.val = CN_VAL_DRBD;
24013
24014 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
24015 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
24016 cn_reply->ack = 0; /* not used here. */
24017 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
24018 (int)((char *)tl - (char *)reply->tag_list);
24019 diff -urNp linux-3.0.4/drivers/block/drbd/drbd_receiver.c linux-3.0.4/drivers/block/drbd/drbd_receiver.c
24020 --- linux-3.0.4/drivers/block/drbd/drbd_receiver.c 2011-07-21 22:17:23.000000000 -0400
24021 +++ linux-3.0.4/drivers/block/drbd/drbd_receiver.c 2011-08-23 21:47:55.000000000 -0400
24022 @@ -894,7 +894,7 @@ retry:
24023 sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
24024 sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
24025
24026 - atomic_set(&mdev->packet_seq, 0);
24027 + atomic_set_unchecked(&mdev->packet_seq, 0);
24028 mdev->peer_seq = 0;
24029
24030 drbd_thread_start(&mdev->asender);
24031 @@ -985,7 +985,7 @@ static enum finish_epoch drbd_may_finish
24032 do {
24033 next_epoch = NULL;
24034
24035 - epoch_size = atomic_read(&epoch->epoch_size);
24036 + epoch_size = atomic_read_unchecked(&epoch->epoch_size);
24037
24038 switch (ev & ~EV_CLEANUP) {
24039 case EV_PUT:
24040 @@ -1020,7 +1020,7 @@ static enum finish_epoch drbd_may_finish
24041 rv = FE_DESTROYED;
24042 } else {
24043 epoch->flags = 0;
24044 - atomic_set(&epoch->epoch_size, 0);
24045 + atomic_set_unchecked(&epoch->epoch_size, 0);
24046 /* atomic_set(&epoch->active, 0); is already zero */
24047 if (rv == FE_STILL_LIVE)
24048 rv = FE_RECYCLED;
24049 @@ -1191,14 +1191,14 @@ static int receive_Barrier(struct drbd_c
24050 drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
24051 drbd_flush(mdev);
24052
24053 - if (atomic_read(&mdev->current_epoch->epoch_size)) {
24054 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
24055 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
24056 if (epoch)
24057 break;
24058 }
24059
24060 epoch = mdev->current_epoch;
24061 - wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);
24062 + wait_event(mdev->ee_wait, atomic_read_unchecked(&epoch->epoch_size) == 0);
24063
24064 D_ASSERT(atomic_read(&epoch->active) == 0);
24065 D_ASSERT(epoch->flags == 0);
24066 @@ -1210,11 +1210,11 @@ static int receive_Barrier(struct drbd_c
24067 }
24068
24069 epoch->flags = 0;
24070 - atomic_set(&epoch->epoch_size, 0);
24071 + atomic_set_unchecked(&epoch->epoch_size, 0);
24072 atomic_set(&epoch->active, 0);
24073
24074 spin_lock(&mdev->epoch_lock);
24075 - if (atomic_read(&mdev->current_epoch->epoch_size)) {
24076 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
24077 list_add(&epoch->list, &mdev->current_epoch->list);
24078 mdev->current_epoch = epoch;
24079 mdev->epochs++;
24080 @@ -1663,7 +1663,7 @@ static int receive_Data(struct drbd_conf
24081 spin_unlock(&mdev->peer_seq_lock);
24082
24083 drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
24084 - atomic_inc(&mdev->current_epoch->epoch_size);
24085 + atomic_inc_unchecked(&mdev->current_epoch->epoch_size);
24086 return drbd_drain_block(mdev, data_size);
24087 }
24088
24089 @@ -1689,7 +1689,7 @@ static int receive_Data(struct drbd_conf
24090
24091 spin_lock(&mdev->epoch_lock);
24092 e->epoch = mdev->current_epoch;
24093 - atomic_inc(&e->epoch->epoch_size);
24094 + atomic_inc_unchecked(&e->epoch->epoch_size);
24095 atomic_inc(&e->epoch->active);
24096 spin_unlock(&mdev->epoch_lock);
24097
24098 @@ -3885,7 +3885,7 @@ static void drbd_disconnect(struct drbd_
24099 D_ASSERT(list_empty(&mdev->done_ee));
24100
24101 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
24102 - atomic_set(&mdev->current_epoch->epoch_size, 0);
24103 + atomic_set_unchecked(&mdev->current_epoch->epoch_size, 0);
24104 D_ASSERT(list_empty(&mdev->current_epoch->list));
24105 }
24106
24107 diff -urNp linux-3.0.4/drivers/block/nbd.c linux-3.0.4/drivers/block/nbd.c
24108 --- linux-3.0.4/drivers/block/nbd.c 2011-07-21 22:17:23.000000000 -0400
24109 +++ linux-3.0.4/drivers/block/nbd.c 2011-08-23 21:48:14.000000000 -0400
24110 @@ -157,6 +157,8 @@ static int sock_xmit(struct nbd_device *
24111 struct kvec iov;
24112 sigset_t blocked, oldset;
24113
24114 + pax_track_stack();
24115 +
24116 if (unlikely(!sock)) {
24117 printk(KERN_ERR "%s: Attempted %s on closed socket in sock_xmit\n",
24118 lo->disk->disk_name, (send ? "send" : "recv"));
24119 @@ -572,6 +574,8 @@ static void do_nbd_request(struct reques
24120 static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
24121 unsigned int cmd, unsigned long arg)
24122 {
24123 + pax_track_stack();
24124 +
24125 switch (cmd) {
24126 case NBD_DISCONNECT: {
24127 struct request sreq;
24128 diff -urNp linux-3.0.4/drivers/char/agp/frontend.c linux-3.0.4/drivers/char/agp/frontend.c
24129 --- linux-3.0.4/drivers/char/agp/frontend.c 2011-07-21 22:17:23.000000000 -0400
24130 +++ linux-3.0.4/drivers/char/agp/frontend.c 2011-08-23 21:47:55.000000000 -0400
24131 @@ -817,7 +817,7 @@ static int agpioc_reserve_wrap(struct ag
24132 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
24133 return -EFAULT;
24134
24135 - if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
24136 + if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
24137 return -EFAULT;
24138
24139 client = agp_find_client_by_pid(reserve.pid);
24140 diff -urNp linux-3.0.4/drivers/char/briq_panel.c linux-3.0.4/drivers/char/briq_panel.c
24141 --- linux-3.0.4/drivers/char/briq_panel.c 2011-07-21 22:17:23.000000000 -0400
24142 +++ linux-3.0.4/drivers/char/briq_panel.c 2011-08-23 21:48:14.000000000 -0400
24143 @@ -9,6 +9,7 @@
24144 #include <linux/types.h>
24145 #include <linux/errno.h>
24146 #include <linux/tty.h>
24147 +#include <linux/mutex.h>
24148 #include <linux/timer.h>
24149 #include <linux/kernel.h>
24150 #include <linux/wait.h>
24151 @@ -34,6 +35,7 @@ static int vfd_is_open;
24152 static unsigned char vfd[40];
24153 static int vfd_cursor;
24154 static unsigned char ledpb, led;
24155 +static DEFINE_MUTEX(vfd_mutex);
24156
24157 static void update_vfd(void)
24158 {
24159 @@ -140,12 +142,15 @@ static ssize_t briq_panel_write(struct f
24160 if (!vfd_is_open)
24161 return -EBUSY;
24162
24163 + mutex_lock(&vfd_mutex);
24164 for (;;) {
24165 char c;
24166 if (!indx)
24167 break;
24168 - if (get_user(c, buf))
24169 + if (get_user(c, buf)) {
24170 + mutex_unlock(&vfd_mutex);
24171 return -EFAULT;
24172 + }
24173 if (esc) {
24174 set_led(c);
24175 esc = 0;
24176 @@ -175,6 +180,7 @@ static ssize_t briq_panel_write(struct f
24177 buf++;
24178 }
24179 update_vfd();
24180 + mutex_unlock(&vfd_mutex);
24181
24182 return len;
24183 }
24184 diff -urNp linux-3.0.4/drivers/char/genrtc.c linux-3.0.4/drivers/char/genrtc.c
24185 --- linux-3.0.4/drivers/char/genrtc.c 2011-07-21 22:17:23.000000000 -0400
24186 +++ linux-3.0.4/drivers/char/genrtc.c 2011-08-23 21:48:14.000000000 -0400
24187 @@ -273,6 +273,7 @@ static int gen_rtc_ioctl(struct file *fi
24188 switch (cmd) {
24189
24190 case RTC_PLL_GET:
24191 + memset(&pll, 0, sizeof(pll));
24192 if (get_rtc_pll(&pll))
24193 return -EINVAL;
24194 else
24195 diff -urNp linux-3.0.4/drivers/char/hpet.c linux-3.0.4/drivers/char/hpet.c
24196 --- linux-3.0.4/drivers/char/hpet.c 2011-07-21 22:17:23.000000000 -0400
24197 +++ linux-3.0.4/drivers/char/hpet.c 2011-08-23 21:47:55.000000000 -0400
24198 @@ -572,7 +572,7 @@ static inline unsigned long hpet_time_di
24199 }
24200
24201 static int
24202 -hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
24203 +hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
24204 struct hpet_info *info)
24205 {
24206 struct hpet_timer __iomem *timer;
24207 diff -urNp linux-3.0.4/drivers/char/ipmi/ipmi_msghandler.c linux-3.0.4/drivers/char/ipmi/ipmi_msghandler.c
24208 --- linux-3.0.4/drivers/char/ipmi/ipmi_msghandler.c 2011-07-21 22:17:23.000000000 -0400
24209 +++ linux-3.0.4/drivers/char/ipmi/ipmi_msghandler.c 2011-08-23 21:48:14.000000000 -0400
24210 @@ -415,7 +415,7 @@ struct ipmi_smi {
24211 struct proc_dir_entry *proc_dir;
24212 char proc_dir_name[10];
24213
24214 - atomic_t stats[IPMI_NUM_STATS];
24215 + atomic_unchecked_t stats[IPMI_NUM_STATS];
24216
24217 /*
24218 * run_to_completion duplicate of smb_info, smi_info
24219 @@ -448,9 +448,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
24220
24221
24222 #define ipmi_inc_stat(intf, stat) \
24223 - atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
24224 + atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
24225 #define ipmi_get_stat(intf, stat) \
24226 - ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
24227 + ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
24228
24229 static int is_lan_addr(struct ipmi_addr *addr)
24230 {
24231 @@ -2868,7 +2868,7 @@ int ipmi_register_smi(struct ipmi_smi_ha
24232 INIT_LIST_HEAD(&intf->cmd_rcvrs);
24233 init_waitqueue_head(&intf->waitq);
24234 for (i = 0; i < IPMI_NUM_STATS; i++)
24235 - atomic_set(&intf->stats[i], 0);
24236 + atomic_set_unchecked(&intf->stats[i], 0);
24237
24238 intf->proc_dir = NULL;
24239
24240 @@ -4220,6 +4220,8 @@ static void send_panic_events(char *str)
24241 struct ipmi_smi_msg smi_msg;
24242 struct ipmi_recv_msg recv_msg;
24243
24244 + pax_track_stack();
24245 +
24246 si = (struct ipmi_system_interface_addr *) &addr;
24247 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
24248 si->channel = IPMI_BMC_CHANNEL;
24249 diff -urNp linux-3.0.4/drivers/char/ipmi/ipmi_si_intf.c linux-3.0.4/drivers/char/ipmi/ipmi_si_intf.c
24250 --- linux-3.0.4/drivers/char/ipmi/ipmi_si_intf.c 2011-07-21 22:17:23.000000000 -0400
24251 +++ linux-3.0.4/drivers/char/ipmi/ipmi_si_intf.c 2011-08-23 21:47:55.000000000 -0400
24252 @@ -277,7 +277,7 @@ struct smi_info {
24253 unsigned char slave_addr;
24254
24255 /* Counters and things for the proc filesystem. */
24256 - atomic_t stats[SI_NUM_STATS];
24257 + atomic_unchecked_t stats[SI_NUM_STATS];
24258
24259 struct task_struct *thread;
24260
24261 @@ -286,9 +286,9 @@ struct smi_info {
24262 };
24263
24264 #define smi_inc_stat(smi, stat) \
24265 - atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
24266 + atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
24267 #define smi_get_stat(smi, stat) \
24268 - ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
24269 + ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
24270
24271 #define SI_MAX_PARMS 4
24272
24273 @@ -3230,7 +3230,7 @@ static int try_smi_init(struct smi_info
24274 atomic_set(&new_smi->req_events, 0);
24275 new_smi->run_to_completion = 0;
24276 for (i = 0; i < SI_NUM_STATS; i++)
24277 - atomic_set(&new_smi->stats[i], 0);
24278 + atomic_set_unchecked(&new_smi->stats[i], 0);
24279
24280 new_smi->interrupt_disabled = 1;
24281 atomic_set(&new_smi->stop_operation, 0);
24282 diff -urNp linux-3.0.4/drivers/char/Kconfig linux-3.0.4/drivers/char/Kconfig
24283 --- linux-3.0.4/drivers/char/Kconfig 2011-07-21 22:17:23.000000000 -0400
24284 +++ linux-3.0.4/drivers/char/Kconfig 2011-08-23 21:48:14.000000000 -0400
24285 @@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
24286
24287 config DEVKMEM
24288 bool "/dev/kmem virtual device support"
24289 - default y
24290 + default n
24291 + depends on !GRKERNSEC_KMEM
24292 help
24293 Say Y here if you want to support the /dev/kmem device. The
24294 /dev/kmem device is rarely used, but can be used for certain
24295 @@ -596,6 +597,7 @@ config DEVPORT
24296 bool
24297 depends on !M68K
24298 depends on ISA || PCI
24299 + depends on !GRKERNSEC_KMEM
24300 default y
24301
24302 source "drivers/s390/char/Kconfig"
24303 diff -urNp linux-3.0.4/drivers/char/mem.c linux-3.0.4/drivers/char/mem.c
24304 --- linux-3.0.4/drivers/char/mem.c 2011-07-21 22:17:23.000000000 -0400
24305 +++ linux-3.0.4/drivers/char/mem.c 2011-08-23 21:48:14.000000000 -0400
24306 @@ -18,6 +18,7 @@
24307 #include <linux/raw.h>
24308 #include <linux/tty.h>
24309 #include <linux/capability.h>
24310 +#include <linux/security.h>
24311 #include <linux/ptrace.h>
24312 #include <linux/device.h>
24313 #include <linux/highmem.h>
24314 @@ -34,6 +35,10 @@
24315 # include <linux/efi.h>
24316 #endif
24317
24318 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
24319 +extern struct file_operations grsec_fops;
24320 +#endif
24321 +
24322 static inline unsigned long size_inside_page(unsigned long start,
24323 unsigned long size)
24324 {
24325 @@ -65,9 +70,13 @@ static inline int range_is_allowed(unsig
24326
24327 while (cursor < to) {
24328 if (!devmem_is_allowed(pfn)) {
24329 +#ifdef CONFIG_GRKERNSEC_KMEM
24330 + gr_handle_mem_readwrite(from, to);
24331 +#else
24332 printk(KERN_INFO
24333 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
24334 current->comm, from, to);
24335 +#endif
24336 return 0;
24337 }
24338 cursor += PAGE_SIZE;
24339 @@ -75,6 +84,11 @@ static inline int range_is_allowed(unsig
24340 }
24341 return 1;
24342 }
24343 +#elif defined(CONFIG_GRKERNSEC_KMEM)
24344 +static inline int range_is_allowed(unsigned long pfn, unsigned long size)
24345 +{
24346 + return 0;
24347 +}
24348 #else
24349 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
24350 {
24351 @@ -117,6 +131,7 @@ static ssize_t read_mem(struct file *fil
24352
24353 while (count > 0) {
24354 unsigned long remaining;
24355 + char *temp;
24356
24357 sz = size_inside_page(p, count);
24358
24359 @@ -132,7 +147,23 @@ static ssize_t read_mem(struct file *fil
24360 if (!ptr)
24361 return -EFAULT;
24362
24363 - remaining = copy_to_user(buf, ptr, sz);
24364 +#ifdef CONFIG_PAX_USERCOPY
24365 + temp = kmalloc(sz, GFP_KERNEL);
24366 + if (!temp) {
24367 + unxlate_dev_mem_ptr(p, ptr);
24368 + return -ENOMEM;
24369 + }
24370 + memcpy(temp, ptr, sz);
24371 +#else
24372 + temp = ptr;
24373 +#endif
24374 +
24375 + remaining = copy_to_user(buf, temp, sz);
24376 +
24377 +#ifdef CONFIG_PAX_USERCOPY
24378 + kfree(temp);
24379 +#endif
24380 +
24381 unxlate_dev_mem_ptr(p, ptr);
24382 if (remaining)
24383 return -EFAULT;
24384 @@ -395,9 +426,8 @@ static ssize_t read_kmem(struct file *fi
24385 size_t count, loff_t *ppos)
24386 {
24387 unsigned long p = *ppos;
24388 - ssize_t low_count, read, sz;
24389 + ssize_t low_count, read, sz, err = 0;
24390 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
24391 - int err = 0;
24392
24393 read = 0;
24394 if (p < (unsigned long) high_memory) {
24395 @@ -419,6 +449,8 @@ static ssize_t read_kmem(struct file *fi
24396 }
24397 #endif
24398 while (low_count > 0) {
24399 + char *temp;
24400 +
24401 sz = size_inside_page(p, low_count);
24402
24403 /*
24404 @@ -428,7 +460,22 @@ static ssize_t read_kmem(struct file *fi
24405 */
24406 kbuf = xlate_dev_kmem_ptr((char *)p);
24407
24408 - if (copy_to_user(buf, kbuf, sz))
24409 +#ifdef CONFIG_PAX_USERCOPY
24410 + temp = kmalloc(sz, GFP_KERNEL);
24411 + if (!temp)
24412 + return -ENOMEM;
24413 + memcpy(temp, kbuf, sz);
24414 +#else
24415 + temp = kbuf;
24416 +#endif
24417 +
24418 + err = copy_to_user(buf, temp, sz);
24419 +
24420 +#ifdef CONFIG_PAX_USERCOPY
24421 + kfree(temp);
24422 +#endif
24423 +
24424 + if (err)
24425 return -EFAULT;
24426 buf += sz;
24427 p += sz;
24428 @@ -866,6 +913,9 @@ static const struct memdev {
24429 #ifdef CONFIG_CRASH_DUMP
24430 [12] = { "oldmem", 0, &oldmem_fops, NULL },
24431 #endif
24432 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
24433 + [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
24434 +#endif
24435 };
24436
24437 static int memory_open(struct inode *inode, struct file *filp)
24438 diff -urNp linux-3.0.4/drivers/char/nvram.c linux-3.0.4/drivers/char/nvram.c
24439 --- linux-3.0.4/drivers/char/nvram.c 2011-07-21 22:17:23.000000000 -0400
24440 +++ linux-3.0.4/drivers/char/nvram.c 2011-08-23 21:47:55.000000000 -0400
24441 @@ -246,7 +246,7 @@ static ssize_t nvram_read(struct file *f
24442
24443 spin_unlock_irq(&rtc_lock);
24444
24445 - if (copy_to_user(buf, contents, tmp - contents))
24446 + if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
24447 return -EFAULT;
24448
24449 *ppos = i;
24450 diff -urNp linux-3.0.4/drivers/char/random.c linux-3.0.4/drivers/char/random.c
24451 --- linux-3.0.4/drivers/char/random.c 2011-09-02 18:11:21.000000000 -0400
24452 +++ linux-3.0.4/drivers/char/random.c 2011-08-23 21:48:14.000000000 -0400
24453 @@ -261,8 +261,13 @@
24454 /*
24455 * Configuration information
24456 */
24457 +#ifdef CONFIG_GRKERNSEC_RANDNET
24458 +#define INPUT_POOL_WORDS 512
24459 +#define OUTPUT_POOL_WORDS 128
24460 +#else
24461 #define INPUT_POOL_WORDS 128
24462 #define OUTPUT_POOL_WORDS 32
24463 +#endif
24464 #define SEC_XFER_SIZE 512
24465 #define EXTRACT_SIZE 10
24466
24467 @@ -300,10 +305,17 @@ static struct poolinfo {
24468 int poolwords;
24469 int tap1, tap2, tap3, tap4, tap5;
24470 } poolinfo_table[] = {
24471 +#ifdef CONFIG_GRKERNSEC_RANDNET
24472 + /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
24473 + { 512, 411, 308, 208, 104, 1 },
24474 + /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
24475 + { 128, 103, 76, 51, 25, 1 },
24476 +#else
24477 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
24478 { 128, 103, 76, 51, 25, 1 },
24479 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
24480 { 32, 26, 20, 14, 7, 1 },
24481 +#endif
24482 #if 0
24483 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
24484 { 2048, 1638, 1231, 819, 411, 1 },
24485 @@ -909,7 +921,7 @@ static ssize_t extract_entropy_user(stru
24486
24487 extract_buf(r, tmp);
24488 i = min_t(int, nbytes, EXTRACT_SIZE);
24489 - if (copy_to_user(buf, tmp, i)) {
24490 + if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
24491 ret = -EFAULT;
24492 break;
24493 }
24494 @@ -1214,7 +1226,7 @@ EXPORT_SYMBOL(generate_random_uuid);
24495 #include <linux/sysctl.h>
24496
24497 static int min_read_thresh = 8, min_write_thresh;
24498 -static int max_read_thresh = INPUT_POOL_WORDS * 32;
24499 +static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
24500 static int max_write_thresh = INPUT_POOL_WORDS * 32;
24501 static char sysctl_bootid[16];
24502
24503 diff -urNp linux-3.0.4/drivers/char/sonypi.c linux-3.0.4/drivers/char/sonypi.c
24504 --- linux-3.0.4/drivers/char/sonypi.c 2011-07-21 22:17:23.000000000 -0400
24505 +++ linux-3.0.4/drivers/char/sonypi.c 2011-08-23 21:47:55.000000000 -0400
24506 @@ -55,6 +55,7 @@
24507 #include <asm/uaccess.h>
24508 #include <asm/io.h>
24509 #include <asm/system.h>
24510 +#include <asm/local.h>
24511
24512 #include <linux/sonypi.h>
24513
24514 @@ -491,7 +492,7 @@ static struct sonypi_device {
24515 spinlock_t fifo_lock;
24516 wait_queue_head_t fifo_proc_list;
24517 struct fasync_struct *fifo_async;
24518 - int open_count;
24519 + local_t open_count;
24520 int model;
24521 struct input_dev *input_jog_dev;
24522 struct input_dev *input_key_dev;
24523 @@ -898,7 +899,7 @@ static int sonypi_misc_fasync(int fd, st
24524 static int sonypi_misc_release(struct inode *inode, struct file *file)
24525 {
24526 mutex_lock(&sonypi_device.lock);
24527 - sonypi_device.open_count--;
24528 + local_dec(&sonypi_device.open_count);
24529 mutex_unlock(&sonypi_device.lock);
24530 return 0;
24531 }
24532 @@ -907,9 +908,9 @@ static int sonypi_misc_open(struct inode
24533 {
24534 mutex_lock(&sonypi_device.lock);
24535 /* Flush input queue on first open */
24536 - if (!sonypi_device.open_count)
24537 + if (!local_read(&sonypi_device.open_count))
24538 kfifo_reset(&sonypi_device.fifo);
24539 - sonypi_device.open_count++;
24540 + local_inc(&sonypi_device.open_count);
24541 mutex_unlock(&sonypi_device.lock);
24542
24543 return 0;
24544 diff -urNp linux-3.0.4/drivers/char/tpm/tpm_bios.c linux-3.0.4/drivers/char/tpm/tpm_bios.c
24545 --- linux-3.0.4/drivers/char/tpm/tpm_bios.c 2011-07-21 22:17:23.000000000 -0400
24546 +++ linux-3.0.4/drivers/char/tpm/tpm_bios.c 2011-08-23 21:47:55.000000000 -0400
24547 @@ -173,7 +173,7 @@ static void *tpm_bios_measurements_start
24548 event = addr;
24549
24550 if ((event->event_type == 0 && event->event_size == 0) ||
24551 - ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
24552 + (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
24553 return NULL;
24554
24555 return addr;
24556 @@ -198,7 +198,7 @@ static void *tpm_bios_measurements_next(
24557 return NULL;
24558
24559 if ((event->event_type == 0 && event->event_size == 0) ||
24560 - ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
24561 + (event->event_size >= limit - v - sizeof(struct tcpa_event)))
24562 return NULL;
24563
24564 (*pos)++;
24565 @@ -291,7 +291,8 @@ static int tpm_binary_bios_measurements_
24566 int i;
24567
24568 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
24569 - seq_putc(m, data[i]);
24570 + if (!seq_putc(m, data[i]))
24571 + return -EFAULT;
24572
24573 return 0;
24574 }
24575 @@ -410,6 +411,11 @@ static int read_log(struct tpm_bios_log
24576 log->bios_event_log_end = log->bios_event_log + len;
24577
24578 virt = acpi_os_map_memory(start, len);
24579 + if (!virt) {
24580 + kfree(log->bios_event_log);
24581 + log->bios_event_log = NULL;
24582 + return -EFAULT;
24583 + }
24584
24585 memcpy(log->bios_event_log, virt, len);
24586
24587 diff -urNp linux-3.0.4/drivers/char/tpm/tpm.c linux-3.0.4/drivers/char/tpm/tpm.c
24588 --- linux-3.0.4/drivers/char/tpm/tpm.c 2011-07-21 22:17:23.000000000 -0400
24589 +++ linux-3.0.4/drivers/char/tpm/tpm.c 2011-08-23 21:48:14.000000000 -0400
24590 @@ -411,7 +411,7 @@ static ssize_t tpm_transmit(struct tpm_c
24591 chip->vendor.req_complete_val)
24592 goto out_recv;
24593
24594 - if ((status == chip->vendor.req_canceled)) {
24595 + if (status == chip->vendor.req_canceled) {
24596 dev_err(chip->dev, "Operation Canceled\n");
24597 rc = -ECANCELED;
24598 goto out;
24599 @@ -844,6 +844,8 @@ ssize_t tpm_show_pubek(struct device *de
24600
24601 struct tpm_chip *chip = dev_get_drvdata(dev);
24602
24603 + pax_track_stack();
24604 +
24605 tpm_cmd.header.in = tpm_readpubek_header;
24606 err = transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE,
24607 "attempting to read the PUBEK");
24608 diff -urNp linux-3.0.4/drivers/crypto/hifn_795x.c linux-3.0.4/drivers/crypto/hifn_795x.c
24609 --- linux-3.0.4/drivers/crypto/hifn_795x.c 2011-07-21 22:17:23.000000000 -0400
24610 +++ linux-3.0.4/drivers/crypto/hifn_795x.c 2011-08-23 21:48:14.000000000 -0400
24611 @@ -1655,6 +1655,8 @@ static int hifn_test(struct hifn_device
24612 0xCA, 0x34, 0x2B, 0x2E};
24613 struct scatterlist sg;
24614
24615 + pax_track_stack();
24616 +
24617 memset(src, 0, sizeof(src));
24618 memset(ctx.key, 0, sizeof(ctx.key));
24619
24620 diff -urNp linux-3.0.4/drivers/crypto/padlock-aes.c linux-3.0.4/drivers/crypto/padlock-aes.c
24621 --- linux-3.0.4/drivers/crypto/padlock-aes.c 2011-07-21 22:17:23.000000000 -0400
24622 +++ linux-3.0.4/drivers/crypto/padlock-aes.c 2011-08-23 21:48:14.000000000 -0400
24623 @@ -109,6 +109,8 @@ static int aes_set_key(struct crypto_tfm
24624 struct crypto_aes_ctx gen_aes;
24625 int cpu;
24626
24627 + pax_track_stack();
24628 +
24629 if (key_len % 8) {
24630 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
24631 return -EINVAL;
24632 diff -urNp linux-3.0.4/drivers/edac/edac_pci_sysfs.c linux-3.0.4/drivers/edac/edac_pci_sysfs.c
24633 --- linux-3.0.4/drivers/edac/edac_pci_sysfs.c 2011-07-21 22:17:23.000000000 -0400
24634 +++ linux-3.0.4/drivers/edac/edac_pci_sysfs.c 2011-08-23 21:47:55.000000000 -0400
24635 @@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log
24636 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
24637 static int edac_pci_poll_msec = 1000; /* one second workq period */
24638
24639 -static atomic_t pci_parity_count = ATOMIC_INIT(0);
24640 -static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
24641 +static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
24642 +static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
24643
24644 static struct kobject *edac_pci_top_main_kobj;
24645 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
24646 @@ -582,7 +582,7 @@ static void edac_pci_dev_parity_test(str
24647 edac_printk(KERN_CRIT, EDAC_PCI,
24648 "Signaled System Error on %s\n",
24649 pci_name(dev));
24650 - atomic_inc(&pci_nonparity_count);
24651 + atomic_inc_unchecked(&pci_nonparity_count);
24652 }
24653
24654 if (status & (PCI_STATUS_PARITY)) {
24655 @@ -590,7 +590,7 @@ static void edac_pci_dev_parity_test(str
24656 "Master Data Parity Error on %s\n",
24657 pci_name(dev));
24658
24659 - atomic_inc(&pci_parity_count);
24660 + atomic_inc_unchecked(&pci_parity_count);
24661 }
24662
24663 if (status & (PCI_STATUS_DETECTED_PARITY)) {
24664 @@ -598,7 +598,7 @@ static void edac_pci_dev_parity_test(str
24665 "Detected Parity Error on %s\n",
24666 pci_name(dev));
24667
24668 - atomic_inc(&pci_parity_count);
24669 + atomic_inc_unchecked(&pci_parity_count);
24670 }
24671 }
24672
24673 @@ -619,7 +619,7 @@ static void edac_pci_dev_parity_test(str
24674 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
24675 "Signaled System Error on %s\n",
24676 pci_name(dev));
24677 - atomic_inc(&pci_nonparity_count);
24678 + atomic_inc_unchecked(&pci_nonparity_count);
24679 }
24680
24681 if (status & (PCI_STATUS_PARITY)) {
24682 @@ -627,7 +627,7 @@ static void edac_pci_dev_parity_test(str
24683 "Master Data Parity Error on "
24684 "%s\n", pci_name(dev));
24685
24686 - atomic_inc(&pci_parity_count);
24687 + atomic_inc_unchecked(&pci_parity_count);
24688 }
24689
24690 if (status & (PCI_STATUS_DETECTED_PARITY)) {
24691 @@ -635,7 +635,7 @@ static void edac_pci_dev_parity_test(str
24692 "Detected Parity Error on %s\n",
24693 pci_name(dev));
24694
24695 - atomic_inc(&pci_parity_count);
24696 + atomic_inc_unchecked(&pci_parity_count);
24697 }
24698 }
24699 }
24700 @@ -677,7 +677,7 @@ void edac_pci_do_parity_check(void)
24701 if (!check_pci_errors)
24702 return;
24703
24704 - before_count = atomic_read(&pci_parity_count);
24705 + before_count = atomic_read_unchecked(&pci_parity_count);
24706
24707 /* scan all PCI devices looking for a Parity Error on devices and
24708 * bridges.
24709 @@ -689,7 +689,7 @@ void edac_pci_do_parity_check(void)
24710 /* Only if operator has selected panic on PCI Error */
24711 if (edac_pci_get_panic_on_pe()) {
24712 /* If the count is different 'after' from 'before' */
24713 - if (before_count != atomic_read(&pci_parity_count))
24714 + if (before_count != atomic_read_unchecked(&pci_parity_count))
24715 panic("EDAC: PCI Parity Error");
24716 }
24717 }
24718 diff -urNp linux-3.0.4/drivers/edac/mce_amd.h linux-3.0.4/drivers/edac/mce_amd.h
24719 --- linux-3.0.4/drivers/edac/mce_amd.h 2011-07-21 22:17:23.000000000 -0400
24720 +++ linux-3.0.4/drivers/edac/mce_amd.h 2011-08-23 21:47:55.000000000 -0400
24721 @@ -83,7 +83,7 @@ struct amd_decoder_ops {
24722 bool (*dc_mce)(u16, u8);
24723 bool (*ic_mce)(u16, u8);
24724 bool (*nb_mce)(u16, u8);
24725 -};
24726 +} __no_const;
24727
24728 void amd_report_gart_errors(bool);
24729 void amd_register_ecc_decoder(void (*f)(int, struct mce *, u32));
24730 diff -urNp linux-3.0.4/drivers/firewire/core-card.c linux-3.0.4/drivers/firewire/core-card.c
24731 --- linux-3.0.4/drivers/firewire/core-card.c 2011-07-21 22:17:23.000000000 -0400
24732 +++ linux-3.0.4/drivers/firewire/core-card.c 2011-08-23 21:47:55.000000000 -0400
24733 @@ -657,7 +657,7 @@ void fw_card_release(struct kref *kref)
24734
24735 void fw_core_remove_card(struct fw_card *card)
24736 {
24737 - struct fw_card_driver dummy_driver = dummy_driver_template;
24738 + fw_card_driver_no_const dummy_driver = dummy_driver_template;
24739
24740 card->driver->update_phy_reg(card, 4,
24741 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
24742 diff -urNp linux-3.0.4/drivers/firewire/core-cdev.c linux-3.0.4/drivers/firewire/core-cdev.c
24743 --- linux-3.0.4/drivers/firewire/core-cdev.c 2011-09-02 18:11:21.000000000 -0400
24744 +++ linux-3.0.4/drivers/firewire/core-cdev.c 2011-08-23 21:47:55.000000000 -0400
24745 @@ -1313,8 +1313,7 @@ static int init_iso_resource(struct clie
24746 int ret;
24747
24748 if ((request->channels == 0 && request->bandwidth == 0) ||
24749 - request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
24750 - request->bandwidth < 0)
24751 + request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
24752 return -EINVAL;
24753
24754 r = kmalloc(sizeof(*r), GFP_KERNEL);
24755 diff -urNp linux-3.0.4/drivers/firewire/core.h linux-3.0.4/drivers/firewire/core.h
24756 --- linux-3.0.4/drivers/firewire/core.h 2011-07-21 22:17:23.000000000 -0400
24757 +++ linux-3.0.4/drivers/firewire/core.h 2011-08-23 21:47:55.000000000 -0400
24758 @@ -101,6 +101,7 @@ struct fw_card_driver {
24759
24760 int (*stop_iso)(struct fw_iso_context *ctx);
24761 };
24762 +typedef struct fw_card_driver __no_const fw_card_driver_no_const;
24763
24764 void fw_card_initialize(struct fw_card *card,
24765 const struct fw_card_driver *driver, struct device *device);
24766 diff -urNp linux-3.0.4/drivers/firewire/core-transaction.c linux-3.0.4/drivers/firewire/core-transaction.c
24767 --- linux-3.0.4/drivers/firewire/core-transaction.c 2011-07-21 22:17:23.000000000 -0400
24768 +++ linux-3.0.4/drivers/firewire/core-transaction.c 2011-08-23 21:48:14.000000000 -0400
24769 @@ -37,6 +37,7 @@
24770 #include <linux/timer.h>
24771 #include <linux/types.h>
24772 #include <linux/workqueue.h>
24773 +#include <linux/sched.h>
24774
24775 #include <asm/byteorder.h>
24776
24777 @@ -422,6 +423,8 @@ int fw_run_transaction(struct fw_card *c
24778 struct transaction_callback_data d;
24779 struct fw_transaction t;
24780
24781 + pax_track_stack();
24782 +
24783 init_timer_on_stack(&t.split_timeout_timer);
24784 init_completion(&d.done);
24785 d.payload = payload;
24786 diff -urNp linux-3.0.4/drivers/firmware/dmi_scan.c linux-3.0.4/drivers/firmware/dmi_scan.c
24787 --- linux-3.0.4/drivers/firmware/dmi_scan.c 2011-07-21 22:17:23.000000000 -0400
24788 +++ linux-3.0.4/drivers/firmware/dmi_scan.c 2011-08-23 21:47:55.000000000 -0400
24789 @@ -449,11 +449,6 @@ void __init dmi_scan_machine(void)
24790 }
24791 }
24792 else {
24793 - /*
24794 - * no iounmap() for that ioremap(); it would be a no-op, but
24795 - * it's so early in setup that sucker gets confused into doing
24796 - * what it shouldn't if we actually call it.
24797 - */
24798 p = dmi_ioremap(0xF0000, 0x10000);
24799 if (p == NULL)
24800 goto error;
24801 diff -urNp linux-3.0.4/drivers/gpio/vr41xx_giu.c linux-3.0.4/drivers/gpio/vr41xx_giu.c
24802 --- linux-3.0.4/drivers/gpio/vr41xx_giu.c 2011-07-21 22:17:23.000000000 -0400
24803 +++ linux-3.0.4/drivers/gpio/vr41xx_giu.c 2011-08-23 21:47:55.000000000 -0400
24804 @@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
24805 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
24806 maskl, pendl, maskh, pendh);
24807
24808 - atomic_inc(&irq_err_count);
24809 + atomic_inc_unchecked(&irq_err_count);
24810
24811 return -EINVAL;
24812 }
24813 diff -urNp linux-3.0.4/drivers/gpu/drm/drm_crtc_helper.c linux-3.0.4/drivers/gpu/drm/drm_crtc_helper.c
24814 --- linux-3.0.4/drivers/gpu/drm/drm_crtc_helper.c 2011-07-21 22:17:23.000000000 -0400
24815 +++ linux-3.0.4/drivers/gpu/drm/drm_crtc_helper.c 2011-08-23 21:48:14.000000000 -0400
24816 @@ -276,7 +276,7 @@ static bool drm_encoder_crtc_ok(struct d
24817 struct drm_crtc *tmp;
24818 int crtc_mask = 1;
24819
24820 - WARN(!crtc, "checking null crtc?\n");
24821 + BUG_ON(!crtc);
24822
24823 dev = crtc->dev;
24824
24825 @@ -343,6 +343,8 @@ bool drm_crtc_helper_set_mode(struct drm
24826 struct drm_encoder *encoder;
24827 bool ret = true;
24828
24829 + pax_track_stack();
24830 +
24831 crtc->enabled = drm_helper_crtc_in_use(crtc);
24832 if (!crtc->enabled)
24833 return true;
24834 diff -urNp linux-3.0.4/drivers/gpu/drm/drm_drv.c linux-3.0.4/drivers/gpu/drm/drm_drv.c
24835 --- linux-3.0.4/drivers/gpu/drm/drm_drv.c 2011-07-21 22:17:23.000000000 -0400
24836 +++ linux-3.0.4/drivers/gpu/drm/drm_drv.c 2011-08-23 21:47:55.000000000 -0400
24837 @@ -386,7 +386,7 @@ long drm_ioctl(struct file *filp,
24838
24839 dev = file_priv->minor->dev;
24840 atomic_inc(&dev->ioctl_count);
24841 - atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
24842 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
24843 ++file_priv->ioctl_count;
24844
24845 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
24846 diff -urNp linux-3.0.4/drivers/gpu/drm/drm_fops.c linux-3.0.4/drivers/gpu/drm/drm_fops.c
24847 --- linux-3.0.4/drivers/gpu/drm/drm_fops.c 2011-07-21 22:17:23.000000000 -0400
24848 +++ linux-3.0.4/drivers/gpu/drm/drm_fops.c 2011-08-23 21:47:55.000000000 -0400
24849 @@ -70,7 +70,7 @@ static int drm_setup(struct drm_device *
24850 }
24851
24852 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
24853 - atomic_set(&dev->counts[i], 0);
24854 + atomic_set_unchecked(&dev->counts[i], 0);
24855
24856 dev->sigdata.lock = NULL;
24857
24858 @@ -134,8 +134,8 @@ int drm_open(struct inode *inode, struct
24859
24860 retcode = drm_open_helper(inode, filp, dev);
24861 if (!retcode) {
24862 - atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
24863 - if (!dev->open_count++)
24864 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
24865 + if (local_inc_return(&dev->open_count) == 1)
24866 retcode = drm_setup(dev);
24867 }
24868 if (!retcode) {
24869 @@ -472,7 +472,7 @@ int drm_release(struct inode *inode, str
24870
24871 mutex_lock(&drm_global_mutex);
24872
24873 - DRM_DEBUG("open_count = %d\n", dev->open_count);
24874 + DRM_DEBUG("open_count = %d\n", local_read(&dev->open_count));
24875
24876 if (dev->driver->preclose)
24877 dev->driver->preclose(dev, file_priv);
24878 @@ -484,7 +484,7 @@ int drm_release(struct inode *inode, str
24879 DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
24880 task_pid_nr(current),
24881 (long)old_encode_dev(file_priv->minor->device),
24882 - dev->open_count);
24883 + local_read(&dev->open_count));
24884
24885 /* if the master has gone away we can't do anything with the lock */
24886 if (file_priv->minor->master)
24887 @@ -565,8 +565,8 @@ int drm_release(struct inode *inode, str
24888 * End inline drm_release
24889 */
24890
24891 - atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
24892 - if (!--dev->open_count) {
24893 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
24894 + if (local_dec_and_test(&dev->open_count)) {
24895 if (atomic_read(&dev->ioctl_count)) {
24896 DRM_ERROR("Device busy: %d\n",
24897 atomic_read(&dev->ioctl_count));
24898 diff -urNp linux-3.0.4/drivers/gpu/drm/drm_global.c linux-3.0.4/drivers/gpu/drm/drm_global.c
24899 --- linux-3.0.4/drivers/gpu/drm/drm_global.c 2011-07-21 22:17:23.000000000 -0400
24900 +++ linux-3.0.4/drivers/gpu/drm/drm_global.c 2011-08-23 21:47:55.000000000 -0400
24901 @@ -36,7 +36,7 @@
24902 struct drm_global_item {
24903 struct mutex mutex;
24904 void *object;
24905 - int refcount;
24906 + atomic_t refcount;
24907 };
24908
24909 static struct drm_global_item glob[DRM_GLOBAL_NUM];
24910 @@ -49,7 +49,7 @@ void drm_global_init(void)
24911 struct drm_global_item *item = &glob[i];
24912 mutex_init(&item->mutex);
24913 item->object = NULL;
24914 - item->refcount = 0;
24915 + atomic_set(&item->refcount, 0);
24916 }
24917 }
24918
24919 @@ -59,7 +59,7 @@ void drm_global_release(void)
24920 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
24921 struct drm_global_item *item = &glob[i];
24922 BUG_ON(item->object != NULL);
24923 - BUG_ON(item->refcount != 0);
24924 + BUG_ON(atomic_read(&item->refcount) != 0);
24925 }
24926 }
24927
24928 @@ -70,7 +70,7 @@ int drm_global_item_ref(struct drm_globa
24929 void *object;
24930
24931 mutex_lock(&item->mutex);
24932 - if (item->refcount == 0) {
24933 + if (atomic_read(&item->refcount) == 0) {
24934 item->object = kzalloc(ref->size, GFP_KERNEL);
24935 if (unlikely(item->object == NULL)) {
24936 ret = -ENOMEM;
24937 @@ -83,7 +83,7 @@ int drm_global_item_ref(struct drm_globa
24938 goto out_err;
24939
24940 }
24941 - ++item->refcount;
24942 + atomic_inc(&item->refcount);
24943 ref->object = item->object;
24944 object = item->object;
24945 mutex_unlock(&item->mutex);
24946 @@ -100,9 +100,9 @@ void drm_global_item_unref(struct drm_gl
24947 struct drm_global_item *item = &glob[ref->global_type];
24948
24949 mutex_lock(&item->mutex);
24950 - BUG_ON(item->refcount == 0);
24951 + BUG_ON(atomic_read(&item->refcount) == 0);
24952 BUG_ON(ref->object != item->object);
24953 - if (--item->refcount == 0) {
24954 + if (atomic_dec_and_test(&item->refcount)) {
24955 ref->release(ref);
24956 item->object = NULL;
24957 }
24958 diff -urNp linux-3.0.4/drivers/gpu/drm/drm_info.c linux-3.0.4/drivers/gpu/drm/drm_info.c
24959 --- linux-3.0.4/drivers/gpu/drm/drm_info.c 2011-07-21 22:17:23.000000000 -0400
24960 +++ linux-3.0.4/drivers/gpu/drm/drm_info.c 2011-08-23 21:48:14.000000000 -0400
24961 @@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void
24962 struct drm_local_map *map;
24963 struct drm_map_list *r_list;
24964
24965 - /* Hardcoded from _DRM_FRAME_BUFFER,
24966 - _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
24967 - _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
24968 - const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
24969 + static const char * const types[] = {
24970 + [_DRM_FRAME_BUFFER] = "FB",
24971 + [_DRM_REGISTERS] = "REG",
24972 + [_DRM_SHM] = "SHM",
24973 + [_DRM_AGP] = "AGP",
24974 + [_DRM_SCATTER_GATHER] = "SG",
24975 + [_DRM_CONSISTENT] = "PCI",
24976 + [_DRM_GEM] = "GEM" };
24977 const char *type;
24978 int i;
24979
24980 @@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void
24981 map = r_list->map;
24982 if (!map)
24983 continue;
24984 - if (map->type < 0 || map->type > 5)
24985 + if (map->type >= ARRAY_SIZE(types))
24986 type = "??";
24987 else
24988 type = types[map->type];
24989 @@ -290,7 +294,11 @@ int drm_vma_info(struct seq_file *m, voi
24990 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
24991 vma->vm_flags & VM_LOCKED ? 'l' : '-',
24992 vma->vm_flags & VM_IO ? 'i' : '-',
24993 +#ifdef CONFIG_GRKERNSEC_HIDESYM
24994 + 0);
24995 +#else
24996 vma->vm_pgoff);
24997 +#endif
24998
24999 #if defined(__i386__)
25000 pgprot = pgprot_val(vma->vm_page_prot);
25001 diff -urNp linux-3.0.4/drivers/gpu/drm/drm_ioctl.c linux-3.0.4/drivers/gpu/drm/drm_ioctl.c
25002 --- linux-3.0.4/drivers/gpu/drm/drm_ioctl.c 2011-07-21 22:17:23.000000000 -0400
25003 +++ linux-3.0.4/drivers/gpu/drm/drm_ioctl.c 2011-08-23 21:47:55.000000000 -0400
25004 @@ -256,7 +256,7 @@ int drm_getstats(struct drm_device *dev,
25005 stats->data[i].value =
25006 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
25007 else
25008 - stats->data[i].value = atomic_read(&dev->counts[i]);
25009 + stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
25010 stats->data[i].type = dev->types[i];
25011 }
25012
25013 diff -urNp linux-3.0.4/drivers/gpu/drm/drm_lock.c linux-3.0.4/drivers/gpu/drm/drm_lock.c
25014 --- linux-3.0.4/drivers/gpu/drm/drm_lock.c 2011-07-21 22:17:23.000000000 -0400
25015 +++ linux-3.0.4/drivers/gpu/drm/drm_lock.c 2011-08-23 21:47:55.000000000 -0400
25016 @@ -89,7 +89,7 @@ int drm_lock(struct drm_device *dev, voi
25017 if (drm_lock_take(&master->lock, lock->context)) {
25018 master->lock.file_priv = file_priv;
25019 master->lock.lock_time = jiffies;
25020 - atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
25021 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
25022 break; /* Got lock */
25023 }
25024
25025 @@ -160,7 +160,7 @@ int drm_unlock(struct drm_device *dev, v
25026 return -EINVAL;
25027 }
25028
25029 - atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
25030 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
25031
25032 if (drm_lock_free(&master->lock, lock->context)) {
25033 /* FIXME: Should really bail out here. */
25034 diff -urNp linux-3.0.4/drivers/gpu/drm/i810/i810_dma.c linux-3.0.4/drivers/gpu/drm/i810/i810_dma.c
25035 --- linux-3.0.4/drivers/gpu/drm/i810/i810_dma.c 2011-07-21 22:17:23.000000000 -0400
25036 +++ linux-3.0.4/drivers/gpu/drm/i810/i810_dma.c 2011-08-23 21:47:55.000000000 -0400
25037 @@ -950,8 +950,8 @@ static int i810_dma_vertex(struct drm_de
25038 dma->buflist[vertex->idx],
25039 vertex->discard, vertex->used);
25040
25041 - atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
25042 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
25043 + atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
25044 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
25045 sarea_priv->last_enqueue = dev_priv->counter - 1;
25046 sarea_priv->last_dispatch = (int)hw_status[5];
25047
25048 @@ -1111,8 +1111,8 @@ static int i810_dma_mc(struct drm_device
25049 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
25050 mc->last_render);
25051
25052 - atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
25053 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
25054 + atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
25055 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
25056 sarea_priv->last_enqueue = dev_priv->counter - 1;
25057 sarea_priv->last_dispatch = (int)hw_status[5];
25058
25059 diff -urNp linux-3.0.4/drivers/gpu/drm/i810/i810_drv.h linux-3.0.4/drivers/gpu/drm/i810/i810_drv.h
25060 --- linux-3.0.4/drivers/gpu/drm/i810/i810_drv.h 2011-07-21 22:17:23.000000000 -0400
25061 +++ linux-3.0.4/drivers/gpu/drm/i810/i810_drv.h 2011-08-23 21:47:55.000000000 -0400
25062 @@ -108,8 +108,8 @@ typedef struct drm_i810_private {
25063 int page_flipping;
25064
25065 wait_queue_head_t irq_queue;
25066 - atomic_t irq_received;
25067 - atomic_t irq_emitted;
25068 + atomic_unchecked_t irq_received;
25069 + atomic_unchecked_t irq_emitted;
25070
25071 int front_offset;
25072 } drm_i810_private_t;
25073 diff -urNp linux-3.0.4/drivers/gpu/drm/i915/i915_debugfs.c linux-3.0.4/drivers/gpu/drm/i915/i915_debugfs.c
25074 --- linux-3.0.4/drivers/gpu/drm/i915/i915_debugfs.c 2011-07-21 22:17:23.000000000 -0400
25075 +++ linux-3.0.4/drivers/gpu/drm/i915/i915_debugfs.c 2011-08-23 21:47:55.000000000 -0400
25076 @@ -497,7 +497,7 @@ static int i915_interrupt_info(struct se
25077 I915_READ(GTIMR));
25078 }
25079 seq_printf(m, "Interrupts received: %d\n",
25080 - atomic_read(&dev_priv->irq_received));
25081 + atomic_read_unchecked(&dev_priv->irq_received));
25082 for (i = 0; i < I915_NUM_RINGS; i++) {
25083 if (IS_GEN6(dev)) {
25084 seq_printf(m, "Graphics Interrupt mask (%s): %08x\n",
25085 diff -urNp linux-3.0.4/drivers/gpu/drm/i915/i915_dma.c linux-3.0.4/drivers/gpu/drm/i915/i915_dma.c
25086 --- linux-3.0.4/drivers/gpu/drm/i915/i915_dma.c 2011-09-02 18:11:21.000000000 -0400
25087 +++ linux-3.0.4/drivers/gpu/drm/i915/i915_dma.c 2011-08-23 21:47:55.000000000 -0400
25088 @@ -1169,7 +1169,7 @@ static bool i915_switcheroo_can_switch(s
25089 bool can_switch;
25090
25091 spin_lock(&dev->count_lock);
25092 - can_switch = (dev->open_count == 0);
25093 + can_switch = (local_read(&dev->open_count) == 0);
25094 spin_unlock(&dev->count_lock);
25095 return can_switch;
25096 }
25097 diff -urNp linux-3.0.4/drivers/gpu/drm/i915/i915_drv.h linux-3.0.4/drivers/gpu/drm/i915/i915_drv.h
25098 --- linux-3.0.4/drivers/gpu/drm/i915/i915_drv.h 2011-07-21 22:17:23.000000000 -0400
25099 +++ linux-3.0.4/drivers/gpu/drm/i915/i915_drv.h 2011-08-23 21:47:55.000000000 -0400
25100 @@ -219,7 +219,7 @@ struct drm_i915_display_funcs {
25101 /* render clock increase/decrease */
25102 /* display clock increase/decrease */
25103 /* pll clock increase/decrease */
25104 -};
25105 +} __no_const;
25106
25107 struct intel_device_info {
25108 u8 gen;
25109 @@ -300,7 +300,7 @@ typedef struct drm_i915_private {
25110 int current_page;
25111 int page_flipping;
25112
25113 - atomic_t irq_received;
25114 + atomic_unchecked_t irq_received;
25115
25116 /* protects the irq masks */
25117 spinlock_t irq_lock;
25118 @@ -874,7 +874,7 @@ struct drm_i915_gem_object {
25119 * will be page flipped away on the next vblank. When it
25120 * reaches 0, dev_priv->pending_flip_queue will be woken up.
25121 */
25122 - atomic_t pending_flip;
25123 + atomic_unchecked_t pending_flip;
25124 };
25125
25126 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
25127 @@ -1247,7 +1247,7 @@ extern int intel_setup_gmbus(struct drm_
25128 extern void intel_teardown_gmbus(struct drm_device *dev);
25129 extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
25130 extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
25131 -extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
25132 +static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
25133 {
25134 return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
25135 }
25136 diff -urNp linux-3.0.4/drivers/gpu/drm/i915/i915_gem_execbuffer.c linux-3.0.4/drivers/gpu/drm/i915/i915_gem_execbuffer.c
25137 --- linux-3.0.4/drivers/gpu/drm/i915/i915_gem_execbuffer.c 2011-07-21 22:17:23.000000000 -0400
25138 +++ linux-3.0.4/drivers/gpu/drm/i915/i915_gem_execbuffer.c 2011-08-23 21:47:55.000000000 -0400
25139 @@ -188,7 +188,7 @@ i915_gem_object_set_to_gpu_domain(struct
25140 i915_gem_clflush_object(obj);
25141
25142 if (obj->base.pending_write_domain)
25143 - cd->flips |= atomic_read(&obj->pending_flip);
25144 + cd->flips |= atomic_read_unchecked(&obj->pending_flip);
25145
25146 /* The actual obj->write_domain will be updated with
25147 * pending_write_domain after we emit the accumulated flush for all
25148 diff -urNp linux-3.0.4/drivers/gpu/drm/i915/i915_irq.c linux-3.0.4/drivers/gpu/drm/i915/i915_irq.c
25149 --- linux-3.0.4/drivers/gpu/drm/i915/i915_irq.c 2011-09-02 18:11:21.000000000 -0400
25150 +++ linux-3.0.4/drivers/gpu/drm/i915/i915_irq.c 2011-08-23 21:47:55.000000000 -0400
25151 @@ -473,7 +473,7 @@ static irqreturn_t ivybridge_irq_handler
25152 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
25153 struct drm_i915_master_private *master_priv;
25154
25155 - atomic_inc(&dev_priv->irq_received);
25156 + atomic_inc_unchecked(&dev_priv->irq_received);
25157
25158 /* disable master interrupt before clearing iir */
25159 de_ier = I915_READ(DEIER);
25160 @@ -563,7 +563,7 @@ static irqreturn_t ironlake_irq_handler(
25161 struct drm_i915_master_private *master_priv;
25162 u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT;
25163
25164 - atomic_inc(&dev_priv->irq_received);
25165 + atomic_inc_unchecked(&dev_priv->irq_received);
25166
25167 if (IS_GEN6(dev))
25168 bsd_usr_interrupt = GT_GEN6_BSD_USER_INTERRUPT;
25169 @@ -1226,7 +1226,7 @@ static irqreturn_t i915_driver_irq_handl
25170 int ret = IRQ_NONE, pipe;
25171 bool blc_event = false;
25172
25173 - atomic_inc(&dev_priv->irq_received);
25174 + atomic_inc_unchecked(&dev_priv->irq_received);
25175
25176 iir = I915_READ(IIR);
25177
25178 @@ -1735,7 +1735,7 @@ static void ironlake_irq_preinstall(stru
25179 {
25180 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
25181
25182 - atomic_set(&dev_priv->irq_received, 0);
25183 + atomic_set_unchecked(&dev_priv->irq_received, 0);
25184
25185 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
25186 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
25187 @@ -1899,7 +1899,7 @@ static void i915_driver_irq_preinstall(s
25188 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
25189 int pipe;
25190
25191 - atomic_set(&dev_priv->irq_received, 0);
25192 + atomic_set_unchecked(&dev_priv->irq_received, 0);
25193
25194 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
25195 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
25196 diff -urNp linux-3.0.4/drivers/gpu/drm/i915/intel_display.c linux-3.0.4/drivers/gpu/drm/i915/intel_display.c
25197 --- linux-3.0.4/drivers/gpu/drm/i915/intel_display.c 2011-09-02 18:11:21.000000000 -0400
25198 +++ linux-3.0.4/drivers/gpu/drm/i915/intel_display.c 2011-08-23 21:47:55.000000000 -0400
25199 @@ -1961,7 +1961,7 @@ intel_pipe_set_base(struct drm_crtc *crt
25200
25201 wait_event(dev_priv->pending_flip_queue,
25202 atomic_read(&dev_priv->mm.wedged) ||
25203 - atomic_read(&obj->pending_flip) == 0);
25204 + atomic_read_unchecked(&obj->pending_flip) == 0);
25205
25206 /* Big Hammer, we also need to ensure that any pending
25207 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
25208 @@ -2548,7 +2548,7 @@ static void intel_crtc_wait_for_pending_
25209 obj = to_intel_framebuffer(crtc->fb)->obj;
25210 dev_priv = crtc->dev->dev_private;
25211 wait_event(dev_priv->pending_flip_queue,
25212 - atomic_read(&obj->pending_flip) == 0);
25213 + atomic_read_unchecked(&obj->pending_flip) == 0);
25214 }
25215
25216 static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
25217 @@ -6225,7 +6225,7 @@ static void do_intel_finish_page_flip(st
25218
25219 atomic_clear_mask(1 << intel_crtc->plane,
25220 &obj->pending_flip.counter);
25221 - if (atomic_read(&obj->pending_flip) == 0)
25222 + if (atomic_read_unchecked(&obj->pending_flip) == 0)
25223 wake_up(&dev_priv->pending_flip_queue);
25224
25225 schedule_work(&work->work);
25226 @@ -6514,7 +6514,7 @@ static int intel_crtc_page_flip(struct d
25227 /* Block clients from rendering to the new back buffer until
25228 * the flip occurs and the object is no longer visible.
25229 */
25230 - atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
25231 + atomic_add_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
25232
25233 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
25234 if (ret)
25235 @@ -6527,7 +6527,7 @@ static int intel_crtc_page_flip(struct d
25236 return 0;
25237
25238 cleanup_pending:
25239 - atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
25240 + atomic_sub_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
25241 cleanup_objs:
25242 drm_gem_object_unreference(&work->old_fb_obj->base);
25243 drm_gem_object_unreference(&obj->base);
25244 diff -urNp linux-3.0.4/drivers/gpu/drm/mga/mga_drv.h linux-3.0.4/drivers/gpu/drm/mga/mga_drv.h
25245 --- linux-3.0.4/drivers/gpu/drm/mga/mga_drv.h 2011-07-21 22:17:23.000000000 -0400
25246 +++ linux-3.0.4/drivers/gpu/drm/mga/mga_drv.h 2011-08-23 21:47:55.000000000 -0400
25247 @@ -120,9 +120,9 @@ typedef struct drm_mga_private {
25248 u32 clear_cmd;
25249 u32 maccess;
25250
25251 - atomic_t vbl_received; /**< Number of vblanks received. */
25252 + atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
25253 wait_queue_head_t fence_queue;
25254 - atomic_t last_fence_retired;
25255 + atomic_unchecked_t last_fence_retired;
25256 u32 next_fence_to_post;
25257
25258 unsigned int fb_cpp;
25259 diff -urNp linux-3.0.4/drivers/gpu/drm/mga/mga_irq.c linux-3.0.4/drivers/gpu/drm/mga/mga_irq.c
25260 --- linux-3.0.4/drivers/gpu/drm/mga/mga_irq.c 2011-07-21 22:17:23.000000000 -0400
25261 +++ linux-3.0.4/drivers/gpu/drm/mga/mga_irq.c 2011-08-23 21:47:55.000000000 -0400
25262 @@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_de
25263 if (crtc != 0)
25264 return 0;
25265
25266 - return atomic_read(&dev_priv->vbl_received);
25267 + return atomic_read_unchecked(&dev_priv->vbl_received);
25268 }
25269
25270
25271 @@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
25272 /* VBLANK interrupt */
25273 if (status & MGA_VLINEPEN) {
25274 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
25275 - atomic_inc(&dev_priv->vbl_received);
25276 + atomic_inc_unchecked(&dev_priv->vbl_received);
25277 drm_handle_vblank(dev, 0);
25278 handled = 1;
25279 }
25280 @@ -79,7 +79,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
25281 if ((prim_start & ~0x03) != (prim_end & ~0x03))
25282 MGA_WRITE(MGA_PRIMEND, prim_end);
25283
25284 - atomic_inc(&dev_priv->last_fence_retired);
25285 + atomic_inc_unchecked(&dev_priv->last_fence_retired);
25286 DRM_WAKEUP(&dev_priv->fence_queue);
25287 handled = 1;
25288 }
25289 @@ -130,7 +130,7 @@ int mga_driver_fence_wait(struct drm_dev
25290 * using fences.
25291 */
25292 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
25293 - (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
25294 + (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
25295 - *sequence) <= (1 << 23)));
25296
25297 *sequence = cur_fence;
25298 diff -urNp linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_bios.c linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_bios.c
25299 --- linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_bios.c 2011-07-21 22:17:23.000000000 -0400
25300 +++ linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_bios.c 2011-08-26 19:49:56.000000000 -0400
25301 @@ -200,7 +200,7 @@ struct methods {
25302 const char desc[8];
25303 void (*loadbios)(struct drm_device *, uint8_t *);
25304 const bool rw;
25305 -};
25306 +} __do_const;
25307
25308 static struct methods shadow_methods[] = {
25309 { "PRAMIN", load_vbios_pramin, true },
25310 @@ -5488,7 +5488,7 @@ parse_bit_displayport_tbl_entry(struct d
25311 struct bit_table {
25312 const char id;
25313 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
25314 -};
25315 +} __no_const;
25316
25317 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
25318
25319 diff -urNp linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_drv.h linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_drv.h
25320 --- linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_drv.h 2011-07-21 22:17:23.000000000 -0400
25321 +++ linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_drv.h 2011-08-23 21:47:55.000000000 -0400
25322 @@ -227,7 +227,7 @@ struct nouveau_channel {
25323 struct list_head pending;
25324 uint32_t sequence;
25325 uint32_t sequence_ack;
25326 - atomic_t last_sequence_irq;
25327 + atomic_unchecked_t last_sequence_irq;
25328 } fence;
25329
25330 /* DMA push buffer */
25331 @@ -304,7 +304,7 @@ struct nouveau_exec_engine {
25332 u32 handle, u16 class);
25333 void (*set_tile_region)(struct drm_device *dev, int i);
25334 void (*tlb_flush)(struct drm_device *, int engine);
25335 -};
25336 +} __no_const;
25337
25338 struct nouveau_instmem_engine {
25339 void *priv;
25340 @@ -325,13 +325,13 @@ struct nouveau_instmem_engine {
25341 struct nouveau_mc_engine {
25342 int (*init)(struct drm_device *dev);
25343 void (*takedown)(struct drm_device *dev);
25344 -};
25345 +} __no_const;
25346
25347 struct nouveau_timer_engine {
25348 int (*init)(struct drm_device *dev);
25349 void (*takedown)(struct drm_device *dev);
25350 uint64_t (*read)(struct drm_device *dev);
25351 -};
25352 +} __no_const;
25353
25354 struct nouveau_fb_engine {
25355 int num_tiles;
25356 @@ -494,7 +494,7 @@ struct nouveau_vram_engine {
25357 void (*put)(struct drm_device *, struct nouveau_mem **);
25358
25359 bool (*flags_valid)(struct drm_device *, u32 tile_flags);
25360 -};
25361 +} __no_const;
25362
25363 struct nouveau_engine {
25364 struct nouveau_instmem_engine instmem;
25365 @@ -640,7 +640,7 @@ struct drm_nouveau_private {
25366 struct drm_global_reference mem_global_ref;
25367 struct ttm_bo_global_ref bo_global_ref;
25368 struct ttm_bo_device bdev;
25369 - atomic_t validate_sequence;
25370 + atomic_unchecked_t validate_sequence;
25371 } ttm;
25372
25373 struct {
25374 diff -urNp linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_fence.c linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_fence.c
25375 --- linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_fence.c 2011-07-21 22:17:23.000000000 -0400
25376 +++ linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_fence.c 2011-08-23 21:47:55.000000000 -0400
25377 @@ -85,7 +85,7 @@ nouveau_fence_update(struct nouveau_chan
25378 if (USE_REFCNT(dev))
25379 sequence = nvchan_rd32(chan, 0x48);
25380 else
25381 - sequence = atomic_read(&chan->fence.last_sequence_irq);
25382 + sequence = atomic_read_unchecked(&chan->fence.last_sequence_irq);
25383
25384 if (chan->fence.sequence_ack == sequence)
25385 goto out;
25386 @@ -544,7 +544,7 @@ nouveau_fence_channel_init(struct nouvea
25387
25388 INIT_LIST_HEAD(&chan->fence.pending);
25389 spin_lock_init(&chan->fence.lock);
25390 - atomic_set(&chan->fence.last_sequence_irq, 0);
25391 + atomic_set_unchecked(&chan->fence.last_sequence_irq, 0);
25392 return 0;
25393 }
25394
25395 diff -urNp linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_gem.c linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_gem.c
25396 --- linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_gem.c 2011-07-21 22:17:23.000000000 -0400
25397 +++ linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_gem.c 2011-08-23 21:47:55.000000000 -0400
25398 @@ -249,7 +249,7 @@ validate_init(struct nouveau_channel *ch
25399 int trycnt = 0;
25400 int ret, i;
25401
25402 - sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence);
25403 + sequence = atomic_add_return_unchecked(1, &dev_priv->ttm.validate_sequence);
25404 retry:
25405 if (++trycnt > 100000) {
25406 NV_ERROR(dev, "%s failed and gave up.\n", __func__);
25407 diff -urNp linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_state.c linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_state.c
25408 --- linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_state.c 2011-07-21 22:17:23.000000000 -0400
25409 +++ linux-3.0.4/drivers/gpu/drm/nouveau/nouveau_state.c 2011-08-23 21:47:55.000000000 -0400
25410 @@ -488,7 +488,7 @@ static bool nouveau_switcheroo_can_switc
25411 bool can_switch;
25412
25413 spin_lock(&dev->count_lock);
25414 - can_switch = (dev->open_count == 0);
25415 + can_switch = (local_read(&dev->open_count) == 0);
25416 spin_unlock(&dev->count_lock);
25417 return can_switch;
25418 }
25419 diff -urNp linux-3.0.4/drivers/gpu/drm/nouveau/nv04_graph.c linux-3.0.4/drivers/gpu/drm/nouveau/nv04_graph.c
25420 --- linux-3.0.4/drivers/gpu/drm/nouveau/nv04_graph.c 2011-07-21 22:17:23.000000000 -0400
25421 +++ linux-3.0.4/drivers/gpu/drm/nouveau/nv04_graph.c 2011-08-23 21:47:55.000000000 -0400
25422 @@ -560,7 +560,7 @@ static int
25423 nv04_graph_mthd_set_ref(struct nouveau_channel *chan,
25424 u32 class, u32 mthd, u32 data)
25425 {
25426 - atomic_set(&chan->fence.last_sequence_irq, data);
25427 + atomic_set_unchecked(&chan->fence.last_sequence_irq, data);
25428 return 0;
25429 }
25430
25431 diff -urNp linux-3.0.4/drivers/gpu/drm/r128/r128_cce.c linux-3.0.4/drivers/gpu/drm/r128/r128_cce.c
25432 --- linux-3.0.4/drivers/gpu/drm/r128/r128_cce.c 2011-07-21 22:17:23.000000000 -0400
25433 +++ linux-3.0.4/drivers/gpu/drm/r128/r128_cce.c 2011-08-23 21:47:55.000000000 -0400
25434 @@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_d
25435
25436 /* GH: Simple idle check.
25437 */
25438 - atomic_set(&dev_priv->idle_count, 0);
25439 + atomic_set_unchecked(&dev_priv->idle_count, 0);
25440
25441 /* We don't support anything other than bus-mastering ring mode,
25442 * but the ring can be in either AGP or PCI space for the ring
25443 diff -urNp linux-3.0.4/drivers/gpu/drm/r128/r128_drv.h linux-3.0.4/drivers/gpu/drm/r128/r128_drv.h
25444 --- linux-3.0.4/drivers/gpu/drm/r128/r128_drv.h 2011-07-21 22:17:23.000000000 -0400
25445 +++ linux-3.0.4/drivers/gpu/drm/r128/r128_drv.h 2011-08-23 21:47:55.000000000 -0400
25446 @@ -90,14 +90,14 @@ typedef struct drm_r128_private {
25447 int is_pci;
25448 unsigned long cce_buffers_offset;
25449
25450 - atomic_t idle_count;
25451 + atomic_unchecked_t idle_count;
25452
25453 int page_flipping;
25454 int current_page;
25455 u32 crtc_offset;
25456 u32 crtc_offset_cntl;
25457
25458 - atomic_t vbl_received;
25459 + atomic_unchecked_t vbl_received;
25460
25461 u32 color_fmt;
25462 unsigned int front_offset;
25463 diff -urNp linux-3.0.4/drivers/gpu/drm/r128/r128_irq.c linux-3.0.4/drivers/gpu/drm/r128/r128_irq.c
25464 --- linux-3.0.4/drivers/gpu/drm/r128/r128_irq.c 2011-07-21 22:17:23.000000000 -0400
25465 +++ linux-3.0.4/drivers/gpu/drm/r128/r128_irq.c 2011-08-23 21:47:55.000000000 -0400
25466 @@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_d
25467 if (crtc != 0)
25468 return 0;
25469
25470 - return atomic_read(&dev_priv->vbl_received);
25471 + return atomic_read_unchecked(&dev_priv->vbl_received);
25472 }
25473
25474 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
25475 @@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_
25476 /* VBLANK interrupt */
25477 if (status & R128_CRTC_VBLANK_INT) {
25478 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
25479 - atomic_inc(&dev_priv->vbl_received);
25480 + atomic_inc_unchecked(&dev_priv->vbl_received);
25481 drm_handle_vblank(dev, 0);
25482 return IRQ_HANDLED;
25483 }
25484 diff -urNp linux-3.0.4/drivers/gpu/drm/r128/r128_state.c linux-3.0.4/drivers/gpu/drm/r128/r128_state.c
25485 --- linux-3.0.4/drivers/gpu/drm/r128/r128_state.c 2011-07-21 22:17:23.000000000 -0400
25486 +++ linux-3.0.4/drivers/gpu/drm/r128/r128_state.c 2011-08-23 21:47:55.000000000 -0400
25487 @@ -321,10 +321,10 @@ static void r128_clear_box(drm_r128_priv
25488
25489 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
25490 {
25491 - if (atomic_read(&dev_priv->idle_count) == 0)
25492 + if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
25493 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
25494 else
25495 - atomic_set(&dev_priv->idle_count, 0);
25496 + atomic_set_unchecked(&dev_priv->idle_count, 0);
25497 }
25498
25499 #endif
25500 diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/atom.c linux-3.0.4/drivers/gpu/drm/radeon/atom.c
25501 --- linux-3.0.4/drivers/gpu/drm/radeon/atom.c 2011-07-21 22:17:23.000000000 -0400
25502 +++ linux-3.0.4/drivers/gpu/drm/radeon/atom.c 2011-08-23 21:48:14.000000000 -0400
25503 @@ -1245,6 +1245,8 @@ struct atom_context *atom_parse(struct c
25504 char name[512];
25505 int i;
25506
25507 + pax_track_stack();
25508 +
25509 ctx->card = card;
25510 ctx->bios = bios;
25511
25512 diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/mkregtable.c linux-3.0.4/drivers/gpu/drm/radeon/mkregtable.c
25513 --- linux-3.0.4/drivers/gpu/drm/radeon/mkregtable.c 2011-07-21 22:17:23.000000000 -0400
25514 +++ linux-3.0.4/drivers/gpu/drm/radeon/mkregtable.c 2011-08-23 21:47:55.000000000 -0400
25515 @@ -637,14 +637,14 @@ static int parser_auth(struct table *t,
25516 regex_t mask_rex;
25517 regmatch_t match[4];
25518 char buf[1024];
25519 - size_t end;
25520 + long end;
25521 int len;
25522 int done = 0;
25523 int r;
25524 unsigned o;
25525 struct offset *offset;
25526 char last_reg_s[10];
25527 - int last_reg;
25528 + unsigned long last_reg;
25529
25530 if (regcomp
25531 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
25532 diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/radeon_atombios.c linux-3.0.4/drivers/gpu/drm/radeon/radeon_atombios.c
25533 --- linux-3.0.4/drivers/gpu/drm/radeon/radeon_atombios.c 2011-07-21 22:17:23.000000000 -0400
25534 +++ linux-3.0.4/drivers/gpu/drm/radeon/radeon_atombios.c 2011-08-23 21:48:14.000000000 -0400
25535 @@ -545,6 +545,8 @@ bool radeon_get_atom_connector_info_from
25536 struct radeon_gpio_rec gpio;
25537 struct radeon_hpd hpd;
25538
25539 + pax_track_stack();
25540 +
25541 if (!atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset))
25542 return false;
25543
25544 diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/radeon_device.c linux-3.0.4/drivers/gpu/drm/radeon/radeon_device.c
25545 --- linux-3.0.4/drivers/gpu/drm/radeon/radeon_device.c 2011-09-02 18:11:21.000000000 -0400
25546 +++ linux-3.0.4/drivers/gpu/drm/radeon/radeon_device.c 2011-08-23 21:47:55.000000000 -0400
25547 @@ -678,7 +678,7 @@ static bool radeon_switcheroo_can_switch
25548 bool can_switch;
25549
25550 spin_lock(&dev->count_lock);
25551 - can_switch = (dev->open_count == 0);
25552 + can_switch = (local_read(&dev->open_count) == 0);
25553 spin_unlock(&dev->count_lock);
25554 return can_switch;
25555 }
25556 diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/radeon_display.c linux-3.0.4/drivers/gpu/drm/radeon/radeon_display.c
25557 --- linux-3.0.4/drivers/gpu/drm/radeon/radeon_display.c 2011-09-02 18:11:21.000000000 -0400
25558 +++ linux-3.0.4/drivers/gpu/drm/radeon/radeon_display.c 2011-08-23 21:48:14.000000000 -0400
25559 @@ -946,6 +946,8 @@ void radeon_compute_pll_legacy(struct ra
25560 uint32_t post_div;
25561 u32 pll_out_min, pll_out_max;
25562
25563 + pax_track_stack();
25564 +
25565 DRM_DEBUG_KMS("PLL freq %llu %u %u\n", freq, pll->min_ref_div, pll->max_ref_div);
25566 freq = freq * 1000;
25567
25568 diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/radeon_drv.h linux-3.0.4/drivers/gpu/drm/radeon/radeon_drv.h
25569 --- linux-3.0.4/drivers/gpu/drm/radeon/radeon_drv.h 2011-07-21 22:17:23.000000000 -0400
25570 +++ linux-3.0.4/drivers/gpu/drm/radeon/radeon_drv.h 2011-08-23 21:47:55.000000000 -0400
25571 @@ -255,7 +255,7 @@ typedef struct drm_radeon_private {
25572
25573 /* SW interrupt */
25574 wait_queue_head_t swi_queue;
25575 - atomic_t swi_emitted;
25576 + atomic_unchecked_t swi_emitted;
25577 int vblank_crtc;
25578 uint32_t irq_enable_reg;
25579 uint32_t r500_disp_irq_reg;
25580 diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/radeon_fence.c linux-3.0.4/drivers/gpu/drm/radeon/radeon_fence.c
25581 --- linux-3.0.4/drivers/gpu/drm/radeon/radeon_fence.c 2011-07-21 22:17:23.000000000 -0400
25582 +++ linux-3.0.4/drivers/gpu/drm/radeon/radeon_fence.c 2011-08-23 21:47:55.000000000 -0400
25583 @@ -78,7 +78,7 @@ int radeon_fence_emit(struct radeon_devi
25584 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
25585 return 0;
25586 }
25587 - fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
25588 + fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv.seq);
25589 if (!rdev->cp.ready)
25590 /* FIXME: cp is not running assume everythings is done right
25591 * away
25592 @@ -373,7 +373,7 @@ int radeon_fence_driver_init(struct rade
25593 return r;
25594 }
25595 radeon_fence_write(rdev, 0);
25596 - atomic_set(&rdev->fence_drv.seq, 0);
25597 + atomic_set_unchecked(&rdev->fence_drv.seq, 0);
25598 INIT_LIST_HEAD(&rdev->fence_drv.created);
25599 INIT_LIST_HEAD(&rdev->fence_drv.emited);
25600 INIT_LIST_HEAD(&rdev->fence_drv.signaled);
25601 diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/radeon.h linux-3.0.4/drivers/gpu/drm/radeon/radeon.h
25602 --- linux-3.0.4/drivers/gpu/drm/radeon/radeon.h 2011-07-21 22:17:23.000000000 -0400
25603 +++ linux-3.0.4/drivers/gpu/drm/radeon/radeon.h 2011-08-23 21:47:55.000000000 -0400
25604 @@ -191,7 +191,7 @@ extern int sumo_get_temp(struct radeon_d
25605 */
25606 struct radeon_fence_driver {
25607 uint32_t scratch_reg;
25608 - atomic_t seq;
25609 + atomic_unchecked_t seq;
25610 uint32_t last_seq;
25611 unsigned long last_jiffies;
25612 unsigned long last_timeout;
25613 @@ -960,7 +960,7 @@ struct radeon_asic {
25614 void (*pre_page_flip)(struct radeon_device *rdev, int crtc);
25615 u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
25616 void (*post_page_flip)(struct radeon_device *rdev, int crtc);
25617 -};
25618 +} __no_const;
25619
25620 /*
25621 * Asic structures
25622 diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/radeon_ioc32.c linux-3.0.4/drivers/gpu/drm/radeon/radeon_ioc32.c
25623 --- linux-3.0.4/drivers/gpu/drm/radeon/radeon_ioc32.c 2011-07-21 22:17:23.000000000 -0400
25624 +++ linux-3.0.4/drivers/gpu/drm/radeon/radeon_ioc32.c 2011-08-23 21:47:55.000000000 -0400
25625 @@ -359,7 +359,7 @@ static int compat_radeon_cp_setparam(str
25626 request = compat_alloc_user_space(sizeof(*request));
25627 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
25628 || __put_user(req32.param, &request->param)
25629 - || __put_user((void __user *)(unsigned long)req32.value,
25630 + || __put_user((unsigned long)req32.value,
25631 &request->value))
25632 return -EFAULT;
25633
25634 diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/radeon_irq.c linux-3.0.4/drivers/gpu/drm/radeon/radeon_irq.c
25635 --- linux-3.0.4/drivers/gpu/drm/radeon/radeon_irq.c 2011-07-21 22:17:23.000000000 -0400
25636 +++ linux-3.0.4/drivers/gpu/drm/radeon/radeon_irq.c 2011-08-23 21:47:55.000000000 -0400
25637 @@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_de
25638 unsigned int ret;
25639 RING_LOCALS;
25640
25641 - atomic_inc(&dev_priv->swi_emitted);
25642 - ret = atomic_read(&dev_priv->swi_emitted);
25643 + atomic_inc_unchecked(&dev_priv->swi_emitted);
25644 + ret = atomic_read_unchecked(&dev_priv->swi_emitted);
25645
25646 BEGIN_RING(4);
25647 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
25648 @@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct
25649 drm_radeon_private_t *dev_priv =
25650 (drm_radeon_private_t *) dev->dev_private;
25651
25652 - atomic_set(&dev_priv->swi_emitted, 0);
25653 + atomic_set_unchecked(&dev_priv->swi_emitted, 0);
25654 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
25655
25656 dev->max_vblank_count = 0x001fffff;
25657 diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/radeon_state.c linux-3.0.4/drivers/gpu/drm/radeon/radeon_state.c
25658 --- linux-3.0.4/drivers/gpu/drm/radeon/radeon_state.c 2011-07-21 22:17:23.000000000 -0400
25659 +++ linux-3.0.4/drivers/gpu/drm/radeon/radeon_state.c 2011-08-23 21:47:55.000000000 -0400
25660 @@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_de
25661 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
25662 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
25663
25664 - if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
25665 + if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
25666 sarea_priv->nbox * sizeof(depth_boxes[0])))
25667 return -EFAULT;
25668
25669 @@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm
25670 {
25671 drm_radeon_private_t *dev_priv = dev->dev_private;
25672 drm_radeon_getparam_t *param = data;
25673 - int value;
25674 + int value = 0;
25675
25676 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
25677
25678 diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/radeon_ttm.c linux-3.0.4/drivers/gpu/drm/radeon/radeon_ttm.c
25679 --- linux-3.0.4/drivers/gpu/drm/radeon/radeon_ttm.c 2011-07-21 22:17:23.000000000 -0400
25680 +++ linux-3.0.4/drivers/gpu/drm/radeon/radeon_ttm.c 2011-08-23 21:47:55.000000000 -0400
25681 @@ -644,8 +644,10 @@ int radeon_mmap(struct file *filp, struc
25682 }
25683 if (unlikely(ttm_vm_ops == NULL)) {
25684 ttm_vm_ops = vma->vm_ops;
25685 - radeon_ttm_vm_ops = *ttm_vm_ops;
25686 - radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
25687 + pax_open_kernel();
25688 + memcpy((void *)&radeon_ttm_vm_ops, ttm_vm_ops, sizeof(radeon_ttm_vm_ops));
25689 + *(void **)&radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
25690 + pax_close_kernel();
25691 }
25692 vma->vm_ops = &radeon_ttm_vm_ops;
25693 return 0;
25694 diff -urNp linux-3.0.4/drivers/gpu/drm/radeon/rs690.c linux-3.0.4/drivers/gpu/drm/radeon/rs690.c
25695 --- linux-3.0.4/drivers/gpu/drm/radeon/rs690.c 2011-07-21 22:17:23.000000000 -0400
25696 +++ linux-3.0.4/drivers/gpu/drm/radeon/rs690.c 2011-08-23 21:47:55.000000000 -0400
25697 @@ -304,9 +304,11 @@ void rs690_crtc_bandwidth_compute(struct
25698 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
25699 rdev->pm.sideport_bandwidth.full)
25700 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
25701 - read_delay_latency.full = dfixed_const(370 * 800 * 1000);
25702 + read_delay_latency.full = dfixed_const(800 * 1000);
25703 read_delay_latency.full = dfixed_div(read_delay_latency,
25704 rdev->pm.igp_sideport_mclk);
25705 + a.full = dfixed_const(370);
25706 + read_delay_latency.full = dfixed_mul(read_delay_latency, a);
25707 } else {
25708 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
25709 rdev->pm.k8_bandwidth.full)
25710 diff -urNp linux-3.0.4/drivers/gpu/drm/ttm/ttm_page_alloc.c linux-3.0.4/drivers/gpu/drm/ttm/ttm_page_alloc.c
25711 --- linux-3.0.4/drivers/gpu/drm/ttm/ttm_page_alloc.c 2011-07-21 22:17:23.000000000 -0400
25712 +++ linux-3.0.4/drivers/gpu/drm/ttm/ttm_page_alloc.c 2011-08-23 21:47:55.000000000 -0400
25713 @@ -398,9 +398,9 @@ static int ttm_pool_get_num_unused_pages
25714 static int ttm_pool_mm_shrink(struct shrinker *shrink,
25715 struct shrink_control *sc)
25716 {
25717 - static atomic_t start_pool = ATOMIC_INIT(0);
25718 + static atomic_unchecked_t start_pool = ATOMIC_INIT(0);
25719 unsigned i;
25720 - unsigned pool_offset = atomic_add_return(1, &start_pool);
25721 + unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool);
25722 struct ttm_page_pool *pool;
25723 int shrink_pages = sc->nr_to_scan;
25724
25725 diff -urNp linux-3.0.4/drivers/gpu/drm/via/via_drv.h linux-3.0.4/drivers/gpu/drm/via/via_drv.h
25726 --- linux-3.0.4/drivers/gpu/drm/via/via_drv.h 2011-07-21 22:17:23.000000000 -0400
25727 +++ linux-3.0.4/drivers/gpu/drm/via/via_drv.h 2011-08-23 21:47:55.000000000 -0400
25728 @@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
25729 typedef uint32_t maskarray_t[5];
25730
25731 typedef struct drm_via_irq {
25732 - atomic_t irq_received;
25733 + atomic_unchecked_t irq_received;
25734 uint32_t pending_mask;
25735 uint32_t enable_mask;
25736 wait_queue_head_t irq_queue;
25737 @@ -75,7 +75,7 @@ typedef struct drm_via_private {
25738 struct timeval last_vblank;
25739 int last_vblank_valid;
25740 unsigned usec_per_vblank;
25741 - atomic_t vbl_received;
25742 + atomic_unchecked_t vbl_received;
25743 drm_via_state_t hc_state;
25744 char pci_buf[VIA_PCI_BUF_SIZE];
25745 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
25746 diff -urNp linux-3.0.4/drivers/gpu/drm/via/via_irq.c linux-3.0.4/drivers/gpu/drm/via/via_irq.c
25747 --- linux-3.0.4/drivers/gpu/drm/via/via_irq.c 2011-07-21 22:17:23.000000000 -0400
25748 +++ linux-3.0.4/drivers/gpu/drm/via/via_irq.c 2011-08-23 21:47:55.000000000 -0400
25749 @@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_de
25750 if (crtc != 0)
25751 return 0;
25752
25753 - return atomic_read(&dev_priv->vbl_received);
25754 + return atomic_read_unchecked(&dev_priv->vbl_received);
25755 }
25756
25757 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
25758 @@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_I
25759
25760 status = VIA_READ(VIA_REG_INTERRUPT);
25761 if (status & VIA_IRQ_VBLANK_PENDING) {
25762 - atomic_inc(&dev_priv->vbl_received);
25763 - if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
25764 + atomic_inc_unchecked(&dev_priv->vbl_received);
25765 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
25766 do_gettimeofday(&cur_vblank);
25767 if (dev_priv->last_vblank_valid) {
25768 dev_priv->usec_per_vblank =
25769 @@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
25770 dev_priv->last_vblank = cur_vblank;
25771 dev_priv->last_vblank_valid = 1;
25772 }
25773 - if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
25774 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
25775 DRM_DEBUG("US per vblank is: %u\n",
25776 dev_priv->usec_per_vblank);
25777 }
25778 @@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
25779
25780 for (i = 0; i < dev_priv->num_irqs; ++i) {
25781 if (status & cur_irq->pending_mask) {
25782 - atomic_inc(&cur_irq->irq_received);
25783 + atomic_inc_unchecked(&cur_irq->irq_received);
25784 DRM_WAKEUP(&cur_irq->irq_queue);
25785 handled = 1;
25786 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
25787 @@ -243,11 +243,11 @@ via_driver_irq_wait(struct drm_device *d
25788 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
25789 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
25790 masks[irq][4]));
25791 - cur_irq_sequence = atomic_read(&cur_irq->irq_received);
25792 + cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
25793 } else {
25794 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
25795 (((cur_irq_sequence =
25796 - atomic_read(&cur_irq->irq_received)) -
25797 + atomic_read_unchecked(&cur_irq->irq_received)) -
25798 *sequence) <= (1 << 23)));
25799 }
25800 *sequence = cur_irq_sequence;
25801 @@ -285,7 +285,7 @@ void via_driver_irq_preinstall(struct dr
25802 }
25803
25804 for (i = 0; i < dev_priv->num_irqs; ++i) {
25805 - atomic_set(&cur_irq->irq_received, 0);
25806 + atomic_set_unchecked(&cur_irq->irq_received, 0);
25807 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
25808 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
25809 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
25810 @@ -367,7 +367,7 @@ int via_wait_irq(struct drm_device *dev,
25811 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
25812 case VIA_IRQ_RELATIVE:
25813 irqwait->request.sequence +=
25814 - atomic_read(&cur_irq->irq_received);
25815 + atomic_read_unchecked(&cur_irq->irq_received);
25816 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
25817 case VIA_IRQ_ABSOLUTE:
25818 break;
25819 diff -urNp linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
25820 --- linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h 2011-07-21 22:17:23.000000000 -0400
25821 +++ linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h 2011-08-23 21:47:55.000000000 -0400
25822 @@ -240,7 +240,7 @@ struct vmw_private {
25823 * Fencing and IRQs.
25824 */
25825
25826 - atomic_t fence_seq;
25827 + atomic_unchecked_t fence_seq;
25828 wait_queue_head_t fence_queue;
25829 wait_queue_head_t fifo_queue;
25830 atomic_t fence_queue_waiters;
25831 diff -urNp linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
25832 --- linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c 2011-07-21 22:17:23.000000000 -0400
25833 +++ linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c 2011-08-23 21:47:55.000000000 -0400
25834 @@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev
25835 while (!vmw_lag_lt(queue, us)) {
25836 spin_lock(&queue->lock);
25837 if (list_empty(&queue->head))
25838 - sequence = atomic_read(&dev_priv->fence_seq);
25839 + sequence = atomic_read_unchecked(&dev_priv->fence_seq);
25840 else {
25841 fence = list_first_entry(&queue->head,
25842 struct vmw_fence, head);
25843 diff -urNp linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
25844 --- linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c 2011-07-21 22:17:23.000000000 -0400
25845 +++ linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c 2011-08-23 21:47:55.000000000 -0400
25846 @@ -137,7 +137,7 @@ int vmw_fifo_init(struct vmw_private *de
25847 (unsigned int) min,
25848 (unsigned int) fifo->capabilities);
25849
25850 - atomic_set(&dev_priv->fence_seq, dev_priv->last_read_sequence);
25851 + atomic_set_unchecked(&dev_priv->fence_seq, dev_priv->last_read_sequence);
25852 iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE);
25853 vmw_fence_queue_init(&fifo->fence_queue);
25854 return vmw_fifo_send_fence(dev_priv, &dummy);
25855 @@ -476,7 +476,7 @@ int vmw_fifo_send_fence(struct vmw_priva
25856
25857 fm = vmw_fifo_reserve(dev_priv, bytes);
25858 if (unlikely(fm == NULL)) {
25859 - *sequence = atomic_read(&dev_priv->fence_seq);
25860 + *sequence = atomic_read_unchecked(&dev_priv->fence_seq);
25861 ret = -ENOMEM;
25862 (void)vmw_fallback_wait(dev_priv, false, true, *sequence,
25863 false, 3*HZ);
25864 @@ -484,7 +484,7 @@ int vmw_fifo_send_fence(struct vmw_priva
25865 }
25866
25867 do {
25868 - *sequence = atomic_add_return(1, &dev_priv->fence_seq);
25869 + *sequence = atomic_add_return_unchecked(1, &dev_priv->fence_seq);
25870 } while (*sequence == 0);
25871
25872 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
25873 diff -urNp linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
25874 --- linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c 2011-07-21 22:17:23.000000000 -0400
25875 +++ linux-3.0.4/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c 2011-08-23 21:47:55.000000000 -0400
25876 @@ -100,7 +100,7 @@ bool vmw_fence_signaled(struct vmw_priva
25877 * emitted. Then the fence is stale and signaled.
25878 */
25879
25880 - ret = ((atomic_read(&dev_priv->fence_seq) - sequence)
25881 + ret = ((atomic_read_unchecked(&dev_priv->fence_seq) - sequence)
25882 > VMW_FENCE_WRAP);
25883
25884 return ret;
25885 @@ -131,7 +131,7 @@ int vmw_fallback_wait(struct vmw_private
25886
25887 if (fifo_idle)
25888 down_read(&fifo_state->rwsem);
25889 - signal_seq = atomic_read(&dev_priv->fence_seq);
25890 + signal_seq = atomic_read_unchecked(&dev_priv->fence_seq);
25891 ret = 0;
25892
25893 for (;;) {
25894 diff -urNp linux-3.0.4/drivers/hid/hid-core.c linux-3.0.4/drivers/hid/hid-core.c
25895 --- linux-3.0.4/drivers/hid/hid-core.c 2011-07-21 22:17:23.000000000 -0400
25896 +++ linux-3.0.4/drivers/hid/hid-core.c 2011-08-23 21:47:55.000000000 -0400
25897 @@ -1923,7 +1923,7 @@ static bool hid_ignore(struct hid_device
25898
25899 int hid_add_device(struct hid_device *hdev)
25900 {
25901 - static atomic_t id = ATOMIC_INIT(0);
25902 + static atomic_unchecked_t id = ATOMIC_INIT(0);
25903 int ret;
25904
25905 if (WARN_ON(hdev->status & HID_STAT_ADDED))
25906 @@ -1938,7 +1938,7 @@ int hid_add_device(struct hid_device *hd
25907 /* XXX hack, any other cleaner solution after the driver core
25908 * is converted to allow more than 20 bytes as the device name? */
25909 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
25910 - hdev->vendor, hdev->product, atomic_inc_return(&id));
25911 + hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
25912
25913 hid_debug_register(hdev, dev_name(&hdev->dev));
25914 ret = device_add(&hdev->dev);
25915 diff -urNp linux-3.0.4/drivers/hid/usbhid/hiddev.c linux-3.0.4/drivers/hid/usbhid/hiddev.c
25916 --- linux-3.0.4/drivers/hid/usbhid/hiddev.c 2011-07-21 22:17:23.000000000 -0400
25917 +++ linux-3.0.4/drivers/hid/usbhid/hiddev.c 2011-08-23 21:47:55.000000000 -0400
25918 @@ -624,7 +624,7 @@ static long hiddev_ioctl(struct file *fi
25919 break;
25920
25921 case HIDIOCAPPLICATION:
25922 - if (arg < 0 || arg >= hid->maxapplication)
25923 + if (arg >= hid->maxapplication)
25924 break;
25925
25926 for (i = 0; i < hid->maxcollection; i++)
25927 diff -urNp linux-3.0.4/drivers/hwmon/acpi_power_meter.c linux-3.0.4/drivers/hwmon/acpi_power_meter.c
25928 --- linux-3.0.4/drivers/hwmon/acpi_power_meter.c 2011-07-21 22:17:23.000000000 -0400
25929 +++ linux-3.0.4/drivers/hwmon/acpi_power_meter.c 2011-08-23 21:47:55.000000000 -0400
25930 @@ -316,8 +316,6 @@ static ssize_t set_trip(struct device *d
25931 return res;
25932
25933 temp /= 1000;
25934 - if (temp < 0)
25935 - return -EINVAL;
25936
25937 mutex_lock(&resource->lock);
25938 resource->trip[attr->index - 7] = temp;
25939 diff -urNp linux-3.0.4/drivers/hwmon/sht15.c linux-3.0.4/drivers/hwmon/sht15.c
25940 --- linux-3.0.4/drivers/hwmon/sht15.c 2011-07-21 22:17:23.000000000 -0400
25941 +++ linux-3.0.4/drivers/hwmon/sht15.c 2011-08-23 21:47:55.000000000 -0400
25942 @@ -166,7 +166,7 @@ struct sht15_data {
25943 int supply_uV;
25944 bool supply_uV_valid;
25945 struct work_struct update_supply_work;
25946 - atomic_t interrupt_handled;
25947 + atomic_unchecked_t interrupt_handled;
25948 };
25949
25950 /**
25951 @@ -509,13 +509,13 @@ static int sht15_measurement(struct sht1
25952 return ret;
25953
25954 gpio_direction_input(data->pdata->gpio_data);
25955 - atomic_set(&data->interrupt_handled, 0);
25956 + atomic_set_unchecked(&data->interrupt_handled, 0);
25957
25958 enable_irq(gpio_to_irq(data->pdata->gpio_data));
25959 if (gpio_get_value(data->pdata->gpio_data) == 0) {
25960 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
25961 /* Only relevant if the interrupt hasn't occurred. */
25962 - if (!atomic_read(&data->interrupt_handled))
25963 + if (!atomic_read_unchecked(&data->interrupt_handled))
25964 schedule_work(&data->read_work);
25965 }
25966 ret = wait_event_timeout(data->wait_queue,
25967 @@ -782,7 +782,7 @@ static irqreturn_t sht15_interrupt_fired
25968
25969 /* First disable the interrupt */
25970 disable_irq_nosync(irq);
25971 - atomic_inc(&data->interrupt_handled);
25972 + atomic_inc_unchecked(&data->interrupt_handled);
25973 /* Then schedule a reading work struct */
25974 if (data->state != SHT15_READING_NOTHING)
25975 schedule_work(&data->read_work);
25976 @@ -804,11 +804,11 @@ static void sht15_bh_read_data(struct wo
25977 * If not, then start the interrupt again - care here as could
25978 * have gone low in meantime so verify it hasn't!
25979 */
25980 - atomic_set(&data->interrupt_handled, 0);
25981 + atomic_set_unchecked(&data->interrupt_handled, 0);
25982 enable_irq(gpio_to_irq(data->pdata->gpio_data));
25983 /* If still not occurred or another handler has been scheduled */
25984 if (gpio_get_value(data->pdata->gpio_data)
25985 - || atomic_read(&data->interrupt_handled))
25986 + || atomic_read_unchecked(&data->interrupt_handled))
25987 return;
25988 }
25989
25990 diff -urNp linux-3.0.4/drivers/hwmon/w83791d.c linux-3.0.4/drivers/hwmon/w83791d.c
25991 --- linux-3.0.4/drivers/hwmon/w83791d.c 2011-07-21 22:17:23.000000000 -0400
25992 +++ linux-3.0.4/drivers/hwmon/w83791d.c 2011-08-23 21:47:55.000000000 -0400
25993 @@ -329,8 +329,8 @@ static int w83791d_detect(struct i2c_cli
25994 struct i2c_board_info *info);
25995 static int w83791d_remove(struct i2c_client *client);
25996
25997 -static int w83791d_read(struct i2c_client *client, u8 register);
25998 -static int w83791d_write(struct i2c_client *client, u8 register, u8 value);
25999 +static int w83791d_read(struct i2c_client *client, u8 reg);
26000 +static int w83791d_write(struct i2c_client *client, u8 reg, u8 value);
26001 static struct w83791d_data *w83791d_update_device(struct device *dev);
26002
26003 #ifdef DEBUG
26004 diff -urNp linux-3.0.4/drivers/i2c/busses/i2c-amd756-s4882.c linux-3.0.4/drivers/i2c/busses/i2c-amd756-s4882.c
26005 --- linux-3.0.4/drivers/i2c/busses/i2c-amd756-s4882.c 2011-07-21 22:17:23.000000000 -0400
26006 +++ linux-3.0.4/drivers/i2c/busses/i2c-amd756-s4882.c 2011-08-23 21:47:55.000000000 -0400
26007 @@ -43,7 +43,7 @@
26008 extern struct i2c_adapter amd756_smbus;
26009
26010 static struct i2c_adapter *s4882_adapter;
26011 -static struct i2c_algorithm *s4882_algo;
26012 +static i2c_algorithm_no_const *s4882_algo;
26013
26014 /* Wrapper access functions for multiplexed SMBus */
26015 static DEFINE_MUTEX(amd756_lock);
26016 diff -urNp linux-3.0.4/drivers/i2c/busses/i2c-nforce2-s4985.c linux-3.0.4/drivers/i2c/busses/i2c-nforce2-s4985.c
26017 --- linux-3.0.4/drivers/i2c/busses/i2c-nforce2-s4985.c 2011-07-21 22:17:23.000000000 -0400
26018 +++ linux-3.0.4/drivers/i2c/busses/i2c-nforce2-s4985.c 2011-08-23 21:47:55.000000000 -0400
26019 @@ -41,7 +41,7 @@
26020 extern struct i2c_adapter *nforce2_smbus;
26021
26022 static struct i2c_adapter *s4985_adapter;
26023 -static struct i2c_algorithm *s4985_algo;
26024 +static i2c_algorithm_no_const *s4985_algo;
26025
26026 /* Wrapper access functions for multiplexed SMBus */
26027 static DEFINE_MUTEX(nforce2_lock);
26028 diff -urNp linux-3.0.4/drivers/i2c/i2c-mux.c linux-3.0.4/drivers/i2c/i2c-mux.c
26029 --- linux-3.0.4/drivers/i2c/i2c-mux.c 2011-07-21 22:17:23.000000000 -0400
26030 +++ linux-3.0.4/drivers/i2c/i2c-mux.c 2011-08-23 21:47:55.000000000 -0400
26031 @@ -28,7 +28,7 @@
26032 /* multiplexer per channel data */
26033 struct i2c_mux_priv {
26034 struct i2c_adapter adap;
26035 - struct i2c_algorithm algo;
26036 + i2c_algorithm_no_const algo;
26037
26038 struct i2c_adapter *parent;
26039 void *mux_dev; /* the mux chip/device */
26040 diff -urNp linux-3.0.4/drivers/ide/ide-cd.c linux-3.0.4/drivers/ide/ide-cd.c
26041 --- linux-3.0.4/drivers/ide/ide-cd.c 2011-07-21 22:17:23.000000000 -0400
26042 +++ linux-3.0.4/drivers/ide/ide-cd.c 2011-08-23 21:47:55.000000000 -0400
26043 @@ -769,7 +769,7 @@ static void cdrom_do_block_pc(ide_drive_
26044 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
26045 if ((unsigned long)buf & alignment
26046 || blk_rq_bytes(rq) & q->dma_pad_mask
26047 - || object_is_on_stack(buf))
26048 + || object_starts_on_stack(buf))
26049 drive->dma = 0;
26050 }
26051 }
26052 diff -urNp linux-3.0.4/drivers/ide/ide-floppy.c linux-3.0.4/drivers/ide/ide-floppy.c
26053 --- linux-3.0.4/drivers/ide/ide-floppy.c 2011-07-21 22:17:23.000000000 -0400
26054 +++ linux-3.0.4/drivers/ide/ide-floppy.c 2011-08-23 21:48:14.000000000 -0400
26055 @@ -379,6 +379,8 @@ static int ide_floppy_get_capacity(ide_d
26056 u8 pc_buf[256], header_len, desc_cnt;
26057 int i, rc = 1, blocks, length;
26058
26059 + pax_track_stack();
26060 +
26061 ide_debug_log(IDE_DBG_FUNC, "enter");
26062
26063 drive->bios_cyl = 0;
26064 diff -urNp linux-3.0.4/drivers/ide/setup-pci.c linux-3.0.4/drivers/ide/setup-pci.c
26065 --- linux-3.0.4/drivers/ide/setup-pci.c 2011-07-21 22:17:23.000000000 -0400
26066 +++ linux-3.0.4/drivers/ide/setup-pci.c 2011-08-23 21:48:14.000000000 -0400
26067 @@ -542,6 +542,8 @@ int ide_pci_init_two(struct pci_dev *dev
26068 int ret, i, n_ports = dev2 ? 4 : 2;
26069 struct ide_hw hw[4], *hws[] = { NULL, NULL, NULL, NULL };
26070
26071 + pax_track_stack();
26072 +
26073 for (i = 0; i < n_ports / 2; i++) {
26074 ret = ide_setup_pci_controller(pdev[i], d, !i);
26075 if (ret < 0)
26076 diff -urNp linux-3.0.4/drivers/infiniband/core/cm.c linux-3.0.4/drivers/infiniband/core/cm.c
26077 --- linux-3.0.4/drivers/infiniband/core/cm.c 2011-07-21 22:17:23.000000000 -0400
26078 +++ linux-3.0.4/drivers/infiniband/core/cm.c 2011-08-23 21:47:55.000000000 -0400
26079 @@ -113,7 +113,7 @@ static char const counter_group_names[CM
26080
26081 struct cm_counter_group {
26082 struct kobject obj;
26083 - atomic_long_t counter[CM_ATTR_COUNT];
26084 + atomic_long_unchecked_t counter[CM_ATTR_COUNT];
26085 };
26086
26087 struct cm_counter_attribute {
26088 @@ -1387,7 +1387,7 @@ static void cm_dup_req_handler(struct cm
26089 struct ib_mad_send_buf *msg = NULL;
26090 int ret;
26091
26092 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
26093 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
26094 counter[CM_REQ_COUNTER]);
26095
26096 /* Quick state check to discard duplicate REQs. */
26097 @@ -1765,7 +1765,7 @@ static void cm_dup_rep_handler(struct cm
26098 if (!cm_id_priv)
26099 return;
26100
26101 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
26102 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
26103 counter[CM_REP_COUNTER]);
26104 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
26105 if (ret)
26106 @@ -1932,7 +1932,7 @@ static int cm_rtu_handler(struct cm_work
26107 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
26108 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
26109 spin_unlock_irq(&cm_id_priv->lock);
26110 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
26111 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
26112 counter[CM_RTU_COUNTER]);
26113 goto out;
26114 }
26115 @@ -2115,7 +2115,7 @@ static int cm_dreq_handler(struct cm_wor
26116 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
26117 dreq_msg->local_comm_id);
26118 if (!cm_id_priv) {
26119 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
26120 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
26121 counter[CM_DREQ_COUNTER]);
26122 cm_issue_drep(work->port, work->mad_recv_wc);
26123 return -EINVAL;
26124 @@ -2140,7 +2140,7 @@ static int cm_dreq_handler(struct cm_wor
26125 case IB_CM_MRA_REP_RCVD:
26126 break;
26127 case IB_CM_TIMEWAIT:
26128 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
26129 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
26130 counter[CM_DREQ_COUNTER]);
26131 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
26132 goto unlock;
26133 @@ -2154,7 +2154,7 @@ static int cm_dreq_handler(struct cm_wor
26134 cm_free_msg(msg);
26135 goto deref;
26136 case IB_CM_DREQ_RCVD:
26137 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
26138 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
26139 counter[CM_DREQ_COUNTER]);
26140 goto unlock;
26141 default:
26142 @@ -2521,7 +2521,7 @@ static int cm_mra_handler(struct cm_work
26143 ib_modify_mad(cm_id_priv->av.port->mad_agent,
26144 cm_id_priv->msg, timeout)) {
26145 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
26146 - atomic_long_inc(&work->port->
26147 + atomic_long_inc_unchecked(&work->port->
26148 counter_group[CM_RECV_DUPLICATES].
26149 counter[CM_MRA_COUNTER]);
26150 goto out;
26151 @@ -2530,7 +2530,7 @@ static int cm_mra_handler(struct cm_work
26152 break;
26153 case IB_CM_MRA_REQ_RCVD:
26154 case IB_CM_MRA_REP_RCVD:
26155 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
26156 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
26157 counter[CM_MRA_COUNTER]);
26158 /* fall through */
26159 default:
26160 @@ -2692,7 +2692,7 @@ static int cm_lap_handler(struct cm_work
26161 case IB_CM_LAP_IDLE:
26162 break;
26163 case IB_CM_MRA_LAP_SENT:
26164 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
26165 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
26166 counter[CM_LAP_COUNTER]);
26167 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
26168 goto unlock;
26169 @@ -2708,7 +2708,7 @@ static int cm_lap_handler(struct cm_work
26170 cm_free_msg(msg);
26171 goto deref;
26172 case IB_CM_LAP_RCVD:
26173 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
26174 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
26175 counter[CM_LAP_COUNTER]);
26176 goto unlock;
26177 default:
26178 @@ -2992,7 +2992,7 @@ static int cm_sidr_req_handler(struct cm
26179 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
26180 if (cur_cm_id_priv) {
26181 spin_unlock_irq(&cm.lock);
26182 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
26183 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
26184 counter[CM_SIDR_REQ_COUNTER]);
26185 goto out; /* Duplicate message. */
26186 }
26187 @@ -3204,10 +3204,10 @@ static void cm_send_handler(struct ib_ma
26188 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
26189 msg->retries = 1;
26190
26191 - atomic_long_add(1 + msg->retries,
26192 + atomic_long_add_unchecked(1 + msg->retries,
26193 &port->counter_group[CM_XMIT].counter[attr_index]);
26194 if (msg->retries)
26195 - atomic_long_add(msg->retries,
26196 + atomic_long_add_unchecked(msg->retries,
26197 &port->counter_group[CM_XMIT_RETRIES].
26198 counter[attr_index]);
26199
26200 @@ -3417,7 +3417,7 @@ static void cm_recv_handler(struct ib_ma
26201 }
26202
26203 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
26204 - atomic_long_inc(&port->counter_group[CM_RECV].
26205 + atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
26206 counter[attr_id - CM_ATTR_ID_OFFSET]);
26207
26208 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
26209 @@ -3615,7 +3615,7 @@ static ssize_t cm_show_counter(struct ko
26210 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
26211
26212 return sprintf(buf, "%ld\n",
26213 - atomic_long_read(&group->counter[cm_attr->index]));
26214 + atomic_long_read_unchecked(&group->counter[cm_attr->index]));
26215 }
26216
26217 static const struct sysfs_ops cm_counter_ops = {
26218 diff -urNp linux-3.0.4/drivers/infiniband/core/fmr_pool.c linux-3.0.4/drivers/infiniband/core/fmr_pool.c
26219 --- linux-3.0.4/drivers/infiniband/core/fmr_pool.c 2011-07-21 22:17:23.000000000 -0400
26220 +++ linux-3.0.4/drivers/infiniband/core/fmr_pool.c 2011-08-23 21:47:55.000000000 -0400
26221 @@ -97,8 +97,8 @@ struct ib_fmr_pool {
26222
26223 struct task_struct *thread;
26224
26225 - atomic_t req_ser;
26226 - atomic_t flush_ser;
26227 + atomic_unchecked_t req_ser;
26228 + atomic_unchecked_t flush_ser;
26229
26230 wait_queue_head_t force_wait;
26231 };
26232 @@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *p
26233 struct ib_fmr_pool *pool = pool_ptr;
26234
26235 do {
26236 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
26237 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
26238 ib_fmr_batch_release(pool);
26239
26240 - atomic_inc(&pool->flush_ser);
26241 + atomic_inc_unchecked(&pool->flush_ser);
26242 wake_up_interruptible(&pool->force_wait);
26243
26244 if (pool->flush_function)
26245 @@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *p
26246 }
26247
26248 set_current_state(TASK_INTERRUPTIBLE);
26249 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
26250 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
26251 !kthread_should_stop())
26252 schedule();
26253 __set_current_state(TASK_RUNNING);
26254 @@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(s
26255 pool->dirty_watermark = params->dirty_watermark;
26256 pool->dirty_len = 0;
26257 spin_lock_init(&pool->pool_lock);
26258 - atomic_set(&pool->req_ser, 0);
26259 - atomic_set(&pool->flush_ser, 0);
26260 + atomic_set_unchecked(&pool->req_ser, 0);
26261 + atomic_set_unchecked(&pool->flush_ser, 0);
26262 init_waitqueue_head(&pool->force_wait);
26263
26264 pool->thread = kthread_run(ib_fmr_cleanup_thread,
26265 @@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool
26266 }
26267 spin_unlock_irq(&pool->pool_lock);
26268
26269 - serial = atomic_inc_return(&pool->req_ser);
26270 + serial = atomic_inc_return_unchecked(&pool->req_ser);
26271 wake_up_process(pool->thread);
26272
26273 if (wait_event_interruptible(pool->force_wait,
26274 - atomic_read(&pool->flush_ser) - serial >= 0))
26275 + atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
26276 return -EINTR;
26277
26278 return 0;
26279 @@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr
26280 } else {
26281 list_add_tail(&fmr->list, &pool->dirty_list);
26282 if (++pool->dirty_len >= pool->dirty_watermark) {
26283 - atomic_inc(&pool->req_ser);
26284 + atomic_inc_unchecked(&pool->req_ser);
26285 wake_up_process(pool->thread);
26286 }
26287 }
26288 diff -urNp linux-3.0.4/drivers/infiniband/hw/cxgb4/mem.c linux-3.0.4/drivers/infiniband/hw/cxgb4/mem.c
26289 --- linux-3.0.4/drivers/infiniband/hw/cxgb4/mem.c 2011-07-21 22:17:23.000000000 -0400
26290 +++ linux-3.0.4/drivers/infiniband/hw/cxgb4/mem.c 2011-08-23 21:47:55.000000000 -0400
26291 @@ -122,7 +122,7 @@ static int write_tpt_entry(struct c4iw_r
26292 int err;
26293 struct fw_ri_tpte tpt;
26294 u32 stag_idx;
26295 - static atomic_t key;
26296 + static atomic_unchecked_t key;
26297
26298 if (c4iw_fatal_error(rdev))
26299 return -EIO;
26300 @@ -135,7 +135,7 @@ static int write_tpt_entry(struct c4iw_r
26301 &rdev->resource.tpt_fifo_lock);
26302 if (!stag_idx)
26303 return -ENOMEM;
26304 - *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
26305 + *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
26306 }
26307 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
26308 __func__, stag_state, type, pdid, stag_idx);
26309 diff -urNp linux-3.0.4/drivers/infiniband/hw/ipath/ipath_fs.c linux-3.0.4/drivers/infiniband/hw/ipath/ipath_fs.c
26310 --- linux-3.0.4/drivers/infiniband/hw/ipath/ipath_fs.c 2011-07-21 22:17:23.000000000 -0400
26311 +++ linux-3.0.4/drivers/infiniband/hw/ipath/ipath_fs.c 2011-08-23 21:48:14.000000000 -0400
26312 @@ -113,6 +113,8 @@ static ssize_t atomic_counters_read(stru
26313 struct infinipath_counters counters;
26314 struct ipath_devdata *dd;
26315
26316 + pax_track_stack();
26317 +
26318 dd = file->f_path.dentry->d_inode->i_private;
26319 dd->ipath_f_read_counters(dd, &counters);
26320
26321 diff -urNp linux-3.0.4/drivers/infiniband/hw/ipath/ipath_rc.c linux-3.0.4/drivers/infiniband/hw/ipath/ipath_rc.c
26322 --- linux-3.0.4/drivers/infiniband/hw/ipath/ipath_rc.c 2011-07-21 22:17:23.000000000 -0400
26323 +++ linux-3.0.4/drivers/infiniband/hw/ipath/ipath_rc.c 2011-08-23 21:47:55.000000000 -0400
26324 @@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *de
26325 struct ib_atomic_eth *ateth;
26326 struct ipath_ack_entry *e;
26327 u64 vaddr;
26328 - atomic64_t *maddr;
26329 + atomic64_unchecked_t *maddr;
26330 u64 sdata;
26331 u32 rkey;
26332 u8 next;
26333 @@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *de
26334 IB_ACCESS_REMOTE_ATOMIC)))
26335 goto nack_acc_unlck;
26336 /* Perform atomic OP and save result. */
26337 - maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
26338 + maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
26339 sdata = be64_to_cpu(ateth->swap_data);
26340 e = &qp->s_ack_queue[qp->r_head_ack_queue];
26341 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
26342 - (u64) atomic64_add_return(sdata, maddr) - sdata :
26343 + (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
26344 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
26345 be64_to_cpu(ateth->compare_data),
26346 sdata);
26347 diff -urNp linux-3.0.4/drivers/infiniband/hw/ipath/ipath_ruc.c linux-3.0.4/drivers/infiniband/hw/ipath/ipath_ruc.c
26348 --- linux-3.0.4/drivers/infiniband/hw/ipath/ipath_ruc.c 2011-07-21 22:17:23.000000000 -0400
26349 +++ linux-3.0.4/drivers/infiniband/hw/ipath/ipath_ruc.c 2011-08-23 21:47:55.000000000 -0400
26350 @@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ip
26351 unsigned long flags;
26352 struct ib_wc wc;
26353 u64 sdata;
26354 - atomic64_t *maddr;
26355 + atomic64_unchecked_t *maddr;
26356 enum ib_wc_status send_status;
26357
26358 /*
26359 @@ -382,11 +382,11 @@ again:
26360 IB_ACCESS_REMOTE_ATOMIC)))
26361 goto acc_err;
26362 /* Perform atomic OP and save result. */
26363 - maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
26364 + maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
26365 sdata = wqe->wr.wr.atomic.compare_add;
26366 *(u64 *) sqp->s_sge.sge.vaddr =
26367 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
26368 - (u64) atomic64_add_return(sdata, maddr) - sdata :
26369 + (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
26370 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
26371 sdata, wqe->wr.wr.atomic.swap);
26372 goto send_comp;
26373 diff -urNp linux-3.0.4/drivers/infiniband/hw/nes/nes.c linux-3.0.4/drivers/infiniband/hw/nes/nes.c
26374 --- linux-3.0.4/drivers/infiniband/hw/nes/nes.c 2011-07-21 22:17:23.000000000 -0400
26375 +++ linux-3.0.4/drivers/infiniband/hw/nes/nes.c 2011-08-23 21:47:55.000000000 -0400
26376 @@ -103,7 +103,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limi
26377 LIST_HEAD(nes_adapter_list);
26378 static LIST_HEAD(nes_dev_list);
26379
26380 -atomic_t qps_destroyed;
26381 +atomic_unchecked_t qps_destroyed;
26382
26383 static unsigned int ee_flsh_adapter;
26384 static unsigned int sysfs_nonidx_addr;
26385 @@ -275,7 +275,7 @@ static void nes_cqp_rem_ref_callback(str
26386 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
26387 struct nes_adapter *nesadapter = nesdev->nesadapter;
26388
26389 - atomic_inc(&qps_destroyed);
26390 + atomic_inc_unchecked(&qps_destroyed);
26391
26392 /* Free the control structures */
26393
26394 diff -urNp linux-3.0.4/drivers/infiniband/hw/nes/nes_cm.c linux-3.0.4/drivers/infiniband/hw/nes/nes_cm.c
26395 --- linux-3.0.4/drivers/infiniband/hw/nes/nes_cm.c 2011-07-21 22:17:23.000000000 -0400
26396 +++ linux-3.0.4/drivers/infiniband/hw/nes/nes_cm.c 2011-08-23 21:47:55.000000000 -0400
26397 @@ -68,14 +68,14 @@ u32 cm_packets_dropped;
26398 u32 cm_packets_retrans;
26399 u32 cm_packets_created;
26400 u32 cm_packets_received;
26401 -atomic_t cm_listens_created;
26402 -atomic_t cm_listens_destroyed;
26403 +atomic_unchecked_t cm_listens_created;
26404 +atomic_unchecked_t cm_listens_destroyed;
26405 u32 cm_backlog_drops;
26406 -atomic_t cm_loopbacks;
26407 -atomic_t cm_nodes_created;
26408 -atomic_t cm_nodes_destroyed;
26409 -atomic_t cm_accel_dropped_pkts;
26410 -atomic_t cm_resets_recvd;
26411 +atomic_unchecked_t cm_loopbacks;
26412 +atomic_unchecked_t cm_nodes_created;
26413 +atomic_unchecked_t cm_nodes_destroyed;
26414 +atomic_unchecked_t cm_accel_dropped_pkts;
26415 +atomic_unchecked_t cm_resets_recvd;
26416
26417 static inline int mini_cm_accelerated(struct nes_cm_core *,
26418 struct nes_cm_node *);
26419 @@ -151,13 +151,13 @@ static struct nes_cm_ops nes_cm_api = {
26420
26421 static struct nes_cm_core *g_cm_core;
26422
26423 -atomic_t cm_connects;
26424 -atomic_t cm_accepts;
26425 -atomic_t cm_disconnects;
26426 -atomic_t cm_closes;
26427 -atomic_t cm_connecteds;
26428 -atomic_t cm_connect_reqs;
26429 -atomic_t cm_rejects;
26430 +atomic_unchecked_t cm_connects;
26431 +atomic_unchecked_t cm_accepts;
26432 +atomic_unchecked_t cm_disconnects;
26433 +atomic_unchecked_t cm_closes;
26434 +atomic_unchecked_t cm_connecteds;
26435 +atomic_unchecked_t cm_connect_reqs;
26436 +atomic_unchecked_t cm_rejects;
26437
26438
26439 /**
26440 @@ -1045,7 +1045,7 @@ static int mini_cm_dec_refcnt_listen(str
26441 kfree(listener);
26442 listener = NULL;
26443 ret = 0;
26444 - atomic_inc(&cm_listens_destroyed);
26445 + atomic_inc_unchecked(&cm_listens_destroyed);
26446 } else {
26447 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
26448 }
26449 @@ -1240,7 +1240,7 @@ static struct nes_cm_node *make_cm_node(
26450 cm_node->rem_mac);
26451
26452 add_hte_node(cm_core, cm_node);
26453 - atomic_inc(&cm_nodes_created);
26454 + atomic_inc_unchecked(&cm_nodes_created);
26455
26456 return cm_node;
26457 }
26458 @@ -1298,7 +1298,7 @@ static int rem_ref_cm_node(struct nes_cm
26459 }
26460
26461 atomic_dec(&cm_core->node_cnt);
26462 - atomic_inc(&cm_nodes_destroyed);
26463 + atomic_inc_unchecked(&cm_nodes_destroyed);
26464 nesqp = cm_node->nesqp;
26465 if (nesqp) {
26466 nesqp->cm_node = NULL;
26467 @@ -1365,7 +1365,7 @@ static int process_options(struct nes_cm
26468
26469 static void drop_packet(struct sk_buff *skb)
26470 {
26471 - atomic_inc(&cm_accel_dropped_pkts);
26472 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
26473 dev_kfree_skb_any(skb);
26474 }
26475
26476 @@ -1428,7 +1428,7 @@ static void handle_rst_pkt(struct nes_cm
26477 {
26478
26479 int reset = 0; /* whether to send reset in case of err.. */
26480 - atomic_inc(&cm_resets_recvd);
26481 + atomic_inc_unchecked(&cm_resets_recvd);
26482 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
26483 " refcnt=%d\n", cm_node, cm_node->state,
26484 atomic_read(&cm_node->ref_count));
26485 @@ -2057,7 +2057,7 @@ static struct nes_cm_node *mini_cm_conne
26486 rem_ref_cm_node(cm_node->cm_core, cm_node);
26487 return NULL;
26488 }
26489 - atomic_inc(&cm_loopbacks);
26490 + atomic_inc_unchecked(&cm_loopbacks);
26491 loopbackremotenode->loopbackpartner = cm_node;
26492 loopbackremotenode->tcp_cntxt.rcv_wscale =
26493 NES_CM_DEFAULT_RCV_WND_SCALE;
26494 @@ -2332,7 +2332,7 @@ static int mini_cm_recv_pkt(struct nes_c
26495 add_ref_cm_node(cm_node);
26496 } else if (cm_node->state == NES_CM_STATE_TSA) {
26497 rem_ref_cm_node(cm_core, cm_node);
26498 - atomic_inc(&cm_accel_dropped_pkts);
26499 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
26500 dev_kfree_skb_any(skb);
26501 break;
26502 }
26503 @@ -2638,7 +2638,7 @@ static int nes_cm_disconn_true(struct ne
26504
26505 if ((cm_id) && (cm_id->event_handler)) {
26506 if (issue_disconn) {
26507 - atomic_inc(&cm_disconnects);
26508 + atomic_inc_unchecked(&cm_disconnects);
26509 cm_event.event = IW_CM_EVENT_DISCONNECT;
26510 cm_event.status = disconn_status;
26511 cm_event.local_addr = cm_id->local_addr;
26512 @@ -2660,7 +2660,7 @@ static int nes_cm_disconn_true(struct ne
26513 }
26514
26515 if (issue_close) {
26516 - atomic_inc(&cm_closes);
26517 + atomic_inc_unchecked(&cm_closes);
26518 nes_disconnect(nesqp, 1);
26519
26520 cm_id->provider_data = nesqp;
26521 @@ -2791,7 +2791,7 @@ int nes_accept(struct iw_cm_id *cm_id, s
26522
26523 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
26524 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
26525 - atomic_inc(&cm_accepts);
26526 + atomic_inc_unchecked(&cm_accepts);
26527
26528 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
26529 netdev_refcnt_read(nesvnic->netdev));
26530 @@ -3001,7 +3001,7 @@ int nes_reject(struct iw_cm_id *cm_id, c
26531
26532 struct nes_cm_core *cm_core;
26533
26534 - atomic_inc(&cm_rejects);
26535 + atomic_inc_unchecked(&cm_rejects);
26536 cm_node = (struct nes_cm_node *) cm_id->provider_data;
26537 loopback = cm_node->loopbackpartner;
26538 cm_core = cm_node->cm_core;
26539 @@ -3067,7 +3067,7 @@ int nes_connect(struct iw_cm_id *cm_id,
26540 ntohl(cm_id->local_addr.sin_addr.s_addr),
26541 ntohs(cm_id->local_addr.sin_port));
26542
26543 - atomic_inc(&cm_connects);
26544 + atomic_inc_unchecked(&cm_connects);
26545 nesqp->active_conn = 1;
26546
26547 /* cache the cm_id in the qp */
26548 @@ -3173,7 +3173,7 @@ int nes_create_listen(struct iw_cm_id *c
26549 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
26550 return err;
26551 }
26552 - atomic_inc(&cm_listens_created);
26553 + atomic_inc_unchecked(&cm_listens_created);
26554 }
26555
26556 cm_id->add_ref(cm_id);
26557 @@ -3278,7 +3278,7 @@ static void cm_event_connected(struct ne
26558 if (nesqp->destroyed) {
26559 return;
26560 }
26561 - atomic_inc(&cm_connecteds);
26562 + atomic_inc_unchecked(&cm_connecteds);
26563 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
26564 " local port 0x%04X. jiffies = %lu.\n",
26565 nesqp->hwqp.qp_id,
26566 @@ -3493,7 +3493,7 @@ static void cm_event_reset(struct nes_cm
26567
26568 cm_id->add_ref(cm_id);
26569 ret = cm_id->event_handler(cm_id, &cm_event);
26570 - atomic_inc(&cm_closes);
26571 + atomic_inc_unchecked(&cm_closes);
26572 cm_event.event = IW_CM_EVENT_CLOSE;
26573 cm_event.status = 0;
26574 cm_event.provider_data = cm_id->provider_data;
26575 @@ -3529,7 +3529,7 @@ static void cm_event_mpa_req(struct nes_
26576 return;
26577 cm_id = cm_node->cm_id;
26578
26579 - atomic_inc(&cm_connect_reqs);
26580 + atomic_inc_unchecked(&cm_connect_reqs);
26581 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
26582 cm_node, cm_id, jiffies);
26583
26584 @@ -3567,7 +3567,7 @@ static void cm_event_mpa_reject(struct n
26585 return;
26586 cm_id = cm_node->cm_id;
26587
26588 - atomic_inc(&cm_connect_reqs);
26589 + atomic_inc_unchecked(&cm_connect_reqs);
26590 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
26591 cm_node, cm_id, jiffies);
26592
26593 diff -urNp linux-3.0.4/drivers/infiniband/hw/nes/nes.h linux-3.0.4/drivers/infiniband/hw/nes/nes.h
26594 --- linux-3.0.4/drivers/infiniband/hw/nes/nes.h 2011-07-21 22:17:23.000000000 -0400
26595 +++ linux-3.0.4/drivers/infiniband/hw/nes/nes.h 2011-08-23 21:47:55.000000000 -0400
26596 @@ -175,17 +175,17 @@ extern unsigned int nes_debug_level;
26597 extern unsigned int wqm_quanta;
26598 extern struct list_head nes_adapter_list;
26599
26600 -extern atomic_t cm_connects;
26601 -extern atomic_t cm_accepts;
26602 -extern atomic_t cm_disconnects;
26603 -extern atomic_t cm_closes;
26604 -extern atomic_t cm_connecteds;
26605 -extern atomic_t cm_connect_reqs;
26606 -extern atomic_t cm_rejects;
26607 -extern atomic_t mod_qp_timouts;
26608 -extern atomic_t qps_created;
26609 -extern atomic_t qps_destroyed;
26610 -extern atomic_t sw_qps_destroyed;
26611 +extern atomic_unchecked_t cm_connects;
26612 +extern atomic_unchecked_t cm_accepts;
26613 +extern atomic_unchecked_t cm_disconnects;
26614 +extern atomic_unchecked_t cm_closes;
26615 +extern atomic_unchecked_t cm_connecteds;
26616 +extern atomic_unchecked_t cm_connect_reqs;
26617 +extern atomic_unchecked_t cm_rejects;
26618 +extern atomic_unchecked_t mod_qp_timouts;
26619 +extern atomic_unchecked_t qps_created;
26620 +extern atomic_unchecked_t qps_destroyed;
26621 +extern atomic_unchecked_t sw_qps_destroyed;
26622 extern u32 mh_detected;
26623 extern u32 mh_pauses_sent;
26624 extern u32 cm_packets_sent;
26625 @@ -194,14 +194,14 @@ extern u32 cm_packets_created;
26626 extern u32 cm_packets_received;
26627 extern u32 cm_packets_dropped;
26628 extern u32 cm_packets_retrans;
26629 -extern atomic_t cm_listens_created;
26630 -extern atomic_t cm_listens_destroyed;
26631 +extern atomic_unchecked_t cm_listens_created;
26632 +extern atomic_unchecked_t cm_listens_destroyed;
26633 extern u32 cm_backlog_drops;
26634 -extern atomic_t cm_loopbacks;
26635 -extern atomic_t cm_nodes_created;
26636 -extern atomic_t cm_nodes_destroyed;
26637 -extern atomic_t cm_accel_dropped_pkts;
26638 -extern atomic_t cm_resets_recvd;
26639 +extern atomic_unchecked_t cm_loopbacks;
26640 +extern atomic_unchecked_t cm_nodes_created;
26641 +extern atomic_unchecked_t cm_nodes_destroyed;
26642 +extern atomic_unchecked_t cm_accel_dropped_pkts;
26643 +extern atomic_unchecked_t cm_resets_recvd;
26644
26645 extern u32 int_mod_timer_init;
26646 extern u32 int_mod_cq_depth_256;
26647 diff -urNp linux-3.0.4/drivers/infiniband/hw/nes/nes_nic.c linux-3.0.4/drivers/infiniband/hw/nes/nes_nic.c
26648 --- linux-3.0.4/drivers/infiniband/hw/nes/nes_nic.c 2011-07-21 22:17:23.000000000 -0400
26649 +++ linux-3.0.4/drivers/infiniband/hw/nes/nes_nic.c 2011-08-23 21:47:55.000000000 -0400
26650 @@ -1274,31 +1274,31 @@ static void nes_netdev_get_ethtool_stats
26651 target_stat_values[++index] = mh_detected;
26652 target_stat_values[++index] = mh_pauses_sent;
26653 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
26654 - target_stat_values[++index] = atomic_read(&cm_connects);
26655 - target_stat_values[++index] = atomic_read(&cm_accepts);
26656 - target_stat_values[++index] = atomic_read(&cm_disconnects);
26657 - target_stat_values[++index] = atomic_read(&cm_connecteds);
26658 - target_stat_values[++index] = atomic_read(&cm_connect_reqs);
26659 - target_stat_values[++index] = atomic_read(&cm_rejects);
26660 - target_stat_values[++index] = atomic_read(&mod_qp_timouts);
26661 - target_stat_values[++index] = atomic_read(&qps_created);
26662 - target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
26663 - target_stat_values[++index] = atomic_read(&qps_destroyed);
26664 - target_stat_values[++index] = atomic_read(&cm_closes);
26665 + target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
26666 + target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
26667 + target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
26668 + target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
26669 + target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
26670 + target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
26671 + target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
26672 + target_stat_values[++index] = atomic_read_unchecked(&qps_created);
26673 + target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
26674 + target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
26675 + target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
26676 target_stat_values[++index] = cm_packets_sent;
26677 target_stat_values[++index] = cm_packets_bounced;
26678 target_stat_values[++index] = cm_packets_created;
26679 target_stat_values[++index] = cm_packets_received;
26680 target_stat_values[++index] = cm_packets_dropped;
26681 target_stat_values[++index] = cm_packets_retrans;
26682 - target_stat_values[++index] = atomic_read(&cm_listens_created);
26683 - target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
26684 + target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
26685 + target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
26686 target_stat_values[++index] = cm_backlog_drops;
26687 - target_stat_values[++index] = atomic_read(&cm_loopbacks);
26688 - target_stat_values[++index] = atomic_read(&cm_nodes_created);
26689 - target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
26690 - target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
26691 - target_stat_values[++index] = atomic_read(&cm_resets_recvd);
26692 + target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
26693 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
26694 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
26695 + target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
26696 + target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
26697 target_stat_values[++index] = nesadapter->free_4kpbl;
26698 target_stat_values[++index] = nesadapter->free_256pbl;
26699 target_stat_values[++index] = int_mod_timer_init;
26700 diff -urNp linux-3.0.4/drivers/infiniband/hw/nes/nes_verbs.c linux-3.0.4/drivers/infiniband/hw/nes/nes_verbs.c
26701 --- linux-3.0.4/drivers/infiniband/hw/nes/nes_verbs.c 2011-07-21 22:17:23.000000000 -0400
26702 +++ linux-3.0.4/drivers/infiniband/hw/nes/nes_verbs.c 2011-08-23 21:47:55.000000000 -0400
26703 @@ -46,9 +46,9 @@
26704
26705 #include <rdma/ib_umem.h>
26706
26707 -atomic_t mod_qp_timouts;
26708 -atomic_t qps_created;
26709 -atomic_t sw_qps_destroyed;
26710 +atomic_unchecked_t mod_qp_timouts;
26711 +atomic_unchecked_t qps_created;
26712 +atomic_unchecked_t sw_qps_destroyed;
26713
26714 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
26715
26716 @@ -1141,7 +1141,7 @@ static struct ib_qp *nes_create_qp(struc
26717 if (init_attr->create_flags)
26718 return ERR_PTR(-EINVAL);
26719
26720 - atomic_inc(&qps_created);
26721 + atomic_inc_unchecked(&qps_created);
26722 switch (init_attr->qp_type) {
26723 case IB_QPT_RC:
26724 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
26725 @@ -1470,7 +1470,7 @@ static int nes_destroy_qp(struct ib_qp *
26726 struct iw_cm_event cm_event;
26727 int ret;
26728
26729 - atomic_inc(&sw_qps_destroyed);
26730 + atomic_inc_unchecked(&sw_qps_destroyed);
26731 nesqp->destroyed = 1;
26732
26733 /* Blow away the connection if it exists. */
26734 diff -urNp linux-3.0.4/drivers/infiniband/hw/qib/qib.h linux-3.0.4/drivers/infiniband/hw/qib/qib.h
26735 --- linux-3.0.4/drivers/infiniband/hw/qib/qib.h 2011-07-21 22:17:23.000000000 -0400
26736 +++ linux-3.0.4/drivers/infiniband/hw/qib/qib.h 2011-08-23 21:47:55.000000000 -0400
26737 @@ -51,6 +51,7 @@
26738 #include <linux/completion.h>
26739 #include <linux/kref.h>
26740 #include <linux/sched.h>
26741 +#include <linux/slab.h>
26742
26743 #include "qib_common.h"
26744 #include "qib_verbs.h"
26745 diff -urNp linux-3.0.4/drivers/input/gameport/gameport.c linux-3.0.4/drivers/input/gameport/gameport.c
26746 --- linux-3.0.4/drivers/input/gameport/gameport.c 2011-07-21 22:17:23.000000000 -0400
26747 +++ linux-3.0.4/drivers/input/gameport/gameport.c 2011-08-23 21:47:55.000000000 -0400
26748 @@ -488,14 +488,14 @@ EXPORT_SYMBOL(gameport_set_phys);
26749 */
26750 static void gameport_init_port(struct gameport *gameport)
26751 {
26752 - static atomic_t gameport_no = ATOMIC_INIT(0);
26753 + static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
26754
26755 __module_get(THIS_MODULE);
26756
26757 mutex_init(&gameport->drv_mutex);
26758 device_initialize(&gameport->dev);
26759 dev_set_name(&gameport->dev, "gameport%lu",
26760 - (unsigned long)atomic_inc_return(&gameport_no) - 1);
26761 + (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
26762 gameport->dev.bus = &gameport_bus;
26763 gameport->dev.release = gameport_release_port;
26764 if (gameport->parent)
26765 diff -urNp linux-3.0.4/drivers/input/input.c linux-3.0.4/drivers/input/input.c
26766 --- linux-3.0.4/drivers/input/input.c 2011-07-21 22:17:23.000000000 -0400
26767 +++ linux-3.0.4/drivers/input/input.c 2011-08-23 21:47:55.000000000 -0400
26768 @@ -1814,7 +1814,7 @@ static void input_cleanse_bitmasks(struc
26769 */
26770 int input_register_device(struct input_dev *dev)
26771 {
26772 - static atomic_t input_no = ATOMIC_INIT(0);
26773 + static atomic_unchecked_t input_no = ATOMIC_INIT(0);
26774 struct input_handler *handler;
26775 const char *path;
26776 int error;
26777 @@ -1851,7 +1851,7 @@ int input_register_device(struct input_d
26778 dev->setkeycode = input_default_setkeycode;
26779
26780 dev_set_name(&dev->dev, "input%ld",
26781 - (unsigned long) atomic_inc_return(&input_no) - 1);
26782 + (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
26783
26784 error = device_add(&dev->dev);
26785 if (error)
26786 diff -urNp linux-3.0.4/drivers/input/joystick/sidewinder.c linux-3.0.4/drivers/input/joystick/sidewinder.c
26787 --- linux-3.0.4/drivers/input/joystick/sidewinder.c 2011-07-21 22:17:23.000000000 -0400
26788 +++ linux-3.0.4/drivers/input/joystick/sidewinder.c 2011-08-23 21:48:14.000000000 -0400
26789 @@ -30,6 +30,7 @@
26790 #include <linux/kernel.h>
26791 #include <linux/module.h>
26792 #include <linux/slab.h>
26793 +#include <linux/sched.h>
26794 #include <linux/init.h>
26795 #include <linux/input.h>
26796 #include <linux/gameport.h>
26797 @@ -428,6 +429,8 @@ static int sw_read(struct sw *sw)
26798 unsigned char buf[SW_LENGTH];
26799 int i;
26800
26801 + pax_track_stack();
26802 +
26803 i = sw_read_packet(sw->gameport, buf, sw->length, 0);
26804
26805 if (sw->type == SW_ID_3DP && sw->length == 66 && i != 66) { /* Broken packet, try to fix */
26806 diff -urNp linux-3.0.4/drivers/input/joystick/xpad.c linux-3.0.4/drivers/input/joystick/xpad.c
26807 --- linux-3.0.4/drivers/input/joystick/xpad.c 2011-07-21 22:17:23.000000000 -0400
26808 +++ linux-3.0.4/drivers/input/joystick/xpad.c 2011-08-23 21:47:55.000000000 -0400
26809 @@ -689,7 +689,7 @@ static void xpad_led_set(struct led_clas
26810
26811 static int xpad_led_probe(struct usb_xpad *xpad)
26812 {
26813 - static atomic_t led_seq = ATOMIC_INIT(0);
26814 + static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
26815 long led_no;
26816 struct xpad_led *led;
26817 struct led_classdev *led_cdev;
26818 @@ -702,7 +702,7 @@ static int xpad_led_probe(struct usb_xpa
26819 if (!led)
26820 return -ENOMEM;
26821
26822 - led_no = (long)atomic_inc_return(&led_seq) - 1;
26823 + led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
26824
26825 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
26826 led->xpad = xpad;
26827 diff -urNp linux-3.0.4/drivers/input/mousedev.c linux-3.0.4/drivers/input/mousedev.c
26828 --- linux-3.0.4/drivers/input/mousedev.c 2011-07-21 22:17:23.000000000 -0400
26829 +++ linux-3.0.4/drivers/input/mousedev.c 2011-08-23 21:47:55.000000000 -0400
26830 @@ -763,7 +763,7 @@ static ssize_t mousedev_read(struct file
26831
26832 spin_unlock_irq(&client->packet_lock);
26833
26834 - if (copy_to_user(buffer, data, count))
26835 + if (count > sizeof(data) || copy_to_user(buffer, data, count))
26836 return -EFAULT;
26837
26838 return count;
26839 diff -urNp linux-3.0.4/drivers/input/serio/serio.c linux-3.0.4/drivers/input/serio/serio.c
26840 --- linux-3.0.4/drivers/input/serio/serio.c 2011-07-21 22:17:23.000000000 -0400
26841 +++ linux-3.0.4/drivers/input/serio/serio.c 2011-08-23 21:47:55.000000000 -0400
26842 @@ -497,7 +497,7 @@ static void serio_release_port(struct de
26843 */
26844 static void serio_init_port(struct serio *serio)
26845 {
26846 - static atomic_t serio_no = ATOMIC_INIT(0);
26847 + static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
26848
26849 __module_get(THIS_MODULE);
26850
26851 @@ -508,7 +508,7 @@ static void serio_init_port(struct serio
26852 mutex_init(&serio->drv_mutex);
26853 device_initialize(&serio->dev);
26854 dev_set_name(&serio->dev, "serio%ld",
26855 - (long)atomic_inc_return(&serio_no) - 1);
26856 + (long)atomic_inc_return_unchecked(&serio_no) - 1);
26857 serio->dev.bus = &serio_bus;
26858 serio->dev.release = serio_release_port;
26859 serio->dev.groups = serio_device_attr_groups;
26860 diff -urNp linux-3.0.4/drivers/isdn/capi/capi.c linux-3.0.4/drivers/isdn/capi/capi.c
26861 --- linux-3.0.4/drivers/isdn/capi/capi.c 2011-07-21 22:17:23.000000000 -0400
26862 +++ linux-3.0.4/drivers/isdn/capi/capi.c 2011-08-23 21:47:55.000000000 -0400
26863 @@ -83,8 +83,8 @@ struct capiminor {
26864
26865 struct capi20_appl *ap;
26866 u32 ncci;
26867 - atomic_t datahandle;
26868 - atomic_t msgid;
26869 + atomic_unchecked_t datahandle;
26870 + atomic_unchecked_t msgid;
26871
26872 struct tty_port port;
26873 int ttyinstop;
26874 @@ -397,7 +397,7 @@ gen_data_b3_resp_for(struct capiminor *m
26875 capimsg_setu16(s, 2, mp->ap->applid);
26876 capimsg_setu8 (s, 4, CAPI_DATA_B3);
26877 capimsg_setu8 (s, 5, CAPI_RESP);
26878 - capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
26879 + capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
26880 capimsg_setu32(s, 8, mp->ncci);
26881 capimsg_setu16(s, 12, datahandle);
26882 }
26883 @@ -518,14 +518,14 @@ static void handle_minor_send(struct cap
26884 mp->outbytes -= len;
26885 spin_unlock_bh(&mp->outlock);
26886
26887 - datahandle = atomic_inc_return(&mp->datahandle);
26888 + datahandle = atomic_inc_return_unchecked(&mp->datahandle);
26889 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
26890 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
26891 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
26892 capimsg_setu16(skb->data, 2, mp->ap->applid);
26893 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
26894 capimsg_setu8 (skb->data, 5, CAPI_REQ);
26895 - capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
26896 + capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
26897 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
26898 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
26899 capimsg_setu16(skb->data, 16, len); /* Data length */
26900 diff -urNp linux-3.0.4/drivers/isdn/gigaset/common.c linux-3.0.4/drivers/isdn/gigaset/common.c
26901 --- linux-3.0.4/drivers/isdn/gigaset/common.c 2011-07-21 22:17:23.000000000 -0400
26902 +++ linux-3.0.4/drivers/isdn/gigaset/common.c 2011-08-23 21:47:55.000000000 -0400
26903 @@ -723,7 +723,7 @@ struct cardstate *gigaset_initcs(struct
26904 cs->commands_pending = 0;
26905 cs->cur_at_seq = 0;
26906 cs->gotfwver = -1;
26907 - cs->open_count = 0;
26908 + local_set(&cs->open_count, 0);
26909 cs->dev = NULL;
26910 cs->tty = NULL;
26911 cs->tty_dev = NULL;
26912 diff -urNp linux-3.0.4/drivers/isdn/gigaset/gigaset.h linux-3.0.4/drivers/isdn/gigaset/gigaset.h
26913 --- linux-3.0.4/drivers/isdn/gigaset/gigaset.h 2011-07-21 22:17:23.000000000 -0400
26914 +++ linux-3.0.4/drivers/isdn/gigaset/gigaset.h 2011-08-23 21:47:55.000000000 -0400
26915 @@ -35,6 +35,7 @@
26916 #include <linux/tty_driver.h>
26917 #include <linux/list.h>
26918 #include <asm/atomic.h>
26919 +#include <asm/local.h>
26920
26921 #define GIG_VERSION {0, 5, 0, 0}
26922 #define GIG_COMPAT {0, 4, 0, 0}
26923 @@ -433,7 +434,7 @@ struct cardstate {
26924 spinlock_t cmdlock;
26925 unsigned curlen, cmdbytes;
26926
26927 - unsigned open_count;
26928 + local_t open_count;
26929 struct tty_struct *tty;
26930 struct tasklet_struct if_wake_tasklet;
26931 unsigned control_state;
26932 diff -urNp linux-3.0.4/drivers/isdn/gigaset/interface.c linux-3.0.4/drivers/isdn/gigaset/interface.c
26933 --- linux-3.0.4/drivers/isdn/gigaset/interface.c 2011-07-21 22:17:23.000000000 -0400
26934 +++ linux-3.0.4/drivers/isdn/gigaset/interface.c 2011-08-23 21:47:55.000000000 -0400
26935 @@ -162,9 +162,7 @@ static int if_open(struct tty_struct *tt
26936 }
26937 tty->driver_data = cs;
26938
26939 - ++cs->open_count;
26940 -
26941 - if (cs->open_count == 1) {
26942 + if (local_inc_return(&cs->open_count) == 1) {
26943 spin_lock_irqsave(&cs->lock, flags);
26944 cs->tty = tty;
26945 spin_unlock_irqrestore(&cs->lock, flags);
26946 @@ -192,10 +190,10 @@ static void if_close(struct tty_struct *
26947
26948 if (!cs->connected)
26949 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
26950 - else if (!cs->open_count)
26951 + else if (!local_read(&cs->open_count))
26952 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26953 else {
26954 - if (!--cs->open_count) {
26955 + if (!local_dec_return(&cs->open_count)) {
26956 spin_lock_irqsave(&cs->lock, flags);
26957 cs->tty = NULL;
26958 spin_unlock_irqrestore(&cs->lock, flags);
26959 @@ -230,7 +228,7 @@ static int if_ioctl(struct tty_struct *t
26960 if (!cs->connected) {
26961 gig_dbg(DEBUG_IF, "not connected");
26962 retval = -ENODEV;
26963 - } else if (!cs->open_count)
26964 + } else if (!local_read(&cs->open_count))
26965 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26966 else {
26967 retval = 0;
26968 @@ -360,7 +358,7 @@ static int if_write(struct tty_struct *t
26969 retval = -ENODEV;
26970 goto done;
26971 }
26972 - if (!cs->open_count) {
26973 + if (!local_read(&cs->open_count)) {
26974 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26975 retval = -ENODEV;
26976 goto done;
26977 @@ -413,7 +411,7 @@ static int if_write_room(struct tty_stru
26978 if (!cs->connected) {
26979 gig_dbg(DEBUG_IF, "not connected");
26980 retval = -ENODEV;
26981 - } else if (!cs->open_count)
26982 + } else if (!local_read(&cs->open_count))
26983 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26984 else if (cs->mstate != MS_LOCKED) {
26985 dev_warn(cs->dev, "can't write to unlocked device\n");
26986 @@ -443,7 +441,7 @@ static int if_chars_in_buffer(struct tty
26987
26988 if (!cs->connected)
26989 gig_dbg(DEBUG_IF, "not connected");
26990 - else if (!cs->open_count)
26991 + else if (!local_read(&cs->open_count))
26992 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26993 else if (cs->mstate != MS_LOCKED)
26994 dev_warn(cs->dev, "can't write to unlocked device\n");
26995 @@ -471,7 +469,7 @@ static void if_throttle(struct tty_struc
26996
26997 if (!cs->connected)
26998 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
26999 - else if (!cs->open_count)
27000 + else if (!local_read(&cs->open_count))
27001 dev_warn(cs->dev, "%s: device not opened\n", __func__);
27002 else
27003 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
27004 @@ -495,7 +493,7 @@ static void if_unthrottle(struct tty_str
27005
27006 if (!cs->connected)
27007 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
27008 - else if (!cs->open_count)
27009 + else if (!local_read(&cs->open_count))
27010 dev_warn(cs->dev, "%s: device not opened\n", __func__);
27011 else
27012 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
27013 @@ -526,7 +524,7 @@ static void if_set_termios(struct tty_st
27014 goto out;
27015 }
27016
27017 - if (!cs->open_count) {
27018 + if (!local_read(&cs->open_count)) {
27019 dev_warn(cs->dev, "%s: device not opened\n", __func__);
27020 goto out;
27021 }
27022 diff -urNp linux-3.0.4/drivers/isdn/hardware/avm/b1.c linux-3.0.4/drivers/isdn/hardware/avm/b1.c
27023 --- linux-3.0.4/drivers/isdn/hardware/avm/b1.c 2011-07-21 22:17:23.000000000 -0400
27024 +++ linux-3.0.4/drivers/isdn/hardware/avm/b1.c 2011-08-23 21:47:55.000000000 -0400
27025 @@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capilo
27026 }
27027 if (left) {
27028 if (t4file->user) {
27029 - if (copy_from_user(buf, dp, left))
27030 + if (left > sizeof buf || copy_from_user(buf, dp, left))
27031 return -EFAULT;
27032 } else {
27033 memcpy(buf, dp, left);
27034 @@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capilo
27035 }
27036 if (left) {
27037 if (config->user) {
27038 - if (copy_from_user(buf, dp, left))
27039 + if (left > sizeof buf || copy_from_user(buf, dp, left))
27040 return -EFAULT;
27041 } else {
27042 memcpy(buf, dp, left);
27043 diff -urNp linux-3.0.4/drivers/isdn/hardware/eicon/capidtmf.c linux-3.0.4/drivers/isdn/hardware/eicon/capidtmf.c
27044 --- linux-3.0.4/drivers/isdn/hardware/eicon/capidtmf.c 2011-07-21 22:17:23.000000000 -0400
27045 +++ linux-3.0.4/drivers/isdn/hardware/eicon/capidtmf.c 2011-08-23 21:48:14.000000000 -0400
27046 @@ -498,6 +498,7 @@ void capidtmf_recv_block (t_capidtmf_sta
27047 byte goertzel_result_buffer[CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT];
27048 short windowed_sample_buffer[CAPIDTMF_RECV_WINDOWED_SAMPLES];
27049
27050 + pax_track_stack();
27051
27052 if (p_state->recv.state & CAPIDTMF_RECV_STATE_DTMF_ACTIVE)
27053 {
27054 diff -urNp linux-3.0.4/drivers/isdn/hardware/eicon/capifunc.c linux-3.0.4/drivers/isdn/hardware/eicon/capifunc.c
27055 --- linux-3.0.4/drivers/isdn/hardware/eicon/capifunc.c 2011-07-21 22:17:23.000000000 -0400
27056 +++ linux-3.0.4/drivers/isdn/hardware/eicon/capifunc.c 2011-08-23 21:48:14.000000000 -0400
27057 @@ -1055,6 +1055,8 @@ static int divacapi_connect_didd(void)
27058 IDI_SYNC_REQ req;
27059 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
27060
27061 + pax_track_stack();
27062 +
27063 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
27064
27065 for (x = 0; x < MAX_DESCRIPTORS; x++) {
27066 diff -urNp linux-3.0.4/drivers/isdn/hardware/eicon/diddfunc.c linux-3.0.4/drivers/isdn/hardware/eicon/diddfunc.c
27067 --- linux-3.0.4/drivers/isdn/hardware/eicon/diddfunc.c 2011-07-21 22:17:23.000000000 -0400
27068 +++ linux-3.0.4/drivers/isdn/hardware/eicon/diddfunc.c 2011-08-23 21:48:14.000000000 -0400
27069 @@ -54,6 +54,8 @@ static int DIVA_INIT_FUNCTION connect_di
27070 IDI_SYNC_REQ req;
27071 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
27072
27073 + pax_track_stack();
27074 +
27075 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
27076
27077 for (x = 0; x < MAX_DESCRIPTORS; x++) {
27078 diff -urNp linux-3.0.4/drivers/isdn/hardware/eicon/divasfunc.c linux-3.0.4/drivers/isdn/hardware/eicon/divasfunc.c
27079 --- linux-3.0.4/drivers/isdn/hardware/eicon/divasfunc.c 2011-07-21 22:17:23.000000000 -0400
27080 +++ linux-3.0.4/drivers/isdn/hardware/eicon/divasfunc.c 2011-08-23 21:48:14.000000000 -0400
27081 @@ -160,6 +160,8 @@ static int DIVA_INIT_FUNCTION connect_di
27082 IDI_SYNC_REQ req;
27083 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
27084
27085 + pax_track_stack();
27086 +
27087 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
27088
27089 for (x = 0; x < MAX_DESCRIPTORS; x++) {
27090 diff -urNp linux-3.0.4/drivers/isdn/hardware/eicon/divasync.h linux-3.0.4/drivers/isdn/hardware/eicon/divasync.h
27091 --- linux-3.0.4/drivers/isdn/hardware/eicon/divasync.h 2011-07-21 22:17:23.000000000 -0400
27092 +++ linux-3.0.4/drivers/isdn/hardware/eicon/divasync.h 2011-08-23 21:47:55.000000000 -0400
27093 @@ -146,7 +146,7 @@ typedef struct _diva_didd_add_adapter {
27094 } diva_didd_add_adapter_t;
27095 typedef struct _diva_didd_remove_adapter {
27096 IDI_CALL p_request;
27097 -} diva_didd_remove_adapter_t;
27098 +} __no_const diva_didd_remove_adapter_t;
27099 typedef struct _diva_didd_read_adapter_array {
27100 void * buffer;
27101 dword length;
27102 diff -urNp linux-3.0.4/drivers/isdn/hardware/eicon/idifunc.c linux-3.0.4/drivers/isdn/hardware/eicon/idifunc.c
27103 --- linux-3.0.4/drivers/isdn/hardware/eicon/idifunc.c 2011-07-21 22:17:23.000000000 -0400
27104 +++ linux-3.0.4/drivers/isdn/hardware/eicon/idifunc.c 2011-08-23 21:48:14.000000000 -0400
27105 @@ -188,6 +188,8 @@ static int DIVA_INIT_FUNCTION connect_di
27106 IDI_SYNC_REQ req;
27107 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
27108
27109 + pax_track_stack();
27110 +
27111 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
27112
27113 for (x = 0; x < MAX_DESCRIPTORS; x++) {
27114 diff -urNp linux-3.0.4/drivers/isdn/hardware/eicon/message.c linux-3.0.4/drivers/isdn/hardware/eicon/message.c
27115 --- linux-3.0.4/drivers/isdn/hardware/eicon/message.c 2011-07-21 22:17:23.000000000 -0400
27116 +++ linux-3.0.4/drivers/isdn/hardware/eicon/message.c 2011-08-23 21:48:14.000000000 -0400
27117 @@ -4886,6 +4886,8 @@ static void sig_ind(PLCI *plci)
27118 dword d;
27119 word w;
27120
27121 + pax_track_stack();
27122 +
27123 a = plci->adapter;
27124 Id = ((word)plci->Id<<8)|a->Id;
27125 PUT_WORD(&SS_Ind[4],0x0000);
27126 @@ -7480,6 +7482,8 @@ static word add_b1(PLCI *plci, API_PARSE
27127 word j, n, w;
27128 dword d;
27129
27130 + pax_track_stack();
27131 +
27132
27133 for(i=0;i<8;i++) bp_parms[i].length = 0;
27134 for(i=0;i<2;i++) global_config[i].length = 0;
27135 @@ -7954,6 +7958,8 @@ static word add_b23(PLCI *plci, API_PARS
27136 const byte llc3[] = {4,3,2,2,6,6,0};
27137 const byte header[] = {0,2,3,3,0,0,0};
27138
27139 + pax_track_stack();
27140 +
27141 for(i=0;i<8;i++) bp_parms[i].length = 0;
27142 for(i=0;i<6;i++) b2_config_parms[i].length = 0;
27143 for(i=0;i<5;i++) b3_config_parms[i].length = 0;
27144 @@ -14741,6 +14747,8 @@ static void group_optimization(DIVA_CAPI
27145 word appl_number_group_type[MAX_APPL];
27146 PLCI *auxplci;
27147
27148 + pax_track_stack();
27149 +
27150 set_group_ind_mask (plci); /* all APPLs within this inc. call are allowed to dial in */
27151
27152 if(!a->group_optimization_enabled)
27153 diff -urNp linux-3.0.4/drivers/isdn/hardware/eicon/mntfunc.c linux-3.0.4/drivers/isdn/hardware/eicon/mntfunc.c
27154 --- linux-3.0.4/drivers/isdn/hardware/eicon/mntfunc.c 2011-07-21 22:17:23.000000000 -0400
27155 +++ linux-3.0.4/drivers/isdn/hardware/eicon/mntfunc.c 2011-08-23 21:48:14.000000000 -0400
27156 @@ -79,6 +79,8 @@ static int DIVA_INIT_FUNCTION connect_di
27157 IDI_SYNC_REQ req;
27158 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
27159
27160 + pax_track_stack();
27161 +
27162 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
27163
27164 for (x = 0; x < MAX_DESCRIPTORS; x++) {
27165 diff -urNp linux-3.0.4/drivers/isdn/hardware/eicon/xdi_adapter.h linux-3.0.4/drivers/isdn/hardware/eicon/xdi_adapter.h
27166 --- linux-3.0.4/drivers/isdn/hardware/eicon/xdi_adapter.h 2011-07-21 22:17:23.000000000 -0400
27167 +++ linux-3.0.4/drivers/isdn/hardware/eicon/xdi_adapter.h 2011-08-23 21:47:55.000000000 -0400
27168 @@ -44,7 +44,7 @@ typedef struct _xdi_mbox_t {
27169 typedef struct _diva_os_idi_adapter_interface {
27170 diva_init_card_proc_t cleanup_adapter_proc;
27171 diva_cmd_card_proc_t cmd_proc;
27172 -} diva_os_idi_adapter_interface_t;
27173 +} __no_const diva_os_idi_adapter_interface_t;
27174
27175 typedef struct _diva_os_xdi_adapter {
27176 struct list_head link;
27177 diff -urNp linux-3.0.4/drivers/isdn/i4l/isdn_common.c linux-3.0.4/drivers/isdn/i4l/isdn_common.c
27178 --- linux-3.0.4/drivers/isdn/i4l/isdn_common.c 2011-07-21 22:17:23.000000000 -0400
27179 +++ linux-3.0.4/drivers/isdn/i4l/isdn_common.c 2011-08-23 21:48:14.000000000 -0400
27180 @@ -1286,6 +1286,8 @@ isdn_ioctl(struct file *file, uint cmd,
27181 } iocpar;
27182 void __user *argp = (void __user *)arg;
27183
27184 + pax_track_stack();
27185 +
27186 #define name iocpar.name
27187 #define bname iocpar.bname
27188 #define iocts iocpar.iocts
27189 diff -urNp linux-3.0.4/drivers/isdn/icn/icn.c linux-3.0.4/drivers/isdn/icn/icn.c
27190 --- linux-3.0.4/drivers/isdn/icn/icn.c 2011-07-21 22:17:23.000000000 -0400
27191 +++ linux-3.0.4/drivers/isdn/icn/icn.c 2011-08-23 21:47:55.000000000 -0400
27192 @@ -1045,7 +1045,7 @@ icn_writecmd(const u_char * buf, int len
27193 if (count > len)
27194 count = len;
27195 if (user) {
27196 - if (copy_from_user(msg, buf, count))
27197 + if (count > sizeof msg || copy_from_user(msg, buf, count))
27198 return -EFAULT;
27199 } else
27200 memcpy(msg, buf, count);
27201 diff -urNp linux-3.0.4/drivers/lguest/core.c linux-3.0.4/drivers/lguest/core.c
27202 --- linux-3.0.4/drivers/lguest/core.c 2011-07-21 22:17:23.000000000 -0400
27203 +++ linux-3.0.4/drivers/lguest/core.c 2011-08-23 21:47:55.000000000 -0400
27204 @@ -92,9 +92,17 @@ static __init int map_switcher(void)
27205 * it's worked so far. The end address needs +1 because __get_vm_area
27206 * allocates an extra guard page, so we need space for that.
27207 */
27208 +
27209 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
27210 + switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
27211 + VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
27212 + + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
27213 +#else
27214 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
27215 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
27216 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
27217 +#endif
27218 +
27219 if (!switcher_vma) {
27220 err = -ENOMEM;
27221 printk("lguest: could not map switcher pages high\n");
27222 @@ -119,7 +127,7 @@ static __init int map_switcher(void)
27223 * Now the Switcher is mapped at the right address, we can't fail!
27224 * Copy in the compiled-in Switcher code (from <arch>_switcher.S).
27225 */
27226 - memcpy(switcher_vma->addr, start_switcher_text,
27227 + memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
27228 end_switcher_text - start_switcher_text);
27229
27230 printk(KERN_INFO "lguest: mapped switcher at %p\n",
27231 diff -urNp linux-3.0.4/drivers/lguest/x86/core.c linux-3.0.4/drivers/lguest/x86/core.c
27232 --- linux-3.0.4/drivers/lguest/x86/core.c 2011-07-21 22:17:23.000000000 -0400
27233 +++ linux-3.0.4/drivers/lguest/x86/core.c 2011-08-23 21:47:55.000000000 -0400
27234 @@ -59,7 +59,7 @@ static struct {
27235 /* Offset from where switcher.S was compiled to where we've copied it */
27236 static unsigned long switcher_offset(void)
27237 {
27238 - return SWITCHER_ADDR - (unsigned long)start_switcher_text;
27239 + return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
27240 }
27241
27242 /* This cpu's struct lguest_pages. */
27243 @@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg
27244 * These copies are pretty cheap, so we do them unconditionally: */
27245 /* Save the current Host top-level page directory.
27246 */
27247 +
27248 +#ifdef CONFIG_PAX_PER_CPU_PGD
27249 + pages->state.host_cr3 = read_cr3();
27250 +#else
27251 pages->state.host_cr3 = __pa(current->mm->pgd);
27252 +#endif
27253 +
27254 /*
27255 * Set up the Guest's page tables to see this CPU's pages (and no
27256 * other CPU's pages).
27257 @@ -547,7 +553,7 @@ void __init lguest_arch_host_init(void)
27258 * compiled-in switcher code and the high-mapped copy we just made.
27259 */
27260 for (i = 0; i < IDT_ENTRIES; i++)
27261 - default_idt_entries[i] += switcher_offset();
27262 + default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
27263
27264 /*
27265 * Set up the Switcher's per-cpu areas.
27266 @@ -630,7 +636,7 @@ void __init lguest_arch_host_init(void)
27267 * it will be undisturbed when we switch. To change %cs and jump we
27268 * need this structure to feed to Intel's "lcall" instruction.
27269 */
27270 - lguest_entry.offset = (long)switch_to_guest + switcher_offset();
27271 + lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
27272 lguest_entry.segment = LGUEST_CS;
27273
27274 /*
27275 diff -urNp linux-3.0.4/drivers/lguest/x86/switcher_32.S linux-3.0.4/drivers/lguest/x86/switcher_32.S
27276 --- linux-3.0.4/drivers/lguest/x86/switcher_32.S 2011-07-21 22:17:23.000000000 -0400
27277 +++ linux-3.0.4/drivers/lguest/x86/switcher_32.S 2011-08-23 21:47:55.000000000 -0400
27278 @@ -87,6 +87,7 @@
27279 #include <asm/page.h>
27280 #include <asm/segment.h>
27281 #include <asm/lguest.h>
27282 +#include <asm/processor-flags.h>
27283
27284 // We mark the start of the code to copy
27285 // It's placed in .text tho it's never run here
27286 @@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
27287 // Changes type when we load it: damn Intel!
27288 // For after we switch over our page tables
27289 // That entry will be read-only: we'd crash.
27290 +
27291 +#ifdef CONFIG_PAX_KERNEXEC
27292 + mov %cr0, %edx
27293 + xor $X86_CR0_WP, %edx
27294 + mov %edx, %cr0
27295 +#endif
27296 +
27297 movl $(GDT_ENTRY_TSS*8), %edx
27298 ltr %dx
27299
27300 @@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
27301 // Let's clear it again for our return.
27302 // The GDT descriptor of the Host
27303 // Points to the table after two "size" bytes
27304 - movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
27305 + movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
27306 // Clear "used" from type field (byte 5, bit 2)
27307 - andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
27308 + andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
27309 +
27310 +#ifdef CONFIG_PAX_KERNEXEC
27311 + mov %cr0, %eax
27312 + xor $X86_CR0_WP, %eax
27313 + mov %eax, %cr0
27314 +#endif
27315
27316 // Once our page table's switched, the Guest is live!
27317 // The Host fades as we run this final step.
27318 @@ -295,13 +309,12 @@ deliver_to_host:
27319 // I consulted gcc, and it gave
27320 // These instructions, which I gladly credit:
27321 leal (%edx,%ebx,8), %eax
27322 - movzwl (%eax),%edx
27323 - movl 4(%eax), %eax
27324 - xorw %ax, %ax
27325 - orl %eax, %edx
27326 + movl 4(%eax), %edx
27327 + movw (%eax), %dx
27328 // Now the address of the handler's in %edx
27329 // We call it now: its "iret" drops us home.
27330 - jmp *%edx
27331 + ljmp $__KERNEL_CS, $1f
27332 +1: jmp *%edx
27333
27334 // Every interrupt can come to us here
27335 // But we must truly tell each apart.
27336 diff -urNp linux-3.0.4/drivers/md/dm.c linux-3.0.4/drivers/md/dm.c
27337 --- linux-3.0.4/drivers/md/dm.c 2011-09-02 18:11:21.000000000 -0400
27338 +++ linux-3.0.4/drivers/md/dm.c 2011-08-23 21:47:55.000000000 -0400
27339 @@ -164,9 +164,9 @@ struct mapped_device {
27340 /*
27341 * Event handling.
27342 */
27343 - atomic_t event_nr;
27344 + atomic_unchecked_t event_nr;
27345 wait_queue_head_t eventq;
27346 - atomic_t uevent_seq;
27347 + atomic_unchecked_t uevent_seq;
27348 struct list_head uevent_list;
27349 spinlock_t uevent_lock; /* Protect access to uevent_list */
27350
27351 @@ -1842,8 +1842,8 @@ static struct mapped_device *alloc_dev(i
27352 rwlock_init(&md->map_lock);
27353 atomic_set(&md->holders, 1);
27354 atomic_set(&md->open_count, 0);
27355 - atomic_set(&md->event_nr, 0);
27356 - atomic_set(&md->uevent_seq, 0);
27357 + atomic_set_unchecked(&md->event_nr, 0);
27358 + atomic_set_unchecked(&md->uevent_seq, 0);
27359 INIT_LIST_HEAD(&md->uevent_list);
27360 spin_lock_init(&md->uevent_lock);
27361
27362 @@ -1977,7 +1977,7 @@ static void event_callback(void *context
27363
27364 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
27365
27366 - atomic_inc(&md->event_nr);
27367 + atomic_inc_unchecked(&md->event_nr);
27368 wake_up(&md->eventq);
27369 }
27370
27371 @@ -2553,18 +2553,18 @@ int dm_kobject_uevent(struct mapped_devi
27372
27373 uint32_t dm_next_uevent_seq(struct mapped_device *md)
27374 {
27375 - return atomic_add_return(1, &md->uevent_seq);
27376 + return atomic_add_return_unchecked(1, &md->uevent_seq);
27377 }
27378
27379 uint32_t dm_get_event_nr(struct mapped_device *md)
27380 {
27381 - return atomic_read(&md->event_nr);
27382 + return atomic_read_unchecked(&md->event_nr);
27383 }
27384
27385 int dm_wait_event(struct mapped_device *md, int event_nr)
27386 {
27387 return wait_event_interruptible(md->eventq,
27388 - (event_nr != atomic_read(&md->event_nr)));
27389 + (event_nr != atomic_read_unchecked(&md->event_nr)));
27390 }
27391
27392 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
27393 diff -urNp linux-3.0.4/drivers/md/dm-ioctl.c linux-3.0.4/drivers/md/dm-ioctl.c
27394 --- linux-3.0.4/drivers/md/dm-ioctl.c 2011-07-21 22:17:23.000000000 -0400
27395 +++ linux-3.0.4/drivers/md/dm-ioctl.c 2011-08-23 21:47:55.000000000 -0400
27396 @@ -1551,7 +1551,7 @@ static int validate_params(uint cmd, str
27397 cmd == DM_LIST_VERSIONS_CMD)
27398 return 0;
27399
27400 - if ((cmd == DM_DEV_CREATE_CMD)) {
27401 + if (cmd == DM_DEV_CREATE_CMD) {
27402 if (!*param->name) {
27403 DMWARN("name not supplied when creating device");
27404 return -EINVAL;
27405 diff -urNp linux-3.0.4/drivers/md/dm-raid1.c linux-3.0.4/drivers/md/dm-raid1.c
27406 --- linux-3.0.4/drivers/md/dm-raid1.c 2011-07-21 22:17:23.000000000 -0400
27407 +++ linux-3.0.4/drivers/md/dm-raid1.c 2011-08-23 21:47:55.000000000 -0400
27408 @@ -40,7 +40,7 @@ enum dm_raid1_error {
27409
27410 struct mirror {
27411 struct mirror_set *ms;
27412 - atomic_t error_count;
27413 + atomic_unchecked_t error_count;
27414 unsigned long error_type;
27415 struct dm_dev *dev;
27416 sector_t offset;
27417 @@ -185,7 +185,7 @@ static struct mirror *get_valid_mirror(s
27418 struct mirror *m;
27419
27420 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
27421 - if (!atomic_read(&m->error_count))
27422 + if (!atomic_read_unchecked(&m->error_count))
27423 return m;
27424
27425 return NULL;
27426 @@ -217,7 +217,7 @@ static void fail_mirror(struct mirror *m
27427 * simple way to tell if a device has encountered
27428 * errors.
27429 */
27430 - atomic_inc(&m->error_count);
27431 + atomic_inc_unchecked(&m->error_count);
27432
27433 if (test_and_set_bit(error_type, &m->error_type))
27434 return;
27435 @@ -408,7 +408,7 @@ static struct mirror *choose_mirror(stru
27436 struct mirror *m = get_default_mirror(ms);
27437
27438 do {
27439 - if (likely(!atomic_read(&m->error_count)))
27440 + if (likely(!atomic_read_unchecked(&m->error_count)))
27441 return m;
27442
27443 if (m-- == ms->mirror)
27444 @@ -422,7 +422,7 @@ static int default_ok(struct mirror *m)
27445 {
27446 struct mirror *default_mirror = get_default_mirror(m->ms);
27447
27448 - return !atomic_read(&default_mirror->error_count);
27449 + return !atomic_read_unchecked(&default_mirror->error_count);
27450 }
27451
27452 static int mirror_available(struct mirror_set *ms, struct bio *bio)
27453 @@ -559,7 +559,7 @@ static void do_reads(struct mirror_set *
27454 */
27455 if (likely(region_in_sync(ms, region, 1)))
27456 m = choose_mirror(ms, bio->bi_sector);
27457 - else if (m && atomic_read(&m->error_count))
27458 + else if (m && atomic_read_unchecked(&m->error_count))
27459 m = NULL;
27460
27461 if (likely(m))
27462 @@ -937,7 +937,7 @@ static int get_mirror(struct mirror_set
27463 }
27464
27465 ms->mirror[mirror].ms = ms;
27466 - atomic_set(&(ms->mirror[mirror].error_count), 0);
27467 + atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
27468 ms->mirror[mirror].error_type = 0;
27469 ms->mirror[mirror].offset = offset;
27470
27471 @@ -1347,7 +1347,7 @@ static void mirror_resume(struct dm_targ
27472 */
27473 static char device_status_char(struct mirror *m)
27474 {
27475 - if (!atomic_read(&(m->error_count)))
27476 + if (!atomic_read_unchecked(&(m->error_count)))
27477 return 'A';
27478
27479 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
27480 diff -urNp linux-3.0.4/drivers/md/dm-stripe.c linux-3.0.4/drivers/md/dm-stripe.c
27481 --- linux-3.0.4/drivers/md/dm-stripe.c 2011-07-21 22:17:23.000000000 -0400
27482 +++ linux-3.0.4/drivers/md/dm-stripe.c 2011-08-23 21:47:55.000000000 -0400
27483 @@ -20,7 +20,7 @@ struct stripe {
27484 struct dm_dev *dev;
27485 sector_t physical_start;
27486
27487 - atomic_t error_count;
27488 + atomic_unchecked_t error_count;
27489 };
27490
27491 struct stripe_c {
27492 @@ -192,7 +192,7 @@ static int stripe_ctr(struct dm_target *
27493 kfree(sc);
27494 return r;
27495 }
27496 - atomic_set(&(sc->stripe[i].error_count), 0);
27497 + atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
27498 }
27499
27500 ti->private = sc;
27501 @@ -314,7 +314,7 @@ static int stripe_status(struct dm_targe
27502 DMEMIT("%d ", sc->stripes);
27503 for (i = 0; i < sc->stripes; i++) {
27504 DMEMIT("%s ", sc->stripe[i].dev->name);
27505 - buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
27506 + buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
27507 'D' : 'A';
27508 }
27509 buffer[i] = '\0';
27510 @@ -361,8 +361,8 @@ static int stripe_end_io(struct dm_targe
27511 */
27512 for (i = 0; i < sc->stripes; i++)
27513 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
27514 - atomic_inc(&(sc->stripe[i].error_count));
27515 - if (atomic_read(&(sc->stripe[i].error_count)) <
27516 + atomic_inc_unchecked(&(sc->stripe[i].error_count));
27517 + if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
27518 DM_IO_ERROR_THRESHOLD)
27519 schedule_work(&sc->trigger_event);
27520 }
27521 diff -urNp linux-3.0.4/drivers/md/dm-table.c linux-3.0.4/drivers/md/dm-table.c
27522 --- linux-3.0.4/drivers/md/dm-table.c 2011-07-21 22:17:23.000000000 -0400
27523 +++ linux-3.0.4/drivers/md/dm-table.c 2011-08-23 21:47:55.000000000 -0400
27524 @@ -390,7 +390,7 @@ static int device_area_is_invalid(struct
27525 if (!dev_size)
27526 return 0;
27527
27528 - if ((start >= dev_size) || (start + len > dev_size)) {
27529 + if ((start >= dev_size) || (len > dev_size - start)) {
27530 DMWARN("%s: %s too small for target: "
27531 "start=%llu, len=%llu, dev_size=%llu",
27532 dm_device_name(ti->table->md), bdevname(bdev, b),
27533 diff -urNp linux-3.0.4/drivers/md/md.c linux-3.0.4/drivers/md/md.c
27534 --- linux-3.0.4/drivers/md/md.c 2011-07-21 22:17:23.000000000 -0400
27535 +++ linux-3.0.4/drivers/md/md.c 2011-08-23 21:47:55.000000000 -0400
27536 @@ -226,10 +226,10 @@ EXPORT_SYMBOL_GPL(bio_clone_mddev);
27537 * start build, activate spare
27538 */
27539 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
27540 -static atomic_t md_event_count;
27541 +static atomic_unchecked_t md_event_count;
27542 void md_new_event(mddev_t *mddev)
27543 {
27544 - atomic_inc(&md_event_count);
27545 + atomic_inc_unchecked(&md_event_count);
27546 wake_up(&md_event_waiters);
27547 }
27548 EXPORT_SYMBOL_GPL(md_new_event);
27549 @@ -239,7 +239,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
27550 */
27551 static void md_new_event_inintr(mddev_t *mddev)
27552 {
27553 - atomic_inc(&md_event_count);
27554 + atomic_inc_unchecked(&md_event_count);
27555 wake_up(&md_event_waiters);
27556 }
27557
27558 @@ -1457,7 +1457,7 @@ static int super_1_load(mdk_rdev_t *rdev
27559
27560 rdev->preferred_minor = 0xffff;
27561 rdev->data_offset = le64_to_cpu(sb->data_offset);
27562 - atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
27563 + atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
27564
27565 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
27566 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
27567 @@ -1635,7 +1635,7 @@ static void super_1_sync(mddev_t *mddev,
27568 else
27569 sb->resync_offset = cpu_to_le64(0);
27570
27571 - sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
27572 + sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
27573
27574 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
27575 sb->size = cpu_to_le64(mddev->dev_sectors);
27576 @@ -2428,7 +2428,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_sho
27577 static ssize_t
27578 errors_show(mdk_rdev_t *rdev, char *page)
27579 {
27580 - return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
27581 + return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
27582 }
27583
27584 static ssize_t
27585 @@ -2437,7 +2437,7 @@ errors_store(mdk_rdev_t *rdev, const cha
27586 char *e;
27587 unsigned long n = simple_strtoul(buf, &e, 10);
27588 if (*buf && (*e == 0 || *e == '\n')) {
27589 - atomic_set(&rdev->corrected_errors, n);
27590 + atomic_set_unchecked(&rdev->corrected_errors, n);
27591 return len;
27592 }
27593 return -EINVAL;
27594 @@ -2793,8 +2793,8 @@ void md_rdev_init(mdk_rdev_t *rdev)
27595 rdev->last_read_error.tv_sec = 0;
27596 rdev->last_read_error.tv_nsec = 0;
27597 atomic_set(&rdev->nr_pending, 0);
27598 - atomic_set(&rdev->read_errors, 0);
27599 - atomic_set(&rdev->corrected_errors, 0);
27600 + atomic_set_unchecked(&rdev->read_errors, 0);
27601 + atomic_set_unchecked(&rdev->corrected_errors, 0);
27602
27603 INIT_LIST_HEAD(&rdev->same_set);
27604 init_waitqueue_head(&rdev->blocked_wait);
27605 @@ -6415,7 +6415,7 @@ static int md_seq_show(struct seq_file *
27606
27607 spin_unlock(&pers_lock);
27608 seq_printf(seq, "\n");
27609 - mi->event = atomic_read(&md_event_count);
27610 + mi->event = atomic_read_unchecked(&md_event_count);
27611 return 0;
27612 }
27613 if (v == (void*)2) {
27614 @@ -6504,7 +6504,7 @@ static int md_seq_show(struct seq_file *
27615 chunk_kb ? "KB" : "B");
27616 if (bitmap->file) {
27617 seq_printf(seq, ", file: ");
27618 - seq_path(seq, &bitmap->file->f_path, " \t\n");
27619 + seq_path(seq, &bitmap->file->f_path, " \t\n\\");
27620 }
27621
27622 seq_printf(seq, "\n");
27623 @@ -6538,7 +6538,7 @@ static int md_seq_open(struct inode *ino
27624 else {
27625 struct seq_file *p = file->private_data;
27626 p->private = mi;
27627 - mi->event = atomic_read(&md_event_count);
27628 + mi->event = atomic_read_unchecked(&md_event_count);
27629 }
27630 return error;
27631 }
27632 @@ -6554,7 +6554,7 @@ static unsigned int mdstat_poll(struct f
27633 /* always allow read */
27634 mask = POLLIN | POLLRDNORM;
27635
27636 - if (mi->event != atomic_read(&md_event_count))
27637 + if (mi->event != atomic_read_unchecked(&md_event_count))
27638 mask |= POLLERR | POLLPRI;
27639 return mask;
27640 }
27641 @@ -6598,7 +6598,7 @@ static int is_mddev_idle(mddev_t *mddev,
27642 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
27643 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
27644 (int)part_stat_read(&disk->part0, sectors[1]) -
27645 - atomic_read(&disk->sync_io);
27646 + atomic_read_unchecked(&disk->sync_io);
27647 /* sync IO will cause sync_io to increase before the disk_stats
27648 * as sync_io is counted when a request starts, and
27649 * disk_stats is counted when it completes.
27650 diff -urNp linux-3.0.4/drivers/md/md.h linux-3.0.4/drivers/md/md.h
27651 --- linux-3.0.4/drivers/md/md.h 2011-07-21 22:17:23.000000000 -0400
27652 +++ linux-3.0.4/drivers/md/md.h 2011-08-23 21:47:55.000000000 -0400
27653 @@ -97,13 +97,13 @@ struct mdk_rdev_s
27654 * only maintained for arrays that
27655 * support hot removal
27656 */
27657 - atomic_t read_errors; /* number of consecutive read errors that
27658 + atomic_unchecked_t read_errors; /* number of consecutive read errors that
27659 * we have tried to ignore.
27660 */
27661 struct timespec last_read_error; /* monotonic time since our
27662 * last read error
27663 */
27664 - atomic_t corrected_errors; /* number of corrected read errors,
27665 + atomic_unchecked_t corrected_errors; /* number of corrected read errors,
27666 * for reporting to userspace and storing
27667 * in superblock.
27668 */
27669 @@ -344,7 +344,7 @@ static inline void rdev_dec_pending(mdk_
27670
27671 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
27672 {
27673 - atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
27674 + atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
27675 }
27676
27677 struct mdk_personality
27678 diff -urNp linux-3.0.4/drivers/md/raid10.c linux-3.0.4/drivers/md/raid10.c
27679 --- linux-3.0.4/drivers/md/raid10.c 2011-07-21 22:17:23.000000000 -0400
27680 +++ linux-3.0.4/drivers/md/raid10.c 2011-08-23 21:47:55.000000000 -0400
27681 @@ -1186,7 +1186,7 @@ static void end_sync_read(struct bio *bi
27682 if (test_bit(BIO_UPTODATE, &bio->bi_flags))
27683 set_bit(R10BIO_Uptodate, &r10_bio->state);
27684 else {
27685 - atomic_add(r10_bio->sectors,
27686 + atomic_add_unchecked(r10_bio->sectors,
27687 &conf->mirrors[d].rdev->corrected_errors);
27688 if (!test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
27689 md_error(r10_bio->mddev,
27690 @@ -1394,7 +1394,7 @@ static void check_decay_read_errors(mdde
27691 {
27692 struct timespec cur_time_mon;
27693 unsigned long hours_since_last;
27694 - unsigned int read_errors = atomic_read(&rdev->read_errors);
27695 + unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
27696
27697 ktime_get_ts(&cur_time_mon);
27698
27699 @@ -1416,9 +1416,9 @@ static void check_decay_read_errors(mdde
27700 * overflowing the shift of read_errors by hours_since_last.
27701 */
27702 if (hours_since_last >= 8 * sizeof(read_errors))
27703 - atomic_set(&rdev->read_errors, 0);
27704 + atomic_set_unchecked(&rdev->read_errors, 0);
27705 else
27706 - atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
27707 + atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
27708 }
27709
27710 /*
27711 @@ -1448,8 +1448,8 @@ static void fix_read_error(conf_t *conf,
27712 return;
27713
27714 check_decay_read_errors(mddev, rdev);
27715 - atomic_inc(&rdev->read_errors);
27716 - if (atomic_read(&rdev->read_errors) > max_read_errors) {
27717 + atomic_inc_unchecked(&rdev->read_errors);
27718 + if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
27719 char b[BDEVNAME_SIZE];
27720 bdevname(rdev->bdev, b);
27721
27722 @@ -1457,7 +1457,7 @@ static void fix_read_error(conf_t *conf,
27723 "md/raid10:%s: %s: Raid device exceeded "
27724 "read_error threshold [cur %d:max %d]\n",
27725 mdname(mddev), b,
27726 - atomic_read(&rdev->read_errors), max_read_errors);
27727 + atomic_read_unchecked(&rdev->read_errors), max_read_errors);
27728 printk(KERN_NOTICE
27729 "md/raid10:%s: %s: Failing raid device\n",
27730 mdname(mddev), b);
27731 @@ -1520,7 +1520,7 @@ static void fix_read_error(conf_t *conf,
27732 test_bit(In_sync, &rdev->flags)) {
27733 atomic_inc(&rdev->nr_pending);
27734 rcu_read_unlock();
27735 - atomic_add(s, &rdev->corrected_errors);
27736 + atomic_add_unchecked(s, &rdev->corrected_errors);
27737 if (sync_page_io(rdev,
27738 r10_bio->devs[sl].addr +
27739 sect,
27740 diff -urNp linux-3.0.4/drivers/md/raid1.c linux-3.0.4/drivers/md/raid1.c
27741 --- linux-3.0.4/drivers/md/raid1.c 2011-07-21 22:17:23.000000000 -0400
27742 +++ linux-3.0.4/drivers/md/raid1.c 2011-08-23 21:47:55.000000000 -0400
27743 @@ -1263,7 +1263,7 @@ static int fix_sync_read_error(r1bio_t *
27744 rdev_dec_pending(rdev, mddev);
27745 md_error(mddev, rdev);
27746 } else
27747 - atomic_add(s, &rdev->corrected_errors);
27748 + atomic_add_unchecked(s, &rdev->corrected_errors);
27749 }
27750 d = start;
27751 while (d != r1_bio->read_disk) {
27752 @@ -1492,7 +1492,7 @@ static void fix_read_error(conf_t *conf,
27753 /* Well, this device is dead */
27754 md_error(mddev, rdev);
27755 else {
27756 - atomic_add(s, &rdev->corrected_errors);
27757 + atomic_add_unchecked(s, &rdev->corrected_errors);
27758 printk(KERN_INFO
27759 "md/raid1:%s: read error corrected "
27760 "(%d sectors at %llu on %s)\n",
27761 diff -urNp linux-3.0.4/drivers/md/raid5.c linux-3.0.4/drivers/md/raid5.c
27762 --- linux-3.0.4/drivers/md/raid5.c 2011-07-21 22:17:23.000000000 -0400
27763 +++ linux-3.0.4/drivers/md/raid5.c 2011-08-23 21:48:14.000000000 -0400
27764 @@ -550,7 +550,7 @@ static void ops_run_io(struct stripe_hea
27765 bi->bi_next = NULL;
27766 if ((rw & WRITE) &&
27767 test_bit(R5_ReWrite, &sh->dev[i].flags))
27768 - atomic_add(STRIPE_SECTORS,
27769 + atomic_add_unchecked(STRIPE_SECTORS,
27770 &rdev->corrected_errors);
27771 generic_make_request(bi);
27772 } else {
27773 @@ -1596,15 +1596,15 @@ static void raid5_end_read_request(struc
27774 clear_bit(R5_ReadError, &sh->dev[i].flags);
27775 clear_bit(R5_ReWrite, &sh->dev[i].flags);
27776 }
27777 - if (atomic_read(&conf->disks[i].rdev->read_errors))
27778 - atomic_set(&conf->disks[i].rdev->read_errors, 0);
27779 + if (atomic_read_unchecked(&conf->disks[i].rdev->read_errors))
27780 + atomic_set_unchecked(&conf->disks[i].rdev->read_errors, 0);
27781 } else {
27782 const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
27783 int retry = 0;
27784 rdev = conf->disks[i].rdev;
27785
27786 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
27787 - atomic_inc(&rdev->read_errors);
27788 + atomic_inc_unchecked(&rdev->read_errors);
27789 if (conf->mddev->degraded >= conf->max_degraded)
27790 printk_rl(KERN_WARNING
27791 "md/raid:%s: read error not correctable "
27792 @@ -1622,7 +1622,7 @@ static void raid5_end_read_request(struc
27793 (unsigned long long)(sh->sector
27794 + rdev->data_offset),
27795 bdn);
27796 - else if (atomic_read(&rdev->read_errors)
27797 + else if (atomic_read_unchecked(&rdev->read_errors)
27798 > conf->max_nr_stripes)
27799 printk(KERN_WARNING
27800 "md/raid:%s: Too many read errors, failing device %s.\n",
27801 @@ -1945,6 +1945,7 @@ static sector_t compute_blocknr(struct s
27802 sector_t r_sector;
27803 struct stripe_head sh2;
27804
27805 + pax_track_stack();
27806
27807 chunk_offset = sector_div(new_sector, sectors_per_chunk);
27808 stripe = new_sector;
27809 diff -urNp linux-3.0.4/drivers/media/common/saa7146_hlp.c linux-3.0.4/drivers/media/common/saa7146_hlp.c
27810 --- linux-3.0.4/drivers/media/common/saa7146_hlp.c 2011-07-21 22:17:23.000000000 -0400
27811 +++ linux-3.0.4/drivers/media/common/saa7146_hlp.c 2011-08-23 21:48:14.000000000 -0400
27812 @@ -353,6 +353,8 @@ static void calculate_clipping_registers
27813
27814 int x[32], y[32], w[32], h[32];
27815
27816 + pax_track_stack();
27817 +
27818 /* clear out memory */
27819 memset(&line_list[0], 0x00, sizeof(u32)*32);
27820 memset(&pixel_list[0], 0x00, sizeof(u32)*32);
27821 diff -urNp linux-3.0.4/drivers/media/dvb/dvb-core/dvb_ca_en50221.c linux-3.0.4/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
27822 --- linux-3.0.4/drivers/media/dvb/dvb-core/dvb_ca_en50221.c 2011-07-21 22:17:23.000000000 -0400
27823 +++ linux-3.0.4/drivers/media/dvb/dvb-core/dvb_ca_en50221.c 2011-08-23 21:48:14.000000000 -0400
27824 @@ -590,6 +590,8 @@ static int dvb_ca_en50221_read_data(stru
27825 u8 buf[HOST_LINK_BUF_SIZE];
27826 int i;
27827
27828 + pax_track_stack();
27829 +
27830 dprintk("%s\n", __func__);
27831
27832 /* check if we have space for a link buf in the rx_buffer */
27833 @@ -1285,6 +1287,8 @@ static ssize_t dvb_ca_en50221_io_write(s
27834 unsigned long timeout;
27835 int written;
27836
27837 + pax_track_stack();
27838 +
27839 dprintk("%s\n", __func__);
27840
27841 /* Incoming packet has a 2 byte header. hdr[0] = slot_id, hdr[1] = connection_id */
27842 diff -urNp linux-3.0.4/drivers/media/dvb/dvb-core/dvb_demux.h linux-3.0.4/drivers/media/dvb/dvb-core/dvb_demux.h
27843 --- linux-3.0.4/drivers/media/dvb/dvb-core/dvb_demux.h 2011-07-21 22:17:23.000000000 -0400
27844 +++ linux-3.0.4/drivers/media/dvb/dvb-core/dvb_demux.h 2011-08-24 18:24:40.000000000 -0400
27845 @@ -68,12 +68,12 @@ struct dvb_demux_feed {
27846 union {
27847 struct dmx_ts_feed ts;
27848 struct dmx_section_feed sec;
27849 - } feed;
27850 + } __no_const feed;
27851
27852 union {
27853 dmx_ts_cb ts;
27854 dmx_section_cb sec;
27855 - } cb;
27856 + } __no_const cb;
27857
27858 struct dvb_demux *demux;
27859 void *priv;
27860 diff -urNp linux-3.0.4/drivers/media/dvb/dvb-core/dvbdev.c linux-3.0.4/drivers/media/dvb/dvb-core/dvbdev.c
27861 --- linux-3.0.4/drivers/media/dvb/dvb-core/dvbdev.c 2011-07-21 22:17:23.000000000 -0400
27862 +++ linux-3.0.4/drivers/media/dvb/dvb-core/dvbdev.c 2011-08-24 18:24:19.000000000 -0400
27863 @@ -192,7 +192,7 @@ int dvb_register_device(struct dvb_adapt
27864 const struct dvb_device *template, void *priv, int type)
27865 {
27866 struct dvb_device *dvbdev;
27867 - struct file_operations *dvbdevfops;
27868 + file_operations_no_const *dvbdevfops;
27869 struct device *clsdev;
27870 int minor;
27871 int id;
27872 diff -urNp linux-3.0.4/drivers/media/dvb/dvb-usb/cxusb.c linux-3.0.4/drivers/media/dvb/dvb-usb/cxusb.c
27873 --- linux-3.0.4/drivers/media/dvb/dvb-usb/cxusb.c 2011-07-21 22:17:23.000000000 -0400
27874 +++ linux-3.0.4/drivers/media/dvb/dvb-usb/cxusb.c 2011-08-24 18:26:33.000000000 -0400
27875 @@ -1059,7 +1059,7 @@ static struct dib0070_config dib7070p_di
27876 struct dib0700_adapter_state {
27877 int (*set_param_save) (struct dvb_frontend *,
27878 struct dvb_frontend_parameters *);
27879 -};
27880 +} __no_const;
27881
27882 static int dib7070_set_param_override(struct dvb_frontend *fe,
27883 struct dvb_frontend_parameters *fep)
27884 diff -urNp linux-3.0.4/drivers/media/dvb/dvb-usb/dib0700_core.c linux-3.0.4/drivers/media/dvb/dvb-usb/dib0700_core.c
27885 --- linux-3.0.4/drivers/media/dvb/dvb-usb/dib0700_core.c 2011-07-21 22:17:23.000000000 -0400
27886 +++ linux-3.0.4/drivers/media/dvb/dvb-usb/dib0700_core.c 2011-08-23 21:48:14.000000000 -0400
27887 @@ -434,6 +434,8 @@ int dib0700_download_firmware(struct usb
27888 if (!buf)
27889 return -ENOMEM;
27890
27891 + pax_track_stack();
27892 +
27893 while ((ret = dvb_usb_get_hexline(fw, &hx, &pos)) > 0) {
27894 deb_fwdata("writing to address 0x%08x (buffer: 0x%02x %02x)\n",
27895 hx.addr, hx.len, hx.chk);
27896 diff -urNp linux-3.0.4/drivers/media/dvb/dvb-usb/dibusb.h linux-3.0.4/drivers/media/dvb/dvb-usb/dibusb.h
27897 --- linux-3.0.4/drivers/media/dvb/dvb-usb/dibusb.h 2011-07-21 22:17:23.000000000 -0400
27898 +++ linux-3.0.4/drivers/media/dvb/dvb-usb/dibusb.h 2011-08-24 18:27:27.000000000 -0400
27899 @@ -97,7 +97,7 @@
27900 #define DIBUSB_IOCTL_CMD_DISABLE_STREAM 0x02
27901
27902 struct dibusb_state {
27903 - struct dib_fe_xfer_ops ops;
27904 + dib_fe_xfer_ops_no_const ops;
27905 int mt2060_present;
27906 u8 tuner_addr;
27907 };
27908 diff -urNp linux-3.0.4/drivers/media/dvb/dvb-usb/dw2102.c linux-3.0.4/drivers/media/dvb/dvb-usb/dw2102.c
27909 --- linux-3.0.4/drivers/media/dvb/dvb-usb/dw2102.c 2011-07-21 22:17:23.000000000 -0400
27910 +++ linux-3.0.4/drivers/media/dvb/dvb-usb/dw2102.c 2011-08-24 18:27:45.000000000 -0400
27911 @@ -95,7 +95,7 @@ struct su3000_state {
27912
27913 struct s6x0_state {
27914 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
27915 -};
27916 +} __no_const;
27917
27918 /* debug */
27919 static int dvb_usb_dw2102_debug;
27920 diff -urNp linux-3.0.4/drivers/media/dvb/dvb-usb/lmedm04.c linux-3.0.4/drivers/media/dvb/dvb-usb/lmedm04.c
27921 --- linux-3.0.4/drivers/media/dvb/dvb-usb/lmedm04.c 2011-07-21 22:17:23.000000000 -0400
27922 +++ linux-3.0.4/drivers/media/dvb/dvb-usb/lmedm04.c 2011-08-23 21:48:14.000000000 -0400
27923 @@ -742,6 +742,7 @@ static int lme2510_download_firmware(str
27924 usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
27925 0x06, 0x80, 0x0200, 0x00, data, 0x0109, 1000);
27926
27927 + pax_track_stack();
27928
27929 data[0] = 0x8a;
27930 len_in = 1;
27931 @@ -764,6 +765,8 @@ static void lme_coldreset(struct usb_dev
27932 int ret = 0, len_in;
27933 u8 data[512] = {0};
27934
27935 + pax_track_stack();
27936 +
27937 data[0] = 0x0a;
27938 len_in = 1;
27939 info("FRM Firmware Cold Reset");
27940 diff -urNp linux-3.0.4/drivers/media/dvb/frontends/dib3000.h linux-3.0.4/drivers/media/dvb/frontends/dib3000.h
27941 --- linux-3.0.4/drivers/media/dvb/frontends/dib3000.h 2011-07-21 22:17:23.000000000 -0400
27942 +++ linux-3.0.4/drivers/media/dvb/frontends/dib3000.h 2011-08-24 18:28:18.000000000 -0400
27943 @@ -40,10 +40,11 @@ struct dib_fe_xfer_ops
27944 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
27945 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
27946 };
27947 +typedef struct dib_fe_xfer_ops __no_const dib_fe_xfer_ops_no_const;
27948
27949 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
27950 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
27951 - struct i2c_adapter* i2c, struct dib_fe_xfer_ops *xfer_ops);
27952 + struct i2c_adapter* i2c, dib_fe_xfer_ops_no_const *xfer_ops);
27953 #else
27954 static inline struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
27955 struct i2c_adapter* i2c, struct dib_fe_xfer_ops *xfer_ops)
27956 diff -urNp linux-3.0.4/drivers/media/dvb/frontends/dib3000mb.c linux-3.0.4/drivers/media/dvb/frontends/dib3000mb.c
27957 --- linux-3.0.4/drivers/media/dvb/frontends/dib3000mb.c 2011-07-21 22:17:23.000000000 -0400
27958 +++ linux-3.0.4/drivers/media/dvb/frontends/dib3000mb.c 2011-08-24 18:28:42.000000000 -0400
27959 @@ -756,7 +756,7 @@ static int dib3000mb_tuner_pass_ctrl(str
27960 static struct dvb_frontend_ops dib3000mb_ops;
27961
27962 struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
27963 - struct i2c_adapter* i2c, struct dib_fe_xfer_ops *xfer_ops)
27964 + struct i2c_adapter* i2c, dib_fe_xfer_ops_no_const *xfer_ops)
27965 {
27966 struct dib3000_state* state = NULL;
27967
27968 diff -urNp linux-3.0.4/drivers/media/dvb/frontends/mb86a16.c linux-3.0.4/drivers/media/dvb/frontends/mb86a16.c
27969 --- linux-3.0.4/drivers/media/dvb/frontends/mb86a16.c 2011-07-21 22:17:23.000000000 -0400
27970 +++ linux-3.0.4/drivers/media/dvb/frontends/mb86a16.c 2011-08-23 21:48:14.000000000 -0400
27971 @@ -1060,6 +1060,8 @@ static int mb86a16_set_fe(struct mb86a16
27972 int ret = -1;
27973 int sync;
27974
27975 + pax_track_stack();
27976 +
27977 dprintk(verbose, MB86A16_INFO, 1, "freq=%d Mhz, symbrt=%d Ksps", state->frequency, state->srate);
27978
27979 fcp = 3000;
27980 diff -urNp linux-3.0.4/drivers/media/dvb/frontends/or51211.c linux-3.0.4/drivers/media/dvb/frontends/or51211.c
27981 --- linux-3.0.4/drivers/media/dvb/frontends/or51211.c 2011-07-21 22:17:23.000000000 -0400
27982 +++ linux-3.0.4/drivers/media/dvb/frontends/or51211.c 2011-08-23 21:48:14.000000000 -0400
27983 @@ -113,6 +113,8 @@ static int or51211_load_firmware (struct
27984 u8 tudata[585];
27985 int i;
27986
27987 + pax_track_stack();
27988 +
27989 dprintk("Firmware is %zd bytes\n",fw->size);
27990
27991 /* Get eprom data */
27992 diff -urNp linux-3.0.4/drivers/media/video/cx18/cx18-driver.c linux-3.0.4/drivers/media/video/cx18/cx18-driver.c
27993 --- linux-3.0.4/drivers/media/video/cx18/cx18-driver.c 2011-07-21 22:17:23.000000000 -0400
27994 +++ linux-3.0.4/drivers/media/video/cx18/cx18-driver.c 2011-08-23 21:48:14.000000000 -0400
27995 @@ -327,6 +327,8 @@ void cx18_read_eeprom(struct cx18 *cx, s
27996 struct i2c_client c;
27997 u8 eedata[256];
27998
27999 + pax_track_stack();
28000 +
28001 memset(&c, 0, sizeof(c));
28002 strlcpy(c.name, "cx18 tveeprom tmp", sizeof(c.name));
28003 c.adapter = &cx->i2c_adap[0];
28004 diff -urNp linux-3.0.4/drivers/media/video/cx23885/cx23885-input.c linux-3.0.4/drivers/media/video/cx23885/cx23885-input.c
28005 --- linux-3.0.4/drivers/media/video/cx23885/cx23885-input.c 2011-07-21 22:17:23.000000000 -0400
28006 +++ linux-3.0.4/drivers/media/video/cx23885/cx23885-input.c 2011-08-23 21:48:14.000000000 -0400
28007 @@ -53,6 +53,8 @@ static void cx23885_input_process_measur
28008 bool handle = false;
28009 struct ir_raw_event ir_core_event[64];
28010
28011 + pax_track_stack();
28012 +
28013 do {
28014 num = 0;
28015 v4l2_subdev_call(dev->sd_ir, ir, rx_read, (u8 *) ir_core_event,
28016 diff -urNp linux-3.0.4/drivers/media/video/pvrusb2/pvrusb2-eeprom.c linux-3.0.4/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
28017 --- linux-3.0.4/drivers/media/video/pvrusb2/pvrusb2-eeprom.c 2011-07-21 22:17:23.000000000 -0400
28018 +++ linux-3.0.4/drivers/media/video/pvrusb2/pvrusb2-eeprom.c 2011-08-23 21:48:14.000000000 -0400
28019 @@ -120,6 +120,8 @@ int pvr2_eeprom_analyze(struct pvr2_hdw
28020 u8 *eeprom;
28021 struct tveeprom tvdata;
28022
28023 + pax_track_stack();
28024 +
28025 memset(&tvdata,0,sizeof(tvdata));
28026
28027 eeprom = pvr2_eeprom_fetch(hdw);
28028 diff -urNp linux-3.0.4/drivers/media/video/saa7134/saa6752hs.c linux-3.0.4/drivers/media/video/saa7134/saa6752hs.c
28029 --- linux-3.0.4/drivers/media/video/saa7134/saa6752hs.c 2011-07-21 22:17:23.000000000 -0400
28030 +++ linux-3.0.4/drivers/media/video/saa7134/saa6752hs.c 2011-08-23 21:48:14.000000000 -0400
28031 @@ -682,6 +682,8 @@ static int saa6752hs_init(struct v4l2_su
28032 unsigned char localPAT[256];
28033 unsigned char localPMT[256];
28034
28035 + pax_track_stack();
28036 +
28037 /* Set video format - must be done first as it resets other settings */
28038 set_reg8(client, 0x41, h->video_format);
28039
28040 diff -urNp linux-3.0.4/drivers/media/video/saa7164/saa7164-cmd.c linux-3.0.4/drivers/media/video/saa7164/saa7164-cmd.c
28041 --- linux-3.0.4/drivers/media/video/saa7164/saa7164-cmd.c 2011-07-21 22:17:23.000000000 -0400
28042 +++ linux-3.0.4/drivers/media/video/saa7164/saa7164-cmd.c 2011-08-23 21:48:14.000000000 -0400
28043 @@ -88,6 +88,8 @@ int saa7164_irq_dequeue(struct saa7164_d
28044 u8 tmp[512];
28045 dprintk(DBGLVL_CMD, "%s()\n", __func__);
28046
28047 + pax_track_stack();
28048 +
28049 /* While any outstand message on the bus exists... */
28050 do {
28051
28052 @@ -141,6 +143,8 @@ int saa7164_cmd_dequeue(struct saa7164_d
28053 u8 tmp[512];
28054 dprintk(DBGLVL_CMD, "%s()\n", __func__);
28055
28056 + pax_track_stack();
28057 +
28058 while (loop) {
28059
28060 struct tmComResInfo tRsp = { 0, 0, 0, 0, 0, 0 };
28061 diff -urNp linux-3.0.4/drivers/media/video/timblogiw.c linux-3.0.4/drivers/media/video/timblogiw.c
28062 --- linux-3.0.4/drivers/media/video/timblogiw.c 2011-07-21 22:17:23.000000000 -0400
28063 +++ linux-3.0.4/drivers/media/video/timblogiw.c 2011-08-24 18:29:20.000000000 -0400
28064 @@ -745,7 +745,7 @@ static int timblogiw_mmap(struct file *f
28065
28066 /* Platform device functions */
28067
28068 -static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
28069 +static __devinitconst v4l2_ioctl_ops_no_const timblogiw_ioctl_ops = {
28070 .vidioc_querycap = timblogiw_querycap,
28071 .vidioc_enum_fmt_vid_cap = timblogiw_enum_fmt,
28072 .vidioc_g_fmt_vid_cap = timblogiw_g_fmt,
28073 diff -urNp linux-3.0.4/drivers/media/video/usbvision/usbvision-core.c linux-3.0.4/drivers/media/video/usbvision/usbvision-core.c
28074 --- linux-3.0.4/drivers/media/video/usbvision/usbvision-core.c 2011-07-21 22:17:23.000000000 -0400
28075 +++ linux-3.0.4/drivers/media/video/usbvision/usbvision-core.c 2011-08-23 21:48:14.000000000 -0400
28076 @@ -707,6 +707,8 @@ static enum parse_state usbvision_parse_
28077 unsigned char rv, gv, bv;
28078 static unsigned char *Y, *U, *V;
28079
28080 + pax_track_stack();
28081 +
28082 frame = usbvision->cur_frame;
28083 image_size = frame->frmwidth * frame->frmheight;
28084 if ((frame->v4l2_format.format == V4L2_PIX_FMT_YUV422P) ||
28085 diff -urNp linux-3.0.4/drivers/media/video/videobuf-dma-sg.c linux-3.0.4/drivers/media/video/videobuf-dma-sg.c
28086 --- linux-3.0.4/drivers/media/video/videobuf-dma-sg.c 2011-07-21 22:17:23.000000000 -0400
28087 +++ linux-3.0.4/drivers/media/video/videobuf-dma-sg.c 2011-08-23 21:48:14.000000000 -0400
28088 @@ -606,6 +606,8 @@ void *videobuf_sg_alloc(size_t size)
28089 {
28090 struct videobuf_queue q;
28091
28092 + pax_track_stack();
28093 +
28094 /* Required to make generic handler to call __videobuf_alloc */
28095 q.int_ops = &sg_ops;
28096
28097 diff -urNp linux-3.0.4/drivers/message/fusion/mptbase.c linux-3.0.4/drivers/message/fusion/mptbase.c
28098 --- linux-3.0.4/drivers/message/fusion/mptbase.c 2011-07-21 22:17:23.000000000 -0400
28099 +++ linux-3.0.4/drivers/message/fusion/mptbase.c 2011-08-23 21:48:14.000000000 -0400
28100 @@ -6681,8 +6681,13 @@ static int mpt_iocinfo_proc_show(struct
28101 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
28102 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
28103
28104 +#ifdef CONFIG_GRKERNSEC_HIDESYM
28105 + seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
28106 +#else
28107 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
28108 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
28109 +#endif
28110 +
28111 /*
28112 * Rounding UP to nearest 4-kB boundary here...
28113 */
28114 diff -urNp linux-3.0.4/drivers/message/fusion/mptsas.c linux-3.0.4/drivers/message/fusion/mptsas.c
28115 --- linux-3.0.4/drivers/message/fusion/mptsas.c 2011-07-21 22:17:23.000000000 -0400
28116 +++ linux-3.0.4/drivers/message/fusion/mptsas.c 2011-08-23 21:47:55.000000000 -0400
28117 @@ -439,6 +439,23 @@ mptsas_is_end_device(struct mptsas_devin
28118 return 0;
28119 }
28120
28121 +static inline void
28122 +mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
28123 +{
28124 + if (phy_info->port_details) {
28125 + phy_info->port_details->rphy = rphy;
28126 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
28127 + ioc->name, rphy));
28128 + }
28129 +
28130 + if (rphy) {
28131 + dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
28132 + &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
28133 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
28134 + ioc->name, rphy, rphy->dev.release));
28135 + }
28136 +}
28137 +
28138 /* no mutex */
28139 static void
28140 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
28141 @@ -477,23 +494,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *p
28142 return NULL;
28143 }
28144
28145 -static inline void
28146 -mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
28147 -{
28148 - if (phy_info->port_details) {
28149 - phy_info->port_details->rphy = rphy;
28150 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
28151 - ioc->name, rphy));
28152 - }
28153 -
28154 - if (rphy) {
28155 - dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
28156 - &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
28157 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
28158 - ioc->name, rphy, rphy->dev.release));
28159 - }
28160 -}
28161 -
28162 static inline struct sas_port *
28163 mptsas_get_port(struct mptsas_phyinfo *phy_info)
28164 {
28165 diff -urNp linux-3.0.4/drivers/message/fusion/mptscsih.c linux-3.0.4/drivers/message/fusion/mptscsih.c
28166 --- linux-3.0.4/drivers/message/fusion/mptscsih.c 2011-07-21 22:17:23.000000000 -0400
28167 +++ linux-3.0.4/drivers/message/fusion/mptscsih.c 2011-08-23 21:47:55.000000000 -0400
28168 @@ -1268,15 +1268,16 @@ mptscsih_info(struct Scsi_Host *SChost)
28169
28170 h = shost_priv(SChost);
28171
28172 - if (h) {
28173 - if (h->info_kbuf == NULL)
28174 - if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
28175 - return h->info_kbuf;
28176 - h->info_kbuf[0] = '\0';
28177 + if (!h)
28178 + return NULL;
28179
28180 - mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
28181 - h->info_kbuf[size-1] = '\0';
28182 - }
28183 + if (h->info_kbuf == NULL)
28184 + if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
28185 + return h->info_kbuf;
28186 + h->info_kbuf[0] = '\0';
28187 +
28188 + mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
28189 + h->info_kbuf[size-1] = '\0';
28190
28191 return h->info_kbuf;
28192 }
28193 diff -urNp linux-3.0.4/drivers/message/i2o/i2o_config.c linux-3.0.4/drivers/message/i2o/i2o_config.c
28194 --- linux-3.0.4/drivers/message/i2o/i2o_config.c 2011-07-21 22:17:23.000000000 -0400
28195 +++ linux-3.0.4/drivers/message/i2o/i2o_config.c 2011-08-23 21:48:14.000000000 -0400
28196 @@ -781,6 +781,8 @@ static int i2o_cfg_passthru(unsigned lon
28197 struct i2o_message *msg;
28198 unsigned int iop;
28199
28200 + pax_track_stack();
28201 +
28202 if (get_user(iop, &cmd->iop) || get_user(user_msg, &cmd->msg))
28203 return -EFAULT;
28204
28205 diff -urNp linux-3.0.4/drivers/message/i2o/i2o_proc.c linux-3.0.4/drivers/message/i2o/i2o_proc.c
28206 --- linux-3.0.4/drivers/message/i2o/i2o_proc.c 2011-07-21 22:17:23.000000000 -0400
28207 +++ linux-3.0.4/drivers/message/i2o/i2o_proc.c 2011-08-23 21:47:55.000000000 -0400
28208 @@ -255,13 +255,6 @@ static char *scsi_devices[] = {
28209 "Array Controller Device"
28210 };
28211
28212 -static char *chtostr(u8 * chars, int n)
28213 -{
28214 - char tmp[256];
28215 - tmp[0] = 0;
28216 - return strncat(tmp, (char *)chars, n);
28217 -}
28218 -
28219 static int i2o_report_query_status(struct seq_file *seq, int block_status,
28220 char *group)
28221 {
28222 @@ -838,8 +831,7 @@ static int i2o_seq_show_ddm_table(struct
28223
28224 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
28225 seq_printf(seq, "%-#8x", ddm_table.module_id);
28226 - seq_printf(seq, "%-29s",
28227 - chtostr(ddm_table.module_name_version, 28));
28228 + seq_printf(seq, "%-.28s", ddm_table.module_name_version);
28229 seq_printf(seq, "%9d ", ddm_table.data_size);
28230 seq_printf(seq, "%8d", ddm_table.code_size);
28231
28232 @@ -940,8 +932,8 @@ static int i2o_seq_show_drivers_stored(s
28233
28234 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
28235 seq_printf(seq, "%-#8x", dst->module_id);
28236 - seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
28237 - seq_printf(seq, "%-9s", chtostr(dst->date, 8));
28238 + seq_printf(seq, "%-.28s", dst->module_name_version);
28239 + seq_printf(seq, "%-.8s", dst->date);
28240 seq_printf(seq, "%8d ", dst->module_size);
28241 seq_printf(seq, "%8d ", dst->mpb_size);
28242 seq_printf(seq, "0x%04x", dst->module_flags);
28243 @@ -1272,14 +1264,10 @@ static int i2o_seq_show_dev_identity(str
28244 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
28245 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
28246 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
28247 - seq_printf(seq, "Vendor info : %s\n",
28248 - chtostr((u8 *) (work32 + 2), 16));
28249 - seq_printf(seq, "Product info : %s\n",
28250 - chtostr((u8 *) (work32 + 6), 16));
28251 - seq_printf(seq, "Description : %s\n",
28252 - chtostr((u8 *) (work32 + 10), 16));
28253 - seq_printf(seq, "Product rev. : %s\n",
28254 - chtostr((u8 *) (work32 + 14), 8));
28255 + seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
28256 + seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
28257 + seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
28258 + seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
28259
28260 seq_printf(seq, "Serial number : ");
28261 print_serial_number(seq, (u8 *) (work32 + 16),
28262 @@ -1324,10 +1312,8 @@ static int i2o_seq_show_ddm_identity(str
28263 }
28264
28265 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
28266 - seq_printf(seq, "Module name : %s\n",
28267 - chtostr(result.module_name, 24));
28268 - seq_printf(seq, "Module revision : %s\n",
28269 - chtostr(result.module_rev, 8));
28270 + seq_printf(seq, "Module name : %.24s\n", result.module_name);
28271 + seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
28272
28273 seq_printf(seq, "Serial number : ");
28274 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
28275 @@ -1358,14 +1344,10 @@ static int i2o_seq_show_uinfo(struct seq
28276 return 0;
28277 }
28278
28279 - seq_printf(seq, "Device name : %s\n",
28280 - chtostr(result.device_name, 64));
28281 - seq_printf(seq, "Service name : %s\n",
28282 - chtostr(result.service_name, 64));
28283 - seq_printf(seq, "Physical name : %s\n",
28284 - chtostr(result.physical_location, 64));
28285 - seq_printf(seq, "Instance number : %s\n",
28286 - chtostr(result.instance_number, 4));
28287 + seq_printf(seq, "Device name : %.64s\n", result.device_name);
28288 + seq_printf(seq, "Service name : %.64s\n", result.service_name);
28289 + seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
28290 + seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
28291
28292 return 0;
28293 }
28294 diff -urNp linux-3.0.4/drivers/message/i2o/iop.c linux-3.0.4/drivers/message/i2o/iop.c
28295 --- linux-3.0.4/drivers/message/i2o/iop.c 2011-07-21 22:17:23.000000000 -0400
28296 +++ linux-3.0.4/drivers/message/i2o/iop.c 2011-08-23 21:47:55.000000000 -0400
28297 @@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_contro
28298
28299 spin_lock_irqsave(&c->context_list_lock, flags);
28300
28301 - if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
28302 - atomic_inc(&c->context_list_counter);
28303 + if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
28304 + atomic_inc_unchecked(&c->context_list_counter);
28305
28306 - entry->context = atomic_read(&c->context_list_counter);
28307 + entry->context = atomic_read_unchecked(&c->context_list_counter);
28308
28309 list_add(&entry->list, &c->context_list);
28310
28311 @@ -1077,7 +1077,7 @@ struct i2o_controller *i2o_iop_alloc(voi
28312
28313 #if BITS_PER_LONG == 64
28314 spin_lock_init(&c->context_list_lock);
28315 - atomic_set(&c->context_list_counter, 0);
28316 + atomic_set_unchecked(&c->context_list_counter, 0);
28317 INIT_LIST_HEAD(&c->context_list);
28318 #endif
28319
28320 diff -urNp linux-3.0.4/drivers/mfd/abx500-core.c linux-3.0.4/drivers/mfd/abx500-core.c
28321 --- linux-3.0.4/drivers/mfd/abx500-core.c 2011-07-21 22:17:23.000000000 -0400
28322 +++ linux-3.0.4/drivers/mfd/abx500-core.c 2011-08-23 21:47:55.000000000 -0400
28323 @@ -14,7 +14,7 @@ static LIST_HEAD(abx500_list);
28324
28325 struct abx500_device_entry {
28326 struct list_head list;
28327 - struct abx500_ops ops;
28328 + abx500_ops_no_const ops;
28329 struct device *dev;
28330 };
28331
28332 diff -urNp linux-3.0.4/drivers/mfd/janz-cmodio.c linux-3.0.4/drivers/mfd/janz-cmodio.c
28333 --- linux-3.0.4/drivers/mfd/janz-cmodio.c 2011-07-21 22:17:23.000000000 -0400
28334 +++ linux-3.0.4/drivers/mfd/janz-cmodio.c 2011-08-23 21:47:55.000000000 -0400
28335 @@ -13,6 +13,7 @@
28336
28337 #include <linux/kernel.h>
28338 #include <linux/module.h>
28339 +#include <linux/slab.h>
28340 #include <linux/init.h>
28341 #include <linux/pci.h>
28342 #include <linux/interrupt.h>
28343 diff -urNp linux-3.0.4/drivers/mfd/wm8350-i2c.c linux-3.0.4/drivers/mfd/wm8350-i2c.c
28344 --- linux-3.0.4/drivers/mfd/wm8350-i2c.c 2011-07-21 22:17:23.000000000 -0400
28345 +++ linux-3.0.4/drivers/mfd/wm8350-i2c.c 2011-08-23 21:48:14.000000000 -0400
28346 @@ -44,6 +44,8 @@ static int wm8350_i2c_write_device(struc
28347 u8 msg[(WM8350_MAX_REGISTER << 1) + 1];
28348 int ret;
28349
28350 + pax_track_stack();
28351 +
28352 if (bytes > ((WM8350_MAX_REGISTER << 1) + 1))
28353 return -EINVAL;
28354
28355 diff -urNp linux-3.0.4/drivers/misc/lis3lv02d/lis3lv02d.c linux-3.0.4/drivers/misc/lis3lv02d/lis3lv02d.c
28356 --- linux-3.0.4/drivers/misc/lis3lv02d/lis3lv02d.c 2011-07-21 22:17:23.000000000 -0400
28357 +++ linux-3.0.4/drivers/misc/lis3lv02d/lis3lv02d.c 2011-08-23 21:47:55.000000000 -0400
28358 @@ -435,7 +435,7 @@ static irqreturn_t lis302dl_interrupt(in
28359 * the lid is closed. This leads to interrupts as soon as a little move
28360 * is done.
28361 */
28362 - atomic_inc(&lis3_dev.count);
28363 + atomic_inc_unchecked(&lis3_dev.count);
28364
28365 wake_up_interruptible(&lis3_dev.misc_wait);
28366 kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN);
28367 @@ -518,7 +518,7 @@ static int lis3lv02d_misc_open(struct in
28368 if (lis3_dev.pm_dev)
28369 pm_runtime_get_sync(lis3_dev.pm_dev);
28370
28371 - atomic_set(&lis3_dev.count, 0);
28372 + atomic_set_unchecked(&lis3_dev.count, 0);
28373 return 0;
28374 }
28375
28376 @@ -545,7 +545,7 @@ static ssize_t lis3lv02d_misc_read(struc
28377 add_wait_queue(&lis3_dev.misc_wait, &wait);
28378 while (true) {
28379 set_current_state(TASK_INTERRUPTIBLE);
28380 - data = atomic_xchg(&lis3_dev.count, 0);
28381 + data = atomic_xchg_unchecked(&lis3_dev.count, 0);
28382 if (data)
28383 break;
28384
28385 @@ -583,7 +583,7 @@ out:
28386 static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
28387 {
28388 poll_wait(file, &lis3_dev.misc_wait, wait);
28389 - if (atomic_read(&lis3_dev.count))
28390 + if (atomic_read_unchecked(&lis3_dev.count))
28391 return POLLIN | POLLRDNORM;
28392 return 0;
28393 }
28394 diff -urNp linux-3.0.4/drivers/misc/lis3lv02d/lis3lv02d.h linux-3.0.4/drivers/misc/lis3lv02d/lis3lv02d.h
28395 --- linux-3.0.4/drivers/misc/lis3lv02d/lis3lv02d.h 2011-07-21 22:17:23.000000000 -0400
28396 +++ linux-3.0.4/drivers/misc/lis3lv02d/lis3lv02d.h 2011-08-23 21:47:55.000000000 -0400
28397 @@ -265,7 +265,7 @@ struct lis3lv02d {
28398 struct input_polled_dev *idev; /* input device */
28399 struct platform_device *pdev; /* platform device */
28400 struct regulator_bulk_data regulators[2];
28401 - atomic_t count; /* interrupt count after last read */
28402 + atomic_unchecked_t count; /* interrupt count after last read */
28403 union axis_conversion ac; /* hw -> logical axis */
28404 int mapped_btns[3];
28405
28406 diff -urNp linux-3.0.4/drivers/misc/sgi-gru/gruhandles.c linux-3.0.4/drivers/misc/sgi-gru/gruhandles.c
28407 --- linux-3.0.4/drivers/misc/sgi-gru/gruhandles.c 2011-07-21 22:17:23.000000000 -0400
28408 +++ linux-3.0.4/drivers/misc/sgi-gru/gruhandles.c 2011-08-23 21:47:55.000000000 -0400
28409 @@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op
28410 unsigned long nsec;
28411
28412 nsec = CLKS2NSEC(clks);
28413 - atomic_long_inc(&mcs_op_statistics[op].count);
28414 - atomic_long_add(nsec, &mcs_op_statistics[op].total);
28415 + atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
28416 + atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
28417 if (mcs_op_statistics[op].max < nsec)
28418 mcs_op_statistics[op].max = nsec;
28419 }
28420 diff -urNp linux-3.0.4/drivers/misc/sgi-gru/gruprocfs.c linux-3.0.4/drivers/misc/sgi-gru/gruprocfs.c
28421 --- linux-3.0.4/drivers/misc/sgi-gru/gruprocfs.c 2011-07-21 22:17:23.000000000 -0400
28422 +++ linux-3.0.4/drivers/misc/sgi-gru/gruprocfs.c 2011-08-23 21:47:55.000000000 -0400
28423 @@ -32,9 +32,9 @@
28424
28425 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
28426
28427 -static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
28428 +static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
28429 {
28430 - unsigned long val = atomic_long_read(v);
28431 + unsigned long val = atomic_long_read_unchecked(v);
28432
28433 seq_printf(s, "%16lu %s\n", val, id);
28434 }
28435 @@ -134,8 +134,8 @@ static int mcs_statistics_show(struct se
28436
28437 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
28438 for (op = 0; op < mcsop_last; op++) {
28439 - count = atomic_long_read(&mcs_op_statistics[op].count);
28440 - total = atomic_long_read(&mcs_op_statistics[op].total);
28441 + count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
28442 + total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
28443 max = mcs_op_statistics[op].max;
28444 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
28445 count ? total / count : 0, max);
28446 diff -urNp linux-3.0.4/drivers/misc/sgi-gru/grutables.h linux-3.0.4/drivers/misc/sgi-gru/grutables.h
28447 --- linux-3.0.4/drivers/misc/sgi-gru/grutables.h 2011-07-21 22:17:23.000000000 -0400
28448 +++ linux-3.0.4/drivers/misc/sgi-gru/grutables.h 2011-08-23 21:47:55.000000000 -0400
28449 @@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
28450 * GRU statistics.
28451 */
28452 struct gru_stats_s {
28453 - atomic_long_t vdata_alloc;
28454 - atomic_long_t vdata_free;
28455 - atomic_long_t gts_alloc;
28456 - atomic_long_t gts_free;
28457 - atomic_long_t gms_alloc;
28458 - atomic_long_t gms_free;
28459 - atomic_long_t gts_double_allocate;
28460 - atomic_long_t assign_context;
28461 - atomic_long_t assign_context_failed;
28462 - atomic_long_t free_context;
28463 - atomic_long_t load_user_context;
28464 - atomic_long_t load_kernel_context;
28465 - atomic_long_t lock_kernel_context;
28466 - atomic_long_t unlock_kernel_context;
28467 - atomic_long_t steal_user_context;
28468 - atomic_long_t steal_kernel_context;
28469 - atomic_long_t steal_context_failed;
28470 - atomic_long_t nopfn;
28471 - atomic_long_t asid_new;
28472 - atomic_long_t asid_next;
28473 - atomic_long_t asid_wrap;
28474 - atomic_long_t asid_reuse;
28475 - atomic_long_t intr;
28476 - atomic_long_t intr_cbr;
28477 - atomic_long_t intr_tfh;
28478 - atomic_long_t intr_spurious;
28479 - atomic_long_t intr_mm_lock_failed;
28480 - atomic_long_t call_os;
28481 - atomic_long_t call_os_wait_queue;
28482 - atomic_long_t user_flush_tlb;
28483 - atomic_long_t user_unload_context;
28484 - atomic_long_t user_exception;
28485 - atomic_long_t set_context_option;
28486 - atomic_long_t check_context_retarget_intr;
28487 - atomic_long_t check_context_unload;
28488 - atomic_long_t tlb_dropin;
28489 - atomic_long_t tlb_preload_page;
28490 - atomic_long_t tlb_dropin_fail_no_asid;
28491 - atomic_long_t tlb_dropin_fail_upm;
28492 - atomic_long_t tlb_dropin_fail_invalid;
28493 - atomic_long_t tlb_dropin_fail_range_active;
28494 - atomic_long_t tlb_dropin_fail_idle;
28495 - atomic_long_t tlb_dropin_fail_fmm;
28496 - atomic_long_t tlb_dropin_fail_no_exception;
28497 - atomic_long_t tfh_stale_on_fault;
28498 - atomic_long_t mmu_invalidate_range;
28499 - atomic_long_t mmu_invalidate_page;
28500 - atomic_long_t flush_tlb;
28501 - atomic_long_t flush_tlb_gru;
28502 - atomic_long_t flush_tlb_gru_tgh;
28503 - atomic_long_t flush_tlb_gru_zero_asid;
28504 -
28505 - atomic_long_t copy_gpa;
28506 - atomic_long_t read_gpa;
28507 -
28508 - atomic_long_t mesq_receive;
28509 - atomic_long_t mesq_receive_none;
28510 - atomic_long_t mesq_send;
28511 - atomic_long_t mesq_send_failed;
28512 - atomic_long_t mesq_noop;
28513 - atomic_long_t mesq_send_unexpected_error;
28514 - atomic_long_t mesq_send_lb_overflow;
28515 - atomic_long_t mesq_send_qlimit_reached;
28516 - atomic_long_t mesq_send_amo_nacked;
28517 - atomic_long_t mesq_send_put_nacked;
28518 - atomic_long_t mesq_page_overflow;
28519 - atomic_long_t mesq_qf_locked;
28520 - atomic_long_t mesq_qf_noop_not_full;
28521 - atomic_long_t mesq_qf_switch_head_failed;
28522 - atomic_long_t mesq_qf_unexpected_error;
28523 - atomic_long_t mesq_noop_unexpected_error;
28524 - atomic_long_t mesq_noop_lb_overflow;
28525 - atomic_long_t mesq_noop_qlimit_reached;
28526 - atomic_long_t mesq_noop_amo_nacked;
28527 - atomic_long_t mesq_noop_put_nacked;
28528 - atomic_long_t mesq_noop_page_overflow;
28529 + atomic_long_unchecked_t vdata_alloc;
28530 + atomic_long_unchecked_t vdata_free;
28531 + atomic_long_unchecked_t gts_alloc;
28532 + atomic_long_unchecked_t gts_free;
28533 + atomic_long_unchecked_t gms_alloc;
28534 + atomic_long_unchecked_t gms_free;
28535 + atomic_long_unchecked_t gts_double_allocate;
28536 + atomic_long_unchecked_t assign_context;
28537 + atomic_long_unchecked_t assign_context_failed;
28538 + atomic_long_unchecked_t free_context;
28539 + atomic_long_unchecked_t load_user_context;
28540 + atomic_long_unchecked_t load_kernel_context;
28541 + atomic_long_unchecked_t lock_kernel_context;
28542 + atomic_long_unchecked_t unlock_kernel_context;
28543 + atomic_long_unchecked_t steal_user_context;
28544 + atomic_long_unchecked_t steal_kernel_context;
28545 + atomic_long_unchecked_t steal_context_failed;
28546 + atomic_long_unchecked_t nopfn;
28547 + atomic_long_unchecked_t asid_new;
28548 + atomic_long_unchecked_t asid_next;
28549 + atomic_long_unchecked_t asid_wrap;
28550 + atomic_long_unchecked_t asid_reuse;
28551 + atomic_long_unchecked_t intr;
28552 + atomic_long_unchecked_t intr_cbr;
28553 + atomic_long_unchecked_t intr_tfh;
28554 + atomic_long_unchecked_t intr_spurious;
28555 + atomic_long_unchecked_t intr_mm_lock_failed;
28556 + atomic_long_unchecked_t call_os;
28557 + atomic_long_unchecked_t call_os_wait_queue;
28558 + atomic_long_unchecked_t user_flush_tlb;
28559 + atomic_long_unchecked_t user_unload_context;
28560 + atomic_long_unchecked_t user_exception;
28561 + atomic_long_unchecked_t set_context_option;
28562 + atomic_long_unchecked_t check_context_retarget_intr;
28563 + atomic_long_unchecked_t check_context_unload;
28564 + atomic_long_unchecked_t tlb_dropin;
28565 + atomic_long_unchecked_t tlb_preload_page;
28566 + atomic_long_unchecked_t tlb_dropin_fail_no_asid;
28567 + atomic_long_unchecked_t tlb_dropin_fail_upm;
28568 + atomic_long_unchecked_t tlb_dropin_fail_invalid;
28569 + atomic_long_unchecked_t tlb_dropin_fail_range_active;
28570 + atomic_long_unchecked_t tlb_dropin_fail_idle;
28571 + atomic_long_unchecked_t tlb_dropin_fail_fmm;
28572 + atomic_long_unchecked_t tlb_dropin_fail_no_exception;
28573 + atomic_long_unchecked_t tfh_stale_on_fault;
28574 + atomic_long_unchecked_t mmu_invalidate_range;
28575 + atomic_long_unchecked_t mmu_invalidate_page;
28576 + atomic_long_unchecked_t flush_tlb;
28577 + atomic_long_unchecked_t flush_tlb_gru;
28578 + atomic_long_unchecked_t flush_tlb_gru_tgh;
28579 + atomic_long_unchecked_t flush_tlb_gru_zero_asid;
28580 +
28581 + atomic_long_unchecked_t copy_gpa;
28582 + atomic_long_unchecked_t read_gpa;
28583 +
28584 + atomic_long_unchecked_t mesq_receive;
28585 + atomic_long_unchecked_t mesq_receive_none;
28586 + atomic_long_unchecked_t mesq_send;
28587 + atomic_long_unchecked_t mesq_send_failed;
28588 + atomic_long_unchecked_t mesq_noop;
28589 + atomic_long_unchecked_t mesq_send_unexpected_error;
28590 + atomic_long_unchecked_t mesq_send_lb_overflow;
28591 + atomic_long_unchecked_t mesq_send_qlimit_reached;
28592 + atomic_long_unchecked_t mesq_send_amo_nacked;
28593 + atomic_long_unchecked_t mesq_send_put_nacked;
28594 + atomic_long_unchecked_t mesq_page_overflow;
28595 + atomic_long_unchecked_t mesq_qf_locked;
28596 + atomic_long_unchecked_t mesq_qf_noop_not_full;
28597 + atomic_long_unchecked_t mesq_qf_switch_head_failed;
28598 + atomic_long_unchecked_t mesq_qf_unexpected_error;
28599 + atomic_long_unchecked_t mesq_noop_unexpected_error;
28600 + atomic_long_unchecked_t mesq_noop_lb_overflow;
28601 + atomic_long_unchecked_t mesq_noop_qlimit_reached;
28602 + atomic_long_unchecked_t mesq_noop_amo_nacked;
28603 + atomic_long_unchecked_t mesq_noop_put_nacked;
28604 + atomic_long_unchecked_t mesq_noop_page_overflow;
28605
28606 };
28607
28608 @@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start
28609 tghop_invalidate, mcsop_last};
28610
28611 struct mcs_op_statistic {
28612 - atomic_long_t count;
28613 - atomic_long_t total;
28614 + atomic_long_unchecked_t count;
28615 + atomic_long_unchecked_t total;
28616 unsigned long max;
28617 };
28618
28619 @@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_st
28620
28621 #define STAT(id) do { \
28622 if (gru_options & OPT_STATS) \
28623 - atomic_long_inc(&gru_stats.id); \
28624 + atomic_long_inc_unchecked(&gru_stats.id); \
28625 } while (0)
28626
28627 #ifdef CONFIG_SGI_GRU_DEBUG
28628 diff -urNp linux-3.0.4/drivers/misc/sgi-xp/xp.h linux-3.0.4/drivers/misc/sgi-xp/xp.h
28629 --- linux-3.0.4/drivers/misc/sgi-xp/xp.h 2011-07-21 22:17:23.000000000 -0400
28630 +++ linux-3.0.4/drivers/misc/sgi-xp/xp.h 2011-08-23 21:47:55.000000000 -0400
28631 @@ -289,7 +289,7 @@ struct xpc_interface {
28632 xpc_notify_func, void *);
28633 void (*received) (short, int, void *);
28634 enum xp_retval (*partid_to_nasids) (short, void *);
28635 -};
28636 +} __no_const;
28637
28638 extern struct xpc_interface xpc_interface;
28639
28640 diff -urNp linux-3.0.4/drivers/mtd/chips/cfi_cmdset_0001.c linux-3.0.4/drivers/mtd/chips/cfi_cmdset_0001.c
28641 --- linux-3.0.4/drivers/mtd/chips/cfi_cmdset_0001.c 2011-07-21 22:17:23.000000000 -0400
28642 +++ linux-3.0.4/drivers/mtd/chips/cfi_cmdset_0001.c 2011-08-23 21:48:14.000000000 -0400
28643 @@ -757,6 +757,8 @@ static int chip_ready (struct map_info *
28644 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
28645 unsigned long timeo = jiffies + HZ;
28646
28647 + pax_track_stack();
28648 +
28649 /* Prevent setting state FL_SYNCING for chip in suspended state. */
28650 if (mode == FL_SYNCING && chip->oldstate != FL_READY)
28651 goto sleep;
28652 @@ -1653,6 +1655,8 @@ static int __xipram do_write_buffer(stru
28653 unsigned long initial_adr;
28654 int initial_len = len;
28655
28656 + pax_track_stack();
28657 +
28658 wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
28659 adr += chip->start;
28660 initial_adr = adr;
28661 @@ -1871,6 +1875,8 @@ static int __xipram do_erase_oneblock(st
28662 int retries = 3;
28663 int ret;
28664
28665 + pax_track_stack();
28666 +
28667 adr += chip->start;
28668
28669 retry:
28670 diff -urNp linux-3.0.4/drivers/mtd/chips/cfi_cmdset_0020.c linux-3.0.4/drivers/mtd/chips/cfi_cmdset_0020.c
28671 --- linux-3.0.4/drivers/mtd/chips/cfi_cmdset_0020.c 2011-07-21 22:17:23.000000000 -0400
28672 +++ linux-3.0.4/drivers/mtd/chips/cfi_cmdset_0020.c 2011-08-23 21:48:14.000000000 -0400
28673 @@ -255,6 +255,8 @@ static inline int do_read_onechip(struct
28674 unsigned long cmd_addr;
28675 struct cfi_private *cfi = map->fldrv_priv;
28676
28677 + pax_track_stack();
28678 +
28679 adr += chip->start;
28680
28681 /* Ensure cmd read/writes are aligned. */
28682 @@ -429,6 +431,8 @@ static inline int do_write_buffer(struct
28683 DECLARE_WAITQUEUE(wait, current);
28684 int wbufsize, z;
28685
28686 + pax_track_stack();
28687 +
28688 /* M58LW064A requires bus alignment for buffer wriets -- saw */
28689 if (adr & (map_bankwidth(map)-1))
28690 return -EINVAL;
28691 @@ -743,6 +747,8 @@ static inline int do_erase_oneblock(stru
28692 DECLARE_WAITQUEUE(wait, current);
28693 int ret = 0;
28694
28695 + pax_track_stack();
28696 +
28697 adr += chip->start;
28698
28699 /* Let's determine this according to the interleave only once */
28700 @@ -1048,6 +1054,8 @@ static inline int do_lock_oneblock(struc
28701 unsigned long timeo = jiffies + HZ;
28702 DECLARE_WAITQUEUE(wait, current);
28703
28704 + pax_track_stack();
28705 +
28706 adr += chip->start;
28707
28708 /* Let's determine this according to the interleave only once */
28709 @@ -1197,6 +1205,8 @@ static inline int do_unlock_oneblock(str
28710 unsigned long timeo = jiffies + HZ;
28711 DECLARE_WAITQUEUE(wait, current);
28712
28713 + pax_track_stack();
28714 +
28715 adr += chip->start;
28716
28717 /* Let's determine this according to the interleave only once */
28718 diff -urNp linux-3.0.4/drivers/mtd/devices/doc2000.c linux-3.0.4/drivers/mtd/devices/doc2000.c
28719 --- linux-3.0.4/drivers/mtd/devices/doc2000.c 2011-07-21 22:17:23.000000000 -0400
28720 +++ linux-3.0.4/drivers/mtd/devices/doc2000.c 2011-08-23 21:47:55.000000000 -0400
28721 @@ -776,7 +776,7 @@ static int doc_write(struct mtd_info *mt
28722
28723 /* The ECC will not be calculated correctly if less than 512 is written */
28724 /* DBB-
28725 - if (len != 0x200 && eccbuf)
28726 + if (len != 0x200)
28727 printk(KERN_WARNING
28728 "ECC needs a full sector write (adr: %lx size %lx)\n",
28729 (long) to, (long) len);
28730 diff -urNp linux-3.0.4/drivers/mtd/devices/doc2001.c linux-3.0.4/drivers/mtd/devices/doc2001.c
28731 --- linux-3.0.4/drivers/mtd/devices/doc2001.c 2011-07-21 22:17:23.000000000 -0400
28732 +++ linux-3.0.4/drivers/mtd/devices/doc2001.c 2011-08-23 21:47:55.000000000 -0400
28733 @@ -393,7 +393,7 @@ static int doc_read (struct mtd_info *mt
28734 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
28735
28736 /* Don't allow read past end of device */
28737 - if (from >= this->totlen)
28738 + if (from >= this->totlen || !len)
28739 return -EINVAL;
28740
28741 /* Don't allow a single read to cross a 512-byte block boundary */
28742 diff -urNp linux-3.0.4/drivers/mtd/ftl.c linux-3.0.4/drivers/mtd/ftl.c
28743 --- linux-3.0.4/drivers/mtd/ftl.c 2011-07-21 22:17:23.000000000 -0400
28744 +++ linux-3.0.4/drivers/mtd/ftl.c 2011-08-23 21:48:14.000000000 -0400
28745 @@ -474,6 +474,8 @@ static int copy_erase_unit(partition_t *
28746 loff_t offset;
28747 uint16_t srcunitswap = cpu_to_le16(srcunit);
28748
28749 + pax_track_stack();
28750 +
28751 eun = &part->EUNInfo[srcunit];
28752 xfer = &part->XferInfo[xferunit];
28753 DEBUG(2, "ftl_cs: copying block 0x%x to 0x%x\n",
28754 diff -urNp linux-3.0.4/drivers/mtd/inftlcore.c linux-3.0.4/drivers/mtd/inftlcore.c
28755 --- linux-3.0.4/drivers/mtd/inftlcore.c 2011-07-21 22:17:23.000000000 -0400
28756 +++ linux-3.0.4/drivers/mtd/inftlcore.c 2011-08-23 21:48:14.000000000 -0400
28757 @@ -259,6 +259,8 @@ static u16 INFTL_foldchain(struct INFTLr
28758 struct inftl_oob oob;
28759 size_t retlen;
28760
28761 + pax_track_stack();
28762 +
28763 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_foldchain(inftl=%p,thisVUC=%d,"
28764 "pending=%d)\n", inftl, thisVUC, pendingblock);
28765
28766 diff -urNp linux-3.0.4/drivers/mtd/inftlmount.c linux-3.0.4/drivers/mtd/inftlmount.c
28767 --- linux-3.0.4/drivers/mtd/inftlmount.c 2011-07-21 22:17:23.000000000 -0400
28768 +++ linux-3.0.4/drivers/mtd/inftlmount.c 2011-08-23 21:48:14.000000000 -0400
28769 @@ -53,6 +53,8 @@ static int find_boot_record(struct INFTL
28770 struct INFTLPartition *ip;
28771 size_t retlen;
28772
28773 + pax_track_stack();
28774 +
28775 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: find_boot_record(inftl=%p)\n", inftl);
28776
28777 /*
28778 diff -urNp linux-3.0.4/drivers/mtd/lpddr/qinfo_probe.c linux-3.0.4/drivers/mtd/lpddr/qinfo_probe.c
28779 --- linux-3.0.4/drivers/mtd/lpddr/qinfo_probe.c 2011-07-21 22:17:23.000000000 -0400
28780 +++ linux-3.0.4/drivers/mtd/lpddr/qinfo_probe.c 2011-08-23 21:48:14.000000000 -0400
28781 @@ -106,6 +106,8 @@ static int lpddr_pfow_present(struct map
28782 {
28783 map_word pfow_val[4];
28784
28785 + pax_track_stack();
28786 +
28787 /* Check identification string */
28788 pfow_val[0] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_P);
28789 pfow_val[1] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_F);
28790 diff -urNp linux-3.0.4/drivers/mtd/mtdchar.c linux-3.0.4/drivers/mtd/mtdchar.c
28791 --- linux-3.0.4/drivers/mtd/mtdchar.c 2011-07-21 22:17:23.000000000 -0400
28792 +++ linux-3.0.4/drivers/mtd/mtdchar.c 2011-08-23 21:48:14.000000000 -0400
28793 @@ -553,6 +553,8 @@ static int mtd_ioctl(struct file *file,
28794 u_long size;
28795 struct mtd_info_user info;
28796
28797 + pax_track_stack();
28798 +
28799 DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");
28800
28801 size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
28802 diff -urNp linux-3.0.4/drivers/mtd/nand/denali.c linux-3.0.4/drivers/mtd/nand/denali.c
28803 --- linux-3.0.4/drivers/mtd/nand/denali.c 2011-07-21 22:17:23.000000000 -0400
28804 +++ linux-3.0.4/drivers/mtd/nand/denali.c 2011-08-23 21:47:55.000000000 -0400
28805 @@ -26,6 +26,7 @@
28806 #include <linux/pci.h>
28807 #include <linux/mtd/mtd.h>
28808 #include <linux/module.h>
28809 +#include <linux/slab.h>
28810
28811 #include "denali.h"
28812
28813 diff -urNp linux-3.0.4/drivers/mtd/nftlcore.c linux-3.0.4/drivers/mtd/nftlcore.c
28814 --- linux-3.0.4/drivers/mtd/nftlcore.c 2011-07-21 22:17:23.000000000 -0400
28815 +++ linux-3.0.4/drivers/mtd/nftlcore.c 2011-08-23 21:48:14.000000000 -0400
28816 @@ -264,6 +264,8 @@ static u16 NFTL_foldchain (struct NFTLre
28817 int inplace = 1;
28818 size_t retlen;
28819
28820 + pax_track_stack();
28821 +
28822 memset(BlockMap, 0xff, sizeof(BlockMap));
28823 memset(BlockFreeFound, 0, sizeof(BlockFreeFound));
28824
28825 diff -urNp linux-3.0.4/drivers/mtd/nftlmount.c linux-3.0.4/drivers/mtd/nftlmount.c
28826 --- linux-3.0.4/drivers/mtd/nftlmount.c 2011-07-21 22:17:23.000000000 -0400
28827 +++ linux-3.0.4/drivers/mtd/nftlmount.c 2011-08-23 21:48:14.000000000 -0400
28828 @@ -24,6 +24,7 @@
28829 #include <asm/errno.h>
28830 #include <linux/delay.h>
28831 #include <linux/slab.h>
28832 +#include <linux/sched.h>
28833 #include <linux/mtd/mtd.h>
28834 #include <linux/mtd/nand.h>
28835 #include <linux/mtd/nftl.h>
28836 @@ -45,6 +46,8 @@ static int find_boot_record(struct NFTLr
28837 struct mtd_info *mtd = nftl->mbd.mtd;
28838 unsigned int i;
28839
28840 + pax_track_stack();
28841 +
28842 /* Assume logical EraseSize == physical erasesize for starting the scan.
28843 We'll sort it out later if we find a MediaHeader which says otherwise */
28844 /* Actually, we won't. The new DiskOnChip driver has already scanned
28845 diff -urNp linux-3.0.4/drivers/mtd/ubi/build.c linux-3.0.4/drivers/mtd/ubi/build.c
28846 --- linux-3.0.4/drivers/mtd/ubi/build.c 2011-07-21 22:17:23.000000000 -0400
28847 +++ linux-3.0.4/drivers/mtd/ubi/build.c 2011-08-23 21:47:55.000000000 -0400
28848 @@ -1287,7 +1287,7 @@ module_exit(ubi_exit);
28849 static int __init bytes_str_to_int(const char *str)
28850 {
28851 char *endp;
28852 - unsigned long result;
28853 + unsigned long result, scale = 1;
28854
28855 result = simple_strtoul(str, &endp, 0);
28856 if (str == endp || result >= INT_MAX) {
28857 @@ -1298,11 +1298,11 @@ static int __init bytes_str_to_int(const
28858
28859 switch (*endp) {
28860 case 'G':
28861 - result *= 1024;
28862 + scale *= 1024;
28863 case 'M':
28864 - result *= 1024;
28865 + scale *= 1024;
28866 case 'K':
28867 - result *= 1024;
28868 + scale *= 1024;
28869 if (endp[1] == 'i' && endp[2] == 'B')
28870 endp += 2;
28871 case '\0':
28872 @@ -1313,7 +1313,13 @@ static int __init bytes_str_to_int(const
28873 return -EINVAL;
28874 }
28875
28876 - return result;
28877 + if ((intoverflow_t)result*scale >= INT_MAX) {
28878 + printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
28879 + str);
28880 + return -EINVAL;
28881 + }
28882 +
28883 + return result*scale;
28884 }
28885
28886 /**
28887 diff -urNp linux-3.0.4/drivers/net/bna/bfa_ioc_ct.c linux-3.0.4/drivers/net/bna/bfa_ioc_ct.c
28888 --- linux-3.0.4/drivers/net/bna/bfa_ioc_ct.c 2011-07-21 22:17:23.000000000 -0400
28889 +++ linux-3.0.4/drivers/net/bna/bfa_ioc_ct.c 2011-08-23 21:47:55.000000000 -0400
28890 @@ -48,7 +48,21 @@ static void bfa_ioc_ct_sync_ack(struct b
28891 static bool bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc);
28892 static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb, bool fcmode);
28893
28894 -static struct bfa_ioc_hwif nw_hwif_ct;
28895 +static struct bfa_ioc_hwif nw_hwif_ct = {
28896 + .ioc_pll_init = bfa_ioc_ct_pll_init,
28897 + .ioc_firmware_lock = bfa_ioc_ct_firmware_lock,
28898 + .ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock,
28899 + .ioc_reg_init = bfa_ioc_ct_reg_init,
28900 + .ioc_map_port = bfa_ioc_ct_map_port,
28901 + .ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set,
28902 + .ioc_notify_fail = bfa_ioc_ct_notify_fail,
28903 + .ioc_ownership_reset = bfa_ioc_ct_ownership_reset,
28904 + .ioc_sync_start = bfa_ioc_ct_sync_start,
28905 + .ioc_sync_join = bfa_ioc_ct_sync_join,
28906 + .ioc_sync_leave = bfa_ioc_ct_sync_leave,
28907 + .ioc_sync_ack = bfa_ioc_ct_sync_ack,
28908 + .ioc_sync_complete = bfa_ioc_ct_sync_complete
28909 +};
28910
28911 /**
28912 * Called from bfa_ioc_attach() to map asic specific calls.
28913 @@ -56,20 +70,6 @@ static struct bfa_ioc_hwif nw_hwif_ct;
28914 void
28915 bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc)
28916 {
28917 - nw_hwif_ct.ioc_pll_init = bfa_ioc_ct_pll_init;
28918 - nw_hwif_ct.ioc_firmware_lock = bfa_ioc_ct_firmware_lock;
28919 - nw_hwif_ct.ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock;
28920 - nw_hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init;
28921 - nw_hwif_ct.ioc_map_port = bfa_ioc_ct_map_port;
28922 - nw_hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
28923 - nw_hwif_ct.ioc_notify_fail = bfa_ioc_ct_notify_fail;
28924 - nw_hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
28925 - nw_hwif_ct.ioc_sync_start = bfa_ioc_ct_sync_start;
28926 - nw_hwif_ct.ioc_sync_join = bfa_ioc_ct_sync_join;
28927 - nw_hwif_ct.ioc_sync_leave = bfa_ioc_ct_sync_leave;
28928 - nw_hwif_ct.ioc_sync_ack = bfa_ioc_ct_sync_ack;
28929 - nw_hwif_ct.ioc_sync_complete = bfa_ioc_ct_sync_complete;
28930 -
28931 ioc->ioc_hwif = &nw_hwif_ct;
28932 }
28933
28934 diff -urNp linux-3.0.4/drivers/net/bna/bnad.c linux-3.0.4/drivers/net/bna/bnad.c
28935 --- linux-3.0.4/drivers/net/bna/bnad.c 2011-07-21 22:17:23.000000000 -0400
28936 +++ linux-3.0.4/drivers/net/bna/bnad.c 2011-08-23 21:47:55.000000000 -0400
28937 @@ -1681,7 +1681,14 @@ bnad_setup_tx(struct bnad *bnad, uint tx
28938 struct bna_intr_info *intr_info =
28939 &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
28940 struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
28941 - struct bna_tx_event_cbfn tx_cbfn;
28942 + static struct bna_tx_event_cbfn tx_cbfn = {
28943 + /* Initialize the tx event handlers */
28944 + .tcb_setup_cbfn = bnad_cb_tcb_setup,
28945 + .tcb_destroy_cbfn = bnad_cb_tcb_destroy,
28946 + .tx_stall_cbfn = bnad_cb_tx_stall,
28947 + .tx_resume_cbfn = bnad_cb_tx_resume,
28948 + .tx_cleanup_cbfn = bnad_cb_tx_cleanup
28949 + };
28950 struct bna_tx *tx;
28951 unsigned long flags;
28952
28953 @@ -1690,13 +1697,6 @@ bnad_setup_tx(struct bnad *bnad, uint tx
28954 tx_config->txq_depth = bnad->txq_depth;
28955 tx_config->tx_type = BNA_TX_T_REGULAR;
28956
28957 - /* Initialize the tx event handlers */
28958 - tx_cbfn.tcb_setup_cbfn = bnad_cb_tcb_setup;
28959 - tx_cbfn.tcb_destroy_cbfn = bnad_cb_tcb_destroy;
28960 - tx_cbfn.tx_stall_cbfn = bnad_cb_tx_stall;
28961 - tx_cbfn.tx_resume_cbfn = bnad_cb_tx_resume;
28962 - tx_cbfn.tx_cleanup_cbfn = bnad_cb_tx_cleanup;
28963 -
28964 /* Get BNA's resource requirement for one tx object */
28965 spin_lock_irqsave(&bnad->bna_lock, flags);
28966 bna_tx_res_req(bnad->num_txq_per_tx,
28967 @@ -1827,21 +1827,21 @@ bnad_setup_rx(struct bnad *bnad, uint rx
28968 struct bna_intr_info *intr_info =
28969 &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
28970 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
28971 - struct bna_rx_event_cbfn rx_cbfn;
28972 + static struct bna_rx_event_cbfn rx_cbfn = {
28973 + /* Initialize the Rx event handlers */
28974 + .rcb_setup_cbfn = bnad_cb_rcb_setup,
28975 + .rcb_destroy_cbfn = bnad_cb_rcb_destroy,
28976 + .ccb_setup_cbfn = bnad_cb_ccb_setup,
28977 + .ccb_destroy_cbfn = bnad_cb_ccb_destroy,
28978 + .rx_cleanup_cbfn = bnad_cb_rx_cleanup,
28979 + .rx_post_cbfn = bnad_cb_rx_post
28980 + };
28981 struct bna_rx *rx;
28982 unsigned long flags;
28983
28984 /* Initialize the Rx object configuration */
28985 bnad_init_rx_config(bnad, rx_config);
28986
28987 - /* Initialize the Rx event handlers */
28988 - rx_cbfn.rcb_setup_cbfn = bnad_cb_rcb_setup;
28989 - rx_cbfn.rcb_destroy_cbfn = bnad_cb_rcb_destroy;
28990 - rx_cbfn.ccb_setup_cbfn = bnad_cb_ccb_setup;
28991 - rx_cbfn.ccb_destroy_cbfn = bnad_cb_ccb_destroy;
28992 - rx_cbfn.rx_cleanup_cbfn = bnad_cb_rx_cleanup;
28993 - rx_cbfn.rx_post_cbfn = bnad_cb_rx_post;
28994 -
28995 /* Get BNA's resource requirement for one Rx object */
28996 spin_lock_irqsave(&bnad->bna_lock, flags);
28997 bna_rx_res_req(rx_config, res_info);
28998 diff -urNp linux-3.0.4/drivers/net/bnx2.c linux-3.0.4/drivers/net/bnx2.c
28999 --- linux-3.0.4/drivers/net/bnx2.c 2011-07-21 22:17:23.000000000 -0400
29000 +++ linux-3.0.4/drivers/net/bnx2.c 2011-08-23 21:48:14.000000000 -0400
29001 @@ -5828,6 +5828,8 @@ bnx2_test_nvram(struct bnx2 *bp)
29002 int rc = 0;
29003 u32 magic, csum;
29004
29005 + pax_track_stack();
29006 +
29007 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
29008 goto test_nvram_done;
29009
29010 diff -urNp linux-3.0.4/drivers/net/bnx2x/bnx2x_ethtool.c linux-3.0.4/drivers/net/bnx2x/bnx2x_ethtool.c
29011 --- linux-3.0.4/drivers/net/bnx2x/bnx2x_ethtool.c 2011-07-21 22:17:23.000000000 -0400
29012 +++ linux-3.0.4/drivers/net/bnx2x/bnx2x_ethtool.c 2011-08-23 21:48:14.000000000 -0400
29013 @@ -1705,6 +1705,8 @@ static int bnx2x_test_nvram(struct bnx2x
29014 int i, rc;
29015 u32 magic, crc;
29016
29017 + pax_track_stack();
29018 +
29019 if (BP_NOMCP(bp))
29020 return 0;
29021
29022 diff -urNp linux-3.0.4/drivers/net/cxgb3/l2t.h linux-3.0.4/drivers/net/cxgb3/l2t.h
29023 --- linux-3.0.4/drivers/net/cxgb3/l2t.h 2011-07-21 22:17:23.000000000 -0400
29024 +++ linux-3.0.4/drivers/net/cxgb3/l2t.h 2011-08-23 21:47:55.000000000 -0400
29025 @@ -86,7 +86,7 @@ typedef void (*arp_failure_handler_func)
29026 */
29027 struct l2t_skb_cb {
29028 arp_failure_handler_func arp_failure_handler;
29029 -};
29030 +} __no_const;
29031
29032 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
29033
29034 diff -urNp linux-3.0.4/drivers/net/cxgb4/cxgb4_main.c linux-3.0.4/drivers/net/cxgb4/cxgb4_main.c
29035 --- linux-3.0.4/drivers/net/cxgb4/cxgb4_main.c 2011-07-21 22:17:23.000000000 -0400
29036 +++ linux-3.0.4/drivers/net/cxgb4/cxgb4_main.c 2011-08-23 21:48:14.000000000 -0400
29037 @@ -3396,6 +3396,8 @@ static int __devinit enable_msix(struct
29038 unsigned int nchan = adap->params.nports;
29039 struct msix_entry entries[MAX_INGQ + 1];
29040
29041 + pax_track_stack();
29042 +
29043 for (i = 0; i < ARRAY_SIZE(entries); ++i)
29044 entries[i].entry = i;
29045
29046 diff -urNp linux-3.0.4/drivers/net/cxgb4/t4_hw.c linux-3.0.4/drivers/net/cxgb4/t4_hw.c
29047 --- linux-3.0.4/drivers/net/cxgb4/t4_hw.c 2011-07-21 22:17:23.000000000 -0400
29048 +++ linux-3.0.4/drivers/net/cxgb4/t4_hw.c 2011-08-23 21:48:14.000000000 -0400
29049 @@ -362,6 +362,8 @@ static int get_vpd_params(struct adapter
29050 u8 vpd[VPD_LEN], csum;
29051 unsigned int vpdr_len, kw_offset, id_len;
29052
29053 + pax_track_stack();
29054 +
29055 ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(vpd), vpd);
29056 if (ret < 0)
29057 return ret;
29058 diff -urNp linux-3.0.4/drivers/net/e1000e/82571.c linux-3.0.4/drivers/net/e1000e/82571.c
29059 --- linux-3.0.4/drivers/net/e1000e/82571.c 2011-07-21 22:17:23.000000000 -0400
29060 +++ linux-3.0.4/drivers/net/e1000e/82571.c 2011-08-23 21:47:55.000000000 -0400
29061 @@ -239,7 +239,7 @@ static s32 e1000_init_mac_params_82571(s
29062 {
29063 struct e1000_hw *hw = &adapter->hw;
29064 struct e1000_mac_info *mac = &hw->mac;
29065 - struct e1000_mac_operations *func = &mac->ops;
29066 + e1000_mac_operations_no_const *func = &mac->ops;
29067 u32 swsm = 0;
29068 u32 swsm2 = 0;
29069 bool force_clear_smbi = false;
29070 diff -urNp linux-3.0.4/drivers/net/e1000e/es2lan.c linux-3.0.4/drivers/net/e1000e/es2lan.c
29071 --- linux-3.0.4/drivers/net/e1000e/es2lan.c 2011-07-21 22:17:23.000000000 -0400
29072 +++ linux-3.0.4/drivers/net/e1000e/es2lan.c 2011-08-23 21:47:55.000000000 -0400
29073 @@ -205,7 +205,7 @@ static s32 e1000_init_mac_params_80003es
29074 {
29075 struct e1000_hw *hw = &adapter->hw;
29076 struct e1000_mac_info *mac = &hw->mac;
29077 - struct e1000_mac_operations *func = &mac->ops;
29078 + e1000_mac_operations_no_const *func = &mac->ops;
29079
29080 /* Set media type */
29081 switch (adapter->pdev->device) {
29082 diff -urNp linux-3.0.4/drivers/net/e1000e/hw.h linux-3.0.4/drivers/net/e1000e/hw.h
29083 --- linux-3.0.4/drivers/net/e1000e/hw.h 2011-07-21 22:17:23.000000000 -0400
29084 +++ linux-3.0.4/drivers/net/e1000e/hw.h 2011-08-23 21:47:55.000000000 -0400
29085 @@ -776,6 +776,7 @@ struct e1000_mac_operations {
29086 void (*write_vfta)(struct e1000_hw *, u32, u32);
29087 s32 (*read_mac_addr)(struct e1000_hw *);
29088 };
29089 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
29090
29091 /* Function pointers for the PHY. */
29092 struct e1000_phy_operations {
29093 @@ -799,6 +800,7 @@ struct e1000_phy_operations {
29094 void (*power_up)(struct e1000_hw *);
29095 void (*power_down)(struct e1000_hw *);
29096 };
29097 +typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
29098
29099 /* Function pointers for the NVM. */
29100 struct e1000_nvm_operations {
29101 @@ -810,9 +812,10 @@ struct e1000_nvm_operations {
29102 s32 (*validate)(struct e1000_hw *);
29103 s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
29104 };
29105 +typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
29106
29107 struct e1000_mac_info {
29108 - struct e1000_mac_operations ops;
29109 + e1000_mac_operations_no_const ops;
29110 u8 addr[ETH_ALEN];
29111 u8 perm_addr[ETH_ALEN];
29112
29113 @@ -853,7 +856,7 @@ struct e1000_mac_info {
29114 };
29115
29116 struct e1000_phy_info {
29117 - struct e1000_phy_operations ops;
29118 + e1000_phy_operations_no_const ops;
29119
29120 enum e1000_phy_type type;
29121
29122 @@ -887,7 +890,7 @@ struct e1000_phy_info {
29123 };
29124
29125 struct e1000_nvm_info {
29126 - struct e1000_nvm_operations ops;
29127 + e1000_nvm_operations_no_const ops;
29128
29129 enum e1000_nvm_type type;
29130 enum e1000_nvm_override override;
29131 diff -urNp linux-3.0.4/drivers/net/hamradio/6pack.c linux-3.0.4/drivers/net/hamradio/6pack.c
29132 --- linux-3.0.4/drivers/net/hamradio/6pack.c 2011-07-21 22:17:23.000000000 -0400
29133 +++ linux-3.0.4/drivers/net/hamradio/6pack.c 2011-08-23 21:48:14.000000000 -0400
29134 @@ -463,6 +463,8 @@ static void sixpack_receive_buf(struct t
29135 unsigned char buf[512];
29136 int count1;
29137
29138 + pax_track_stack();
29139 +
29140 if (!count)
29141 return;
29142
29143 diff -urNp linux-3.0.4/drivers/net/igb/e1000_hw.h linux-3.0.4/drivers/net/igb/e1000_hw.h
29144 --- linux-3.0.4/drivers/net/igb/e1000_hw.h 2011-07-21 22:17:23.000000000 -0400
29145 +++ linux-3.0.4/drivers/net/igb/e1000_hw.h 2011-08-23 21:47:55.000000000 -0400
29146 @@ -314,6 +314,7 @@ struct e1000_mac_operations {
29147 s32 (*read_mac_addr)(struct e1000_hw *);
29148 s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
29149 };
29150 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
29151
29152 struct e1000_phy_operations {
29153 s32 (*acquire)(struct e1000_hw *);
29154 @@ -330,6 +331,7 @@ struct e1000_phy_operations {
29155 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
29156 s32 (*write_reg)(struct e1000_hw *, u32, u16);
29157 };
29158 +typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
29159
29160 struct e1000_nvm_operations {
29161 s32 (*acquire)(struct e1000_hw *);
29162 @@ -339,6 +341,7 @@ struct e1000_nvm_operations {
29163 s32 (*update)(struct e1000_hw *);
29164 s32 (*validate)(struct e1000_hw *);
29165 };
29166 +typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
29167
29168 struct e1000_info {
29169 s32 (*get_invariants)(struct e1000_hw *);
29170 @@ -350,7 +353,7 @@ struct e1000_info {
29171 extern const struct e1000_info e1000_82575_info;
29172
29173 struct e1000_mac_info {
29174 - struct e1000_mac_operations ops;
29175 + e1000_mac_operations_no_const ops;
29176
29177 u8 addr[6];
29178 u8 perm_addr[6];
29179 @@ -388,7 +391,7 @@ struct e1000_mac_info {
29180 };
29181
29182 struct e1000_phy_info {
29183 - struct e1000_phy_operations ops;
29184 + e1000_phy_operations_no_const ops;
29185
29186 enum e1000_phy_type type;
29187
29188 @@ -423,7 +426,7 @@ struct e1000_phy_info {
29189 };
29190
29191 struct e1000_nvm_info {
29192 - struct e1000_nvm_operations ops;
29193 + e1000_nvm_operations_no_const ops;
29194 enum e1000_nvm_type type;
29195 enum e1000_nvm_override override;
29196
29197 @@ -468,6 +471,7 @@ struct e1000_mbx_operations {
29198 s32 (*check_for_ack)(struct e1000_hw *, u16);
29199 s32 (*check_for_rst)(struct e1000_hw *, u16);
29200 };
29201 +typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
29202
29203 struct e1000_mbx_stats {
29204 u32 msgs_tx;
29205 @@ -479,7 +483,7 @@ struct e1000_mbx_stats {
29206 };
29207
29208 struct e1000_mbx_info {
29209 - struct e1000_mbx_operations ops;
29210 + e1000_mbx_operations_no_const ops;
29211 struct e1000_mbx_stats stats;
29212 u32 timeout;
29213 u32 usec_delay;
29214 diff -urNp linux-3.0.4/drivers/net/igbvf/vf.h linux-3.0.4/drivers/net/igbvf/vf.h
29215 --- linux-3.0.4/drivers/net/igbvf/vf.h 2011-07-21 22:17:23.000000000 -0400
29216 +++ linux-3.0.4/drivers/net/igbvf/vf.h 2011-08-23 21:47:55.000000000 -0400
29217 @@ -189,9 +189,10 @@ struct e1000_mac_operations {
29218 s32 (*read_mac_addr)(struct e1000_hw *);
29219 s32 (*set_vfta)(struct e1000_hw *, u16, bool);
29220 };
29221 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
29222
29223 struct e1000_mac_info {
29224 - struct e1000_mac_operations ops;
29225 + e1000_mac_operations_no_const ops;
29226 u8 addr[6];
29227 u8 perm_addr[6];
29228
29229 @@ -213,6 +214,7 @@ struct e1000_mbx_operations {
29230 s32 (*check_for_ack)(struct e1000_hw *);
29231 s32 (*check_for_rst)(struct e1000_hw *);
29232 };
29233 +typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
29234
29235 struct e1000_mbx_stats {
29236 u32 msgs_tx;
29237 @@ -224,7 +226,7 @@ struct e1000_mbx_stats {
29238 };
29239
29240 struct e1000_mbx_info {
29241 - struct e1000_mbx_operations ops;
29242 + e1000_mbx_operations_no_const ops;
29243 struct e1000_mbx_stats stats;
29244 u32 timeout;
29245 u32 usec_delay;
29246 diff -urNp linux-3.0.4/drivers/net/ixgb/ixgb_main.c linux-3.0.4/drivers/net/ixgb/ixgb_main.c
29247 --- linux-3.0.4/drivers/net/ixgb/ixgb_main.c 2011-07-21 22:17:23.000000000 -0400
29248 +++ linux-3.0.4/drivers/net/ixgb/ixgb_main.c 2011-08-23 21:48:14.000000000 -0400
29249 @@ -1070,6 +1070,8 @@ ixgb_set_multi(struct net_device *netdev
29250 u32 rctl;
29251 int i;
29252
29253 + pax_track_stack();
29254 +
29255 /* Check for Promiscuous and All Multicast modes */
29256
29257 rctl = IXGB_READ_REG(hw, RCTL);
29258 diff -urNp linux-3.0.4/drivers/net/ixgb/ixgb_param.c linux-3.0.4/drivers/net/ixgb/ixgb_param.c
29259 --- linux-3.0.4/drivers/net/ixgb/ixgb_param.c 2011-07-21 22:17:23.000000000 -0400
29260 +++ linux-3.0.4/drivers/net/ixgb/ixgb_param.c 2011-08-23 21:48:14.000000000 -0400
29261 @@ -261,6 +261,9 @@ void __devinit
29262 ixgb_check_options(struct ixgb_adapter *adapter)
29263 {
29264 int bd = adapter->bd_number;
29265 +
29266 + pax_track_stack();
29267 +
29268 if (bd >= IXGB_MAX_NIC) {
29269 pr_notice("Warning: no configuration for board #%i\n", bd);
29270 pr_notice("Using defaults for all values\n");
29271 diff -urNp linux-3.0.4/drivers/net/ixgbe/ixgbe_type.h linux-3.0.4/drivers/net/ixgbe/ixgbe_type.h
29272 --- linux-3.0.4/drivers/net/ixgbe/ixgbe_type.h 2011-07-21 22:17:23.000000000 -0400
29273 +++ linux-3.0.4/drivers/net/ixgbe/ixgbe_type.h 2011-08-23 21:47:55.000000000 -0400
29274 @@ -2584,6 +2584,7 @@ struct ixgbe_eeprom_operations {
29275 s32 (*update_checksum)(struct ixgbe_hw *);
29276 u16 (*calc_checksum)(struct ixgbe_hw *);
29277 };
29278 +typedef struct ixgbe_eeprom_operations __no_const ixgbe_eeprom_operations_no_const;
29279
29280 struct ixgbe_mac_operations {
29281 s32 (*init_hw)(struct ixgbe_hw *);
29282 @@ -2639,6 +2640,7 @@ struct ixgbe_mac_operations {
29283 /* Flow Control */
29284 s32 (*fc_enable)(struct ixgbe_hw *, s32);
29285 };
29286 +typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
29287
29288 struct ixgbe_phy_operations {
29289 s32 (*identify)(struct ixgbe_hw *);
29290 @@ -2658,9 +2660,10 @@ struct ixgbe_phy_operations {
29291 s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
29292 s32 (*check_overtemp)(struct ixgbe_hw *);
29293 };
29294 +typedef struct ixgbe_phy_operations __no_const ixgbe_phy_operations_no_const;
29295
29296 struct ixgbe_eeprom_info {
29297 - struct ixgbe_eeprom_operations ops;
29298 + ixgbe_eeprom_operations_no_const ops;
29299 enum ixgbe_eeprom_type type;
29300 u32 semaphore_delay;
29301 u16 word_size;
29302 @@ -2670,7 +2673,7 @@ struct ixgbe_eeprom_info {
29303
29304 #define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01
29305 struct ixgbe_mac_info {
29306 - struct ixgbe_mac_operations ops;
29307 + ixgbe_mac_operations_no_const ops;
29308 enum ixgbe_mac_type type;
29309 u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
29310 u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
29311 @@ -2698,7 +2701,7 @@ struct ixgbe_mac_info {
29312 };
29313
29314 struct ixgbe_phy_info {
29315 - struct ixgbe_phy_operations ops;
29316 + ixgbe_phy_operations_no_const ops;
29317 struct mdio_if_info mdio;
29318 enum ixgbe_phy_type type;
29319 u32 id;
29320 @@ -2726,6 +2729,7 @@ struct ixgbe_mbx_operations {
29321 s32 (*check_for_ack)(struct ixgbe_hw *, u16);
29322 s32 (*check_for_rst)(struct ixgbe_hw *, u16);
29323 };
29324 +typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
29325
29326 struct ixgbe_mbx_stats {
29327 u32 msgs_tx;
29328 @@ -2737,7 +2741,7 @@ struct ixgbe_mbx_stats {
29329 };
29330
29331 struct ixgbe_mbx_info {
29332 - struct ixgbe_mbx_operations ops;
29333 + ixgbe_mbx_operations_no_const ops;
29334 struct ixgbe_mbx_stats stats;
29335 u32 timeout;
29336 u32 usec_delay;
29337 diff -urNp linux-3.0.4/drivers/net/ixgbevf/vf.h linux-3.0.4/drivers/net/ixgbevf/vf.h
29338 --- linux-3.0.4/drivers/net/ixgbevf/vf.h 2011-07-21 22:17:23.000000000 -0400
29339 +++ linux-3.0.4/drivers/net/ixgbevf/vf.h 2011-08-23 21:47:55.000000000 -0400
29340 @@ -70,6 +70,7 @@ struct ixgbe_mac_operations {
29341 s32 (*clear_vfta)(struct ixgbe_hw *);
29342 s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
29343 };
29344 +typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
29345
29346 enum ixgbe_mac_type {
29347 ixgbe_mac_unknown = 0,
29348 @@ -79,7 +80,7 @@ enum ixgbe_mac_type {
29349 };
29350
29351 struct ixgbe_mac_info {
29352 - struct ixgbe_mac_operations ops;
29353 + ixgbe_mac_operations_no_const ops;
29354 u8 addr[6];
29355 u8 perm_addr[6];
29356
29357 @@ -103,6 +104,7 @@ struct ixgbe_mbx_operations {
29358 s32 (*check_for_ack)(struct ixgbe_hw *);
29359 s32 (*check_for_rst)(struct ixgbe_hw *);
29360 };
29361 +typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
29362
29363 struct ixgbe_mbx_stats {
29364 u32 msgs_tx;
29365 @@ -114,7 +116,7 @@ struct ixgbe_mbx_stats {
29366 };
29367
29368 struct ixgbe_mbx_info {
29369 - struct ixgbe_mbx_operations ops;
29370 + ixgbe_mbx_operations_no_const ops;
29371 struct ixgbe_mbx_stats stats;
29372 u32 timeout;
29373 u32 udelay;
29374 diff -urNp linux-3.0.4/drivers/net/ksz884x.c linux-3.0.4/drivers/net/ksz884x.c
29375 --- linux-3.0.4/drivers/net/ksz884x.c 2011-07-21 22:17:23.000000000 -0400
29376 +++ linux-3.0.4/drivers/net/ksz884x.c 2011-08-23 21:48:14.000000000 -0400
29377 @@ -6534,6 +6534,8 @@ static void netdev_get_ethtool_stats(str
29378 int rc;
29379 u64 counter[TOTAL_PORT_COUNTER_NUM];
29380
29381 + pax_track_stack();
29382 +
29383 mutex_lock(&hw_priv->lock);
29384 n = SWITCH_PORT_NUM;
29385 for (i = 0, p = port->first_port; i < port->mib_port_cnt; i++, p++) {
29386 diff -urNp linux-3.0.4/drivers/net/mlx4/main.c linux-3.0.4/drivers/net/mlx4/main.c
29387 --- linux-3.0.4/drivers/net/mlx4/main.c 2011-07-21 22:17:23.000000000 -0400
29388 +++ linux-3.0.4/drivers/net/mlx4/main.c 2011-08-23 21:48:14.000000000 -0400
29389 @@ -40,6 +40,7 @@
29390 #include <linux/dma-mapping.h>
29391 #include <linux/slab.h>
29392 #include <linux/io-mapping.h>
29393 +#include <linux/sched.h>
29394
29395 #include <linux/mlx4/device.h>
29396 #include <linux/mlx4/doorbell.h>
29397 @@ -764,6 +765,8 @@ static int mlx4_init_hca(struct mlx4_dev
29398 u64 icm_size;
29399 int err;
29400
29401 + pax_track_stack();
29402 +
29403 err = mlx4_QUERY_FW(dev);
29404 if (err) {
29405 if (err == -EACCES)
29406 diff -urNp linux-3.0.4/drivers/net/niu.c linux-3.0.4/drivers/net/niu.c
29407 --- linux-3.0.4/drivers/net/niu.c 2011-09-02 18:11:21.000000000 -0400
29408 +++ linux-3.0.4/drivers/net/niu.c 2011-08-23 21:48:14.000000000 -0400
29409 @@ -9056,6 +9056,8 @@ static void __devinit niu_try_msix(struc
29410 int i, num_irqs, err;
29411 u8 first_ldg;
29412
29413 + pax_track_stack();
29414 +
29415 first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
29416 for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++)
29417 ldg_num_map[i] = first_ldg + i;
29418 diff -urNp linux-3.0.4/drivers/net/pcnet32.c linux-3.0.4/drivers/net/pcnet32.c
29419 --- linux-3.0.4/drivers/net/pcnet32.c 2011-07-21 22:17:23.000000000 -0400
29420 +++ linux-3.0.4/drivers/net/pcnet32.c 2011-08-23 21:47:55.000000000 -0400
29421 @@ -82,7 +82,7 @@ static int cards_found;
29422 /*
29423 * VLB I/O addresses
29424 */
29425 -static unsigned int pcnet32_portlist[] __initdata =
29426 +static unsigned int pcnet32_portlist[] __devinitdata =
29427 { 0x300, 0x320, 0x340, 0x360, 0 };
29428
29429 static int pcnet32_debug;
29430 @@ -270,7 +270,7 @@ struct pcnet32_private {
29431 struct sk_buff **rx_skbuff;
29432 dma_addr_t *tx_dma_addr;
29433 dma_addr_t *rx_dma_addr;
29434 - struct pcnet32_access a;
29435 + struct pcnet32_access *a;
29436 spinlock_t lock; /* Guard lock */
29437 unsigned int cur_rx, cur_tx; /* The next free ring entry */
29438 unsigned int rx_ring_size; /* current rx ring size */
29439 @@ -460,9 +460,9 @@ static void pcnet32_netif_start(struct n
29440 u16 val;
29441
29442 netif_wake_queue(dev);
29443 - val = lp->a.read_csr(ioaddr, CSR3);
29444 + val = lp->a->read_csr(ioaddr, CSR3);
29445 val &= 0x00ff;
29446 - lp->a.write_csr(ioaddr, CSR3, val);
29447 + lp->a->write_csr(ioaddr, CSR3, val);
29448 napi_enable(&lp->napi);
29449 }
29450
29451 @@ -730,7 +730,7 @@ static u32 pcnet32_get_link(struct net_d
29452 r = mii_link_ok(&lp->mii_if);
29453 } else if (lp->chip_version >= PCNET32_79C970A) {
29454 ulong ioaddr = dev->base_addr; /* card base I/O address */
29455 - r = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
29456 + r = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
29457 } else { /* can not detect link on really old chips */
29458 r = 1;
29459 }
29460 @@ -792,7 +792,7 @@ static int pcnet32_set_ringparam(struct
29461 pcnet32_netif_stop(dev);
29462
29463 spin_lock_irqsave(&lp->lock, flags);
29464 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
29465 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
29466
29467 size = min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE);
29468
29469 @@ -868,7 +868,7 @@ static void pcnet32_ethtool_test(struct
29470 static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
29471 {
29472 struct pcnet32_private *lp = netdev_priv(dev);
29473 - struct pcnet32_access *a = &lp->a; /* access to registers */
29474 + struct pcnet32_access *a = lp->a; /* access to registers */
29475 ulong ioaddr = dev->base_addr; /* card base I/O address */
29476 struct sk_buff *skb; /* sk buff */
29477 int x, i; /* counters */
29478 @@ -888,21 +888,21 @@ static int pcnet32_loopback_test(struct
29479 pcnet32_netif_stop(dev);
29480
29481 spin_lock_irqsave(&lp->lock, flags);
29482 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
29483 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
29484
29485 numbuffs = min(numbuffs, (int)min(lp->rx_ring_size, lp->tx_ring_size));
29486
29487 /* Reset the PCNET32 */
29488 - lp->a.reset(ioaddr);
29489 - lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
29490 + lp->a->reset(ioaddr);
29491 + lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
29492
29493 /* switch pcnet32 to 32bit mode */
29494 - lp->a.write_bcr(ioaddr, 20, 2);
29495 + lp->a->write_bcr(ioaddr, 20, 2);
29496
29497 /* purge & init rings but don't actually restart */
29498 pcnet32_restart(dev, 0x0000);
29499
29500 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
29501 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
29502
29503 /* Initialize Transmit buffers. */
29504 size = data_len + 15;
29505 @@ -947,10 +947,10 @@ static int pcnet32_loopback_test(struct
29506
29507 /* set int loopback in CSR15 */
29508 x = a->read_csr(ioaddr, CSR15) & 0xfffc;
29509 - lp->a.write_csr(ioaddr, CSR15, x | 0x0044);
29510 + lp->a->write_csr(ioaddr, CSR15, x | 0x0044);
29511
29512 teststatus = cpu_to_le16(0x8000);
29513 - lp->a.write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
29514 + lp->a->write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
29515
29516 /* Check status of descriptors */
29517 for (x = 0; x < numbuffs; x++) {
29518 @@ -969,7 +969,7 @@ static int pcnet32_loopback_test(struct
29519 }
29520 }
29521
29522 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
29523 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
29524 wmb();
29525 if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) {
29526 netdev_printk(KERN_DEBUG, dev, "RX loopback packets:\n");
29527 @@ -1015,7 +1015,7 @@ clean_up:
29528 pcnet32_restart(dev, CSR0_NORMAL);
29529 } else {
29530 pcnet32_purge_rx_ring(dev);
29531 - lp->a.write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
29532 + lp->a->write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
29533 }
29534 spin_unlock_irqrestore(&lp->lock, flags);
29535
29536 @@ -1026,7 +1026,7 @@ static int pcnet32_set_phys_id(struct ne
29537 enum ethtool_phys_id_state state)
29538 {
29539 struct pcnet32_private *lp = netdev_priv(dev);
29540 - struct pcnet32_access *a = &lp->a;
29541 + struct pcnet32_access *a = lp->a;
29542 ulong ioaddr = dev->base_addr;
29543 unsigned long flags;
29544 int i;
29545 @@ -1067,7 +1067,7 @@ static int pcnet32_suspend(struct net_de
29546 {
29547 int csr5;
29548 struct pcnet32_private *lp = netdev_priv(dev);
29549 - struct pcnet32_access *a = &lp->a;
29550 + struct pcnet32_access *a = lp->a;
29551 ulong ioaddr = dev->base_addr;
29552 int ticks;
29553
29554 @@ -1324,8 +1324,8 @@ static int pcnet32_poll(struct napi_stru
29555 spin_lock_irqsave(&lp->lock, flags);
29556 if (pcnet32_tx(dev)) {
29557 /* reset the chip to clear the error condition, then restart */
29558 - lp->a.reset(ioaddr);
29559 - lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
29560 + lp->a->reset(ioaddr);
29561 + lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
29562 pcnet32_restart(dev, CSR0_START);
29563 netif_wake_queue(dev);
29564 }
29565 @@ -1337,12 +1337,12 @@ static int pcnet32_poll(struct napi_stru
29566 __napi_complete(napi);
29567
29568 /* clear interrupt masks */
29569 - val = lp->a.read_csr(ioaddr, CSR3);
29570 + val = lp->a->read_csr(ioaddr, CSR3);
29571 val &= 0x00ff;
29572 - lp->a.write_csr(ioaddr, CSR3, val);
29573 + lp->a->write_csr(ioaddr, CSR3, val);
29574
29575 /* Set interrupt enable. */
29576 - lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN);
29577 + lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN);
29578
29579 spin_unlock_irqrestore(&lp->lock, flags);
29580 }
29581 @@ -1365,7 +1365,7 @@ static void pcnet32_get_regs(struct net_
29582 int i, csr0;
29583 u16 *buff = ptr;
29584 struct pcnet32_private *lp = netdev_priv(dev);
29585 - struct pcnet32_access *a = &lp->a;
29586 + struct pcnet32_access *a = lp->a;
29587 ulong ioaddr = dev->base_addr;
29588 unsigned long flags;
29589
29590 @@ -1401,9 +1401,9 @@ static void pcnet32_get_regs(struct net_
29591 for (j = 0; j < PCNET32_MAX_PHYS; j++) {
29592 if (lp->phymask & (1 << j)) {
29593 for (i = 0; i < PCNET32_REGS_PER_PHY; i++) {
29594 - lp->a.write_bcr(ioaddr, 33,
29595 + lp->a->write_bcr(ioaddr, 33,
29596 (j << 5) | i);
29597 - *buff++ = lp->a.read_bcr(ioaddr, 34);
29598 + *buff++ = lp->a->read_bcr(ioaddr, 34);
29599 }
29600 }
29601 }
29602 @@ -1785,7 +1785,7 @@ pcnet32_probe1(unsigned long ioaddr, int
29603 ((cards_found >= MAX_UNITS) || full_duplex[cards_found]))
29604 lp->options |= PCNET32_PORT_FD;
29605
29606 - lp->a = *a;
29607 + lp->a = a;
29608
29609 /* prior to register_netdev, dev->name is not yet correct */
29610 if (pcnet32_alloc_ring(dev, pci_name(lp->pci_dev))) {
29611 @@ -1844,7 +1844,7 @@ pcnet32_probe1(unsigned long ioaddr, int
29612 if (lp->mii) {
29613 /* lp->phycount and lp->phymask are set to 0 by memset above */
29614
29615 - lp->mii_if.phy_id = ((lp->a.read_bcr(ioaddr, 33)) >> 5) & 0x1f;
29616 + lp->mii_if.phy_id = ((lp->a->read_bcr(ioaddr, 33)) >> 5) & 0x1f;
29617 /* scan for PHYs */
29618 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
29619 unsigned short id1, id2;
29620 @@ -1864,7 +1864,7 @@ pcnet32_probe1(unsigned long ioaddr, int
29621 pr_info("Found PHY %04x:%04x at address %d\n",
29622 id1, id2, i);
29623 }
29624 - lp->a.write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
29625 + lp->a->write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
29626 if (lp->phycount > 1)
29627 lp->options |= PCNET32_PORT_MII;
29628 }
29629 @@ -2020,10 +2020,10 @@ static int pcnet32_open(struct net_devic
29630 }
29631
29632 /* Reset the PCNET32 */
29633 - lp->a.reset(ioaddr);
29634 + lp->a->reset(ioaddr);
29635
29636 /* switch pcnet32 to 32bit mode */
29637 - lp->a.write_bcr(ioaddr, 20, 2);
29638 + lp->a->write_bcr(ioaddr, 20, 2);
29639
29640 netif_printk(lp, ifup, KERN_DEBUG, dev,
29641 "%s() irq %d tx/rx rings %#x/%#x init %#x\n",
29642 @@ -2032,14 +2032,14 @@ static int pcnet32_open(struct net_devic
29643 (u32) (lp->init_dma_addr));
29644
29645 /* set/reset autoselect bit */
29646 - val = lp->a.read_bcr(ioaddr, 2) & ~2;
29647 + val = lp->a->read_bcr(ioaddr, 2) & ~2;
29648 if (lp->options & PCNET32_PORT_ASEL)
29649 val |= 2;
29650 - lp->a.write_bcr(ioaddr, 2, val);
29651 + lp->a->write_bcr(ioaddr, 2, val);
29652
29653 /* handle full duplex setting */
29654 if (lp->mii_if.full_duplex) {
29655 - val = lp->a.read_bcr(ioaddr, 9) & ~3;
29656 + val = lp->a->read_bcr(ioaddr, 9) & ~3;
29657 if (lp->options & PCNET32_PORT_FD) {
29658 val |= 1;
29659 if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI))
29660 @@ -2049,14 +2049,14 @@ static int pcnet32_open(struct net_devic
29661 if (lp->chip_version == 0x2627)
29662 val |= 3;
29663 }
29664 - lp->a.write_bcr(ioaddr, 9, val);
29665 + lp->a->write_bcr(ioaddr, 9, val);
29666 }
29667
29668 /* set/reset GPSI bit in test register */
29669 - val = lp->a.read_csr(ioaddr, 124) & ~0x10;
29670 + val = lp->a->read_csr(ioaddr, 124) & ~0x10;
29671 if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI)
29672 val |= 0x10;
29673 - lp->a.write_csr(ioaddr, 124, val);
29674 + lp->a->write_csr(ioaddr, 124, val);
29675
29676 /* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */
29677 if (pdev && pdev->subsystem_vendor == PCI_VENDOR_ID_AT &&
29678 @@ -2075,24 +2075,24 @@ static int pcnet32_open(struct net_devic
29679 * duplex, and/or enable auto negotiation, and clear DANAS
29680 */
29681 if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) {
29682 - lp->a.write_bcr(ioaddr, 32,
29683 - lp->a.read_bcr(ioaddr, 32) | 0x0080);
29684 + lp->a->write_bcr(ioaddr, 32,
29685 + lp->a->read_bcr(ioaddr, 32) | 0x0080);
29686 /* disable Auto Negotiation, set 10Mpbs, HD */
29687 - val = lp->a.read_bcr(ioaddr, 32) & ~0xb8;
29688 + val = lp->a->read_bcr(ioaddr, 32) & ~0xb8;
29689 if (lp->options & PCNET32_PORT_FD)
29690 val |= 0x10;
29691 if (lp->options & PCNET32_PORT_100)
29692 val |= 0x08;
29693 - lp->a.write_bcr(ioaddr, 32, val);
29694 + lp->a->write_bcr(ioaddr, 32, val);
29695 } else {
29696 if (lp->options & PCNET32_PORT_ASEL) {
29697 - lp->a.write_bcr(ioaddr, 32,
29698 - lp->a.read_bcr(ioaddr,
29699 + lp->a->write_bcr(ioaddr, 32,
29700 + lp->a->read_bcr(ioaddr,
29701 32) | 0x0080);
29702 /* enable auto negotiate, setup, disable fd */
29703 - val = lp->a.read_bcr(ioaddr, 32) & ~0x98;
29704 + val = lp->a->read_bcr(ioaddr, 32) & ~0x98;
29705 val |= 0x20;
29706 - lp->a.write_bcr(ioaddr, 32, val);
29707 + lp->a->write_bcr(ioaddr, 32, val);
29708 }
29709 }
29710 } else {
29711 @@ -2105,10 +2105,10 @@ static int pcnet32_open(struct net_devic
29712 * There is really no good other way to handle multiple PHYs
29713 * other than turning off all automatics
29714 */
29715 - val = lp->a.read_bcr(ioaddr, 2);
29716 - lp->a.write_bcr(ioaddr, 2, val & ~2);
29717 - val = lp->a.read_bcr(ioaddr, 32);
29718 - lp->a.write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
29719 + val = lp->a->read_bcr(ioaddr, 2);
29720 + lp->a->write_bcr(ioaddr, 2, val & ~2);
29721 + val = lp->a->read_bcr(ioaddr, 32);
29722 + lp->a->write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
29723
29724 if (!(lp->options & PCNET32_PORT_ASEL)) {
29725 /* setup ecmd */
29726 @@ -2118,7 +2118,7 @@ static int pcnet32_open(struct net_devic
29727 ethtool_cmd_speed_set(&ecmd,
29728 (lp->options & PCNET32_PORT_100) ?
29729 SPEED_100 : SPEED_10);
29730 - bcr9 = lp->a.read_bcr(ioaddr, 9);
29731 + bcr9 = lp->a->read_bcr(ioaddr, 9);
29732
29733 if (lp->options & PCNET32_PORT_FD) {
29734 ecmd.duplex = DUPLEX_FULL;
29735 @@ -2127,7 +2127,7 @@ static int pcnet32_open(struct net_devic
29736 ecmd.duplex = DUPLEX_HALF;
29737 bcr9 |= ~(1 << 0);
29738 }
29739 - lp->a.write_bcr(ioaddr, 9, bcr9);
29740 + lp->a->write_bcr(ioaddr, 9, bcr9);
29741 }
29742
29743 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
29744 @@ -2158,9 +2158,9 @@ static int pcnet32_open(struct net_devic
29745
29746 #ifdef DO_DXSUFLO
29747 if (lp->dxsuflo) { /* Disable transmit stop on underflow */
29748 - val = lp->a.read_csr(ioaddr, CSR3);
29749 + val = lp->a->read_csr(ioaddr, CSR3);
29750 val |= 0x40;
29751 - lp->a.write_csr(ioaddr, CSR3, val);
29752 + lp->a->write_csr(ioaddr, CSR3, val);
29753 }
29754 #endif
29755
29756 @@ -2176,11 +2176,11 @@ static int pcnet32_open(struct net_devic
29757 napi_enable(&lp->napi);
29758
29759 /* Re-initialize the PCNET32, and start it when done. */
29760 - lp->a.write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
29761 - lp->a.write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
29762 + lp->a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
29763 + lp->a->write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
29764
29765 - lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
29766 - lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
29767 + lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
29768 + lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
29769
29770 netif_start_queue(dev);
29771
29772 @@ -2192,19 +2192,19 @@ static int pcnet32_open(struct net_devic
29773
29774 i = 0;
29775 while (i++ < 100)
29776 - if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
29777 + if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
29778 break;
29779 /*
29780 * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
29781 * reports that doing so triggers a bug in the '974.
29782 */
29783 - lp->a.write_csr(ioaddr, CSR0, CSR0_NORMAL);
29784 + lp->a->write_csr(ioaddr, CSR0, CSR0_NORMAL);
29785
29786 netif_printk(lp, ifup, KERN_DEBUG, dev,
29787 "pcnet32 open after %d ticks, init block %#x csr0 %4.4x\n",
29788 i,
29789 (u32) (lp->init_dma_addr),
29790 - lp->a.read_csr(ioaddr, CSR0));
29791 + lp->a->read_csr(ioaddr, CSR0));
29792
29793 spin_unlock_irqrestore(&lp->lock, flags);
29794
29795 @@ -2218,7 +2218,7 @@ err_free_ring:
29796 * Switch back to 16bit mode to avoid problems with dumb
29797 * DOS packet driver after a warm reboot
29798 */
29799 - lp->a.write_bcr(ioaddr, 20, 4);
29800 + lp->a->write_bcr(ioaddr, 20, 4);
29801
29802 err_free_irq:
29803 spin_unlock_irqrestore(&lp->lock, flags);
29804 @@ -2323,7 +2323,7 @@ static void pcnet32_restart(struct net_d
29805
29806 /* wait for stop */
29807 for (i = 0; i < 100; i++)
29808 - if (lp->a.read_csr(ioaddr, CSR0) & CSR0_STOP)
29809 + if (lp->a->read_csr(ioaddr, CSR0) & CSR0_STOP)
29810 break;
29811
29812 if (i >= 100)
29813 @@ -2335,13 +2335,13 @@ static void pcnet32_restart(struct net_d
29814 return;
29815
29816 /* ReInit Ring */
29817 - lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
29818 + lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
29819 i = 0;
29820 while (i++ < 1000)
29821 - if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
29822 + if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
29823 break;
29824
29825 - lp->a.write_csr(ioaddr, CSR0, csr0_bits);
29826 + lp->a->write_csr(ioaddr, CSR0, csr0_bits);
29827 }
29828
29829 static void pcnet32_tx_timeout(struct net_device *dev)
29830 @@ -2353,8 +2353,8 @@ static void pcnet32_tx_timeout(struct ne
29831 /* Transmitter timeout, serious problems. */
29832 if (pcnet32_debug & NETIF_MSG_DRV)
29833 pr_err("%s: transmit timed out, status %4.4x, resetting\n",
29834 - dev->name, lp->a.read_csr(ioaddr, CSR0));
29835 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
29836 + dev->name, lp->a->read_csr(ioaddr, CSR0));
29837 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
29838 dev->stats.tx_errors++;
29839 if (netif_msg_tx_err(lp)) {
29840 int i;
29841 @@ -2397,7 +2397,7 @@ static netdev_tx_t pcnet32_start_xmit(st
29842
29843 netif_printk(lp, tx_queued, KERN_DEBUG, dev,
29844 "%s() called, csr0 %4.4x\n",
29845 - __func__, lp->a.read_csr(ioaddr, CSR0));
29846 + __func__, lp->a->read_csr(ioaddr, CSR0));
29847
29848 /* Default status -- will not enable Successful-TxDone
29849 * interrupt when that option is available to us.
29850 @@ -2427,7 +2427,7 @@ static netdev_tx_t pcnet32_start_xmit(st
29851 dev->stats.tx_bytes += skb->len;
29852
29853 /* Trigger an immediate send poll. */
29854 - lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
29855 + lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
29856
29857 if (lp->tx_ring[(entry + 1) & lp->tx_mod_mask].base != 0) {
29858 lp->tx_full = 1;
29859 @@ -2452,16 +2452,16 @@ pcnet32_interrupt(int irq, void *dev_id)
29860
29861 spin_lock(&lp->lock);
29862
29863 - csr0 = lp->a.read_csr(ioaddr, CSR0);
29864 + csr0 = lp->a->read_csr(ioaddr, CSR0);
29865 while ((csr0 & 0x8f00) && --boguscnt >= 0) {
29866 if (csr0 == 0xffff)
29867 break; /* PCMCIA remove happened */
29868 /* Acknowledge all of the current interrupt sources ASAP. */
29869 - lp->a.write_csr(ioaddr, CSR0, csr0 & ~0x004f);
29870 + lp->a->write_csr(ioaddr, CSR0, csr0 & ~0x004f);
29871
29872 netif_printk(lp, intr, KERN_DEBUG, dev,
29873 "interrupt csr0=%#2.2x new csr=%#2.2x\n",
29874 - csr0, lp->a.read_csr(ioaddr, CSR0));
29875 + csr0, lp->a->read_csr(ioaddr, CSR0));
29876
29877 /* Log misc errors. */
29878 if (csr0 & 0x4000)
29879 @@ -2488,19 +2488,19 @@ pcnet32_interrupt(int irq, void *dev_id)
29880 if (napi_schedule_prep(&lp->napi)) {
29881 u16 val;
29882 /* set interrupt masks */
29883 - val = lp->a.read_csr(ioaddr, CSR3);
29884 + val = lp->a->read_csr(ioaddr, CSR3);
29885 val |= 0x5f00;
29886 - lp->a.write_csr(ioaddr, CSR3, val);
29887 + lp->a->write_csr(ioaddr, CSR3, val);
29888
29889 __napi_schedule(&lp->napi);
29890 break;
29891 }
29892 - csr0 = lp->a.read_csr(ioaddr, CSR0);
29893 + csr0 = lp->a->read_csr(ioaddr, CSR0);
29894 }
29895
29896 netif_printk(lp, intr, KERN_DEBUG, dev,
29897 "exiting interrupt, csr0=%#4.4x\n",
29898 - lp->a.read_csr(ioaddr, CSR0));
29899 + lp->a->read_csr(ioaddr, CSR0));
29900
29901 spin_unlock(&lp->lock);
29902
29903 @@ -2520,20 +2520,20 @@ static int pcnet32_close(struct net_devi
29904
29905 spin_lock_irqsave(&lp->lock, flags);
29906
29907 - dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
29908 + dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
29909
29910 netif_printk(lp, ifdown, KERN_DEBUG, dev,
29911 "Shutting down ethercard, status was %2.2x\n",
29912 - lp->a.read_csr(ioaddr, CSR0));
29913 + lp->a->read_csr(ioaddr, CSR0));
29914
29915 /* We stop the PCNET32 here -- it occasionally polls memory if we don't. */
29916 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
29917 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
29918
29919 /*
29920 * Switch back to 16bit mode to avoid problems with dumb
29921 * DOS packet driver after a warm reboot
29922 */
29923 - lp->a.write_bcr(ioaddr, 20, 4);
29924 + lp->a->write_bcr(ioaddr, 20, 4);
29925
29926 spin_unlock_irqrestore(&lp->lock, flags);
29927
29928 @@ -2556,7 +2556,7 @@ static struct net_device_stats *pcnet32_
29929 unsigned long flags;
29930
29931 spin_lock_irqsave(&lp->lock, flags);
29932 - dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
29933 + dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
29934 spin_unlock_irqrestore(&lp->lock, flags);
29935
29936 return &dev->stats;
29937 @@ -2578,10 +2578,10 @@ static void pcnet32_load_multicast(struc
29938 if (dev->flags & IFF_ALLMULTI) {
29939 ib->filter[0] = cpu_to_le32(~0U);
29940 ib->filter[1] = cpu_to_le32(~0U);
29941 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
29942 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
29943 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
29944 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
29945 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
29946 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
29947 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
29948 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
29949 return;
29950 }
29951 /* clear the multicast filter */
29952 @@ -2601,7 +2601,7 @@ static void pcnet32_load_multicast(struc
29953 mcast_table[crc >> 4] |= cpu_to_le16(1 << (crc & 0xf));
29954 }
29955 for (i = 0; i < 4; i++)
29956 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER + i,
29957 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER + i,
29958 le16_to_cpu(mcast_table[i]));
29959 }
29960
29961 @@ -2616,28 +2616,28 @@ static void pcnet32_set_multicast_list(s
29962
29963 spin_lock_irqsave(&lp->lock, flags);
29964 suspended = pcnet32_suspend(dev, &flags, 0);
29965 - csr15 = lp->a.read_csr(ioaddr, CSR15);
29966 + csr15 = lp->a->read_csr(ioaddr, CSR15);
29967 if (dev->flags & IFF_PROMISC) {
29968 /* Log any net taps. */
29969 netif_info(lp, hw, dev, "Promiscuous mode enabled\n");
29970 lp->init_block->mode =
29971 cpu_to_le16(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) <<
29972 7);
29973 - lp->a.write_csr(ioaddr, CSR15, csr15 | 0x8000);
29974 + lp->a->write_csr(ioaddr, CSR15, csr15 | 0x8000);
29975 } else {
29976 lp->init_block->mode =
29977 cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7);
29978 - lp->a.write_csr(ioaddr, CSR15, csr15 & 0x7fff);
29979 + lp->a->write_csr(ioaddr, CSR15, csr15 & 0x7fff);
29980 pcnet32_load_multicast(dev);
29981 }
29982
29983 if (suspended) {
29984 int csr5;
29985 /* clear SUSPEND (SPND) - CSR5 bit 0 */
29986 - csr5 = lp->a.read_csr(ioaddr, CSR5);
29987 - lp->a.write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
29988 + csr5 = lp->a->read_csr(ioaddr, CSR5);
29989 + lp->a->write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
29990 } else {
29991 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
29992 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
29993 pcnet32_restart(dev, CSR0_NORMAL);
29994 netif_wake_queue(dev);
29995 }
29996 @@ -2655,8 +2655,8 @@ static int mdio_read(struct net_device *
29997 if (!lp->mii)
29998 return 0;
29999
30000 - lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
30001 - val_out = lp->a.read_bcr(ioaddr, 34);
30002 + lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
30003 + val_out = lp->a->read_bcr(ioaddr, 34);
30004
30005 return val_out;
30006 }
30007 @@ -2670,8 +2670,8 @@ static void mdio_write(struct net_device
30008 if (!lp->mii)
30009 return;
30010
30011 - lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
30012 - lp->a.write_bcr(ioaddr, 34, val);
30013 + lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
30014 + lp->a->write_bcr(ioaddr, 34, val);
30015 }
30016
30017 static int pcnet32_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
30018 @@ -2748,7 +2748,7 @@ static void pcnet32_check_media(struct n
30019 curr_link = mii_link_ok(&lp->mii_if);
30020 } else {
30021 ulong ioaddr = dev->base_addr; /* card base I/O address */
30022 - curr_link = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
30023 + curr_link = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
30024 }
30025 if (!curr_link) {
30026 if (prev_link || verbose) {
30027 @@ -2771,13 +2771,13 @@ static void pcnet32_check_media(struct n
30028 (ecmd.duplex == DUPLEX_FULL)
30029 ? "full" : "half");
30030 }
30031 - bcr9 = lp->a.read_bcr(dev->base_addr, 9);
30032 + bcr9 = lp->a->read_bcr(dev->base_addr, 9);
30033 if ((bcr9 & (1 << 0)) != lp->mii_if.full_duplex) {
30034 if (lp->mii_if.full_duplex)
30035 bcr9 |= (1 << 0);
30036 else
30037 bcr9 &= ~(1 << 0);
30038 - lp->a.write_bcr(dev->base_addr, 9, bcr9);
30039 + lp->a->write_bcr(dev->base_addr, 9, bcr9);
30040 }
30041 } else {
30042 netif_info(lp, link, dev, "link up\n");
30043 diff -urNp linux-3.0.4/drivers/net/ppp_generic.c linux-3.0.4/drivers/net/ppp_generic.c
30044 --- linux-3.0.4/drivers/net/ppp_generic.c 2011-07-21 22:17:23.000000000 -0400
30045 +++ linux-3.0.4/drivers/net/ppp_generic.c 2011-08-23 21:47:55.000000000 -0400
30046 @@ -987,7 +987,6 @@ ppp_net_ioctl(struct net_device *dev, st
30047 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
30048 struct ppp_stats stats;
30049 struct ppp_comp_stats cstats;
30050 - char *vers;
30051
30052 switch (cmd) {
30053 case SIOCGPPPSTATS:
30054 @@ -1009,8 +1008,7 @@ ppp_net_ioctl(struct net_device *dev, st
30055 break;
30056
30057 case SIOCGPPPVER:
30058 - vers = PPP_VERSION;
30059 - if (copy_to_user(addr, vers, strlen(vers) + 1))
30060 + if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
30061 break;
30062 err = 0;
30063 break;
30064 diff -urNp linux-3.0.4/drivers/net/r8169.c linux-3.0.4/drivers/net/r8169.c
30065 --- linux-3.0.4/drivers/net/r8169.c 2011-09-02 18:11:21.000000000 -0400
30066 +++ linux-3.0.4/drivers/net/r8169.c 2011-08-23 21:47:55.000000000 -0400
30067 @@ -645,12 +645,12 @@ struct rtl8169_private {
30068 struct mdio_ops {
30069 void (*write)(void __iomem *, int, int);
30070 int (*read)(void __iomem *, int);
30071 - } mdio_ops;
30072 + } __no_const mdio_ops;
30073
30074 struct pll_power_ops {
30075 void (*down)(struct rtl8169_private *);
30076 void (*up)(struct rtl8169_private *);
30077 - } pll_power_ops;
30078 + } __no_const pll_power_ops;
30079
30080 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
30081 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
30082 diff -urNp linux-3.0.4/drivers/net/tg3.h linux-3.0.4/drivers/net/tg3.h
30083 --- linux-3.0.4/drivers/net/tg3.h 2011-07-21 22:17:23.000000000 -0400
30084 +++ linux-3.0.4/drivers/net/tg3.h 2011-08-23 21:47:55.000000000 -0400
30085 @@ -134,6 +134,7 @@
30086 #define CHIPREV_ID_5750_A0 0x4000
30087 #define CHIPREV_ID_5750_A1 0x4001
30088 #define CHIPREV_ID_5750_A3 0x4003
30089 +#define CHIPREV_ID_5750_C1 0x4201
30090 #define CHIPREV_ID_5750_C2 0x4202
30091 #define CHIPREV_ID_5752_A0_HW 0x5000
30092 #define CHIPREV_ID_5752_A0 0x6000
30093 diff -urNp linux-3.0.4/drivers/net/tokenring/abyss.c linux-3.0.4/drivers/net/tokenring/abyss.c
30094 --- linux-3.0.4/drivers/net/tokenring/abyss.c 2011-07-21 22:17:23.000000000 -0400
30095 +++ linux-3.0.4/drivers/net/tokenring/abyss.c 2011-08-23 21:47:55.000000000 -0400
30096 @@ -451,10 +451,12 @@ static struct pci_driver abyss_driver =
30097
30098 static int __init abyss_init (void)
30099 {
30100 - abyss_netdev_ops = tms380tr_netdev_ops;
30101 + pax_open_kernel();
30102 + memcpy((void *)&abyss_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
30103
30104 - abyss_netdev_ops.ndo_open = abyss_open;
30105 - abyss_netdev_ops.ndo_stop = abyss_close;
30106 + *(void **)&abyss_netdev_ops.ndo_open = abyss_open;
30107 + *(void **)&abyss_netdev_ops.ndo_stop = abyss_close;
30108 + pax_close_kernel();
30109
30110 return pci_register_driver(&abyss_driver);
30111 }
30112 diff -urNp linux-3.0.4/drivers/net/tokenring/madgemc.c linux-3.0.4/drivers/net/tokenring/madgemc.c
30113 --- linux-3.0.4/drivers/net/tokenring/madgemc.c 2011-07-21 22:17:23.000000000 -0400
30114 +++ linux-3.0.4/drivers/net/tokenring/madgemc.c 2011-08-23 21:47:55.000000000 -0400
30115 @@ -744,9 +744,11 @@ static struct mca_driver madgemc_driver
30116
30117 static int __init madgemc_init (void)
30118 {
30119 - madgemc_netdev_ops = tms380tr_netdev_ops;
30120 - madgemc_netdev_ops.ndo_open = madgemc_open;
30121 - madgemc_netdev_ops.ndo_stop = madgemc_close;
30122 + pax_open_kernel();
30123 + memcpy((void *)&madgemc_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
30124 + *(void **)&madgemc_netdev_ops.ndo_open = madgemc_open;
30125 + *(void **)&madgemc_netdev_ops.ndo_stop = madgemc_close;
30126 + pax_close_kernel();
30127
30128 return mca_register_driver (&madgemc_driver);
30129 }
30130 diff -urNp linux-3.0.4/drivers/net/tokenring/proteon.c linux-3.0.4/drivers/net/tokenring/proteon.c
30131 --- linux-3.0.4/drivers/net/tokenring/proteon.c 2011-07-21 22:17:23.000000000 -0400
30132 +++ linux-3.0.4/drivers/net/tokenring/proteon.c 2011-08-23 21:47:55.000000000 -0400
30133 @@ -353,9 +353,11 @@ static int __init proteon_init(void)
30134 struct platform_device *pdev;
30135 int i, num = 0, err = 0;
30136
30137 - proteon_netdev_ops = tms380tr_netdev_ops;
30138 - proteon_netdev_ops.ndo_open = proteon_open;
30139 - proteon_netdev_ops.ndo_stop = tms380tr_close;
30140 + pax_open_kernel();
30141 + memcpy((void *)&proteon_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
30142 + *(void **)&proteon_netdev_ops.ndo_open = proteon_open;
30143 + *(void **)&proteon_netdev_ops.ndo_stop = tms380tr_close;
30144 + pax_close_kernel();
30145
30146 err = platform_driver_register(&proteon_driver);
30147 if (err)
30148 diff -urNp linux-3.0.4/drivers/net/tokenring/skisa.c linux-3.0.4/drivers/net/tokenring/skisa.c
30149 --- linux-3.0.4/drivers/net/tokenring/skisa.c 2011-07-21 22:17:23.000000000 -0400
30150 +++ linux-3.0.4/drivers/net/tokenring/skisa.c 2011-08-23 21:47:55.000000000 -0400
30151 @@ -363,9 +363,11 @@ static int __init sk_isa_init(void)
30152 struct platform_device *pdev;
30153 int i, num = 0, err = 0;
30154
30155 - sk_isa_netdev_ops = tms380tr_netdev_ops;
30156 - sk_isa_netdev_ops.ndo_open = sk_isa_open;
30157 - sk_isa_netdev_ops.ndo_stop = tms380tr_close;
30158 + pax_open_kernel();
30159 + memcpy((void *)&sk_isa_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
30160 + *(void **)&sk_isa_netdev_ops.ndo_open = sk_isa_open;
30161 + *(void **)&sk_isa_netdev_ops.ndo_stop = tms380tr_close;
30162 + pax_close_kernel();
30163
30164 err = platform_driver_register(&sk_isa_driver);
30165 if (err)
30166 diff -urNp linux-3.0.4/drivers/net/tulip/de2104x.c linux-3.0.4/drivers/net/tulip/de2104x.c
30167 --- linux-3.0.4/drivers/net/tulip/de2104x.c 2011-07-21 22:17:23.000000000 -0400
30168 +++ linux-3.0.4/drivers/net/tulip/de2104x.c 2011-08-23 21:48:14.000000000 -0400
30169 @@ -1794,6 +1794,8 @@ static void __devinit de21041_get_srom_i
30170 struct de_srom_info_leaf *il;
30171 void *bufp;
30172
30173 + pax_track_stack();
30174 +
30175 /* download entire eeprom */
30176 for (i = 0; i < DE_EEPROM_WORDS; i++)
30177 ((__le16 *)ee_data)[i] =
30178 diff -urNp linux-3.0.4/drivers/net/tulip/de4x5.c linux-3.0.4/drivers/net/tulip/de4x5.c
30179 --- linux-3.0.4/drivers/net/tulip/de4x5.c 2011-07-21 22:17:23.000000000 -0400
30180 +++ linux-3.0.4/drivers/net/tulip/de4x5.c 2011-08-23 21:47:55.000000000 -0400
30181 @@ -5401,7 +5401,7 @@ de4x5_ioctl(struct net_device *dev, stru
30182 for (i=0; i<ETH_ALEN; i++) {
30183 tmp.addr[i] = dev->dev_addr[i];
30184 }
30185 - if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
30186 + if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
30187 break;
30188
30189 case DE4X5_SET_HWADDR: /* Set the hardware address */
30190 @@ -5441,7 +5441,7 @@ de4x5_ioctl(struct net_device *dev, stru
30191 spin_lock_irqsave(&lp->lock, flags);
30192 memcpy(&statbuf, &lp->pktStats, ioc->len);
30193 spin_unlock_irqrestore(&lp->lock, flags);
30194 - if (copy_to_user(ioc->data, &statbuf, ioc->len))
30195 + if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
30196 return -EFAULT;
30197 break;
30198 }
30199 diff -urNp linux-3.0.4/drivers/net/usb/hso.c linux-3.0.4/drivers/net/usb/hso.c
30200 --- linux-3.0.4/drivers/net/usb/hso.c 2011-07-21 22:17:23.000000000 -0400
30201 +++ linux-3.0.4/drivers/net/usb/hso.c 2011-08-23 21:47:55.000000000 -0400
30202 @@ -71,7 +71,7 @@
30203 #include <asm/byteorder.h>
30204 #include <linux/serial_core.h>
30205 #include <linux/serial.h>
30206 -
30207 +#include <asm/local.h>
30208
30209 #define MOD_AUTHOR "Option Wireless"
30210 #define MOD_DESCRIPTION "USB High Speed Option driver"
30211 @@ -257,7 +257,7 @@ struct hso_serial {
30212
30213 /* from usb_serial_port */
30214 struct tty_struct *tty;
30215 - int open_count;
30216 + local_t open_count;
30217 spinlock_t serial_lock;
30218
30219 int (*write_data) (struct hso_serial *serial);
30220 @@ -1190,7 +1190,7 @@ static void put_rxbuf_data_and_resubmit_
30221 struct urb *urb;
30222
30223 urb = serial->rx_urb[0];
30224 - if (serial->open_count > 0) {
30225 + if (local_read(&serial->open_count) > 0) {
30226 count = put_rxbuf_data(urb, serial);
30227 if (count == -1)
30228 return;
30229 @@ -1226,7 +1226,7 @@ static void hso_std_serial_read_bulk_cal
30230 DUMP1(urb->transfer_buffer, urb->actual_length);
30231
30232 /* Anyone listening? */
30233 - if (serial->open_count == 0)
30234 + if (local_read(&serial->open_count) == 0)
30235 return;
30236
30237 if (status == 0) {
30238 @@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_st
30239 spin_unlock_irq(&serial->serial_lock);
30240
30241 /* check for port already opened, if not set the termios */
30242 - serial->open_count++;
30243 - if (serial->open_count == 1) {
30244 + if (local_inc_return(&serial->open_count) == 1) {
30245 serial->rx_state = RX_IDLE;
30246 /* Force default termio settings */
30247 _hso_serial_set_termios(tty, NULL);
30248 @@ -1324,7 +1323,7 @@ static int hso_serial_open(struct tty_st
30249 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
30250 if (result) {
30251 hso_stop_serial_device(serial->parent);
30252 - serial->open_count--;
30253 + local_dec(&serial->open_count);
30254 kref_put(&serial->parent->ref, hso_serial_ref_free);
30255 }
30256 } else {
30257 @@ -1361,10 +1360,10 @@ static void hso_serial_close(struct tty_
30258
30259 /* reset the rts and dtr */
30260 /* do the actual close */
30261 - serial->open_count--;
30262 + local_dec(&serial->open_count);
30263
30264 - if (serial->open_count <= 0) {
30265 - serial->open_count = 0;
30266 + if (local_read(&serial->open_count) <= 0) {
30267 + local_set(&serial->open_count, 0);
30268 spin_lock_irq(&serial->serial_lock);
30269 if (serial->tty == tty) {
30270 serial->tty->driver_data = NULL;
30271 @@ -1446,7 +1445,7 @@ static void hso_serial_set_termios(struc
30272
30273 /* the actual setup */
30274 spin_lock_irqsave(&serial->serial_lock, flags);
30275 - if (serial->open_count)
30276 + if (local_read(&serial->open_count))
30277 _hso_serial_set_termios(tty, old);
30278 else
30279 tty->termios = old;
30280 @@ -1905,7 +1904,7 @@ static void intr_callback(struct urb *ur
30281 D1("Pending read interrupt on port %d\n", i);
30282 spin_lock(&serial->serial_lock);
30283 if (serial->rx_state == RX_IDLE &&
30284 - serial->open_count > 0) {
30285 + local_read(&serial->open_count) > 0) {
30286 /* Setup and send a ctrl req read on
30287 * port i */
30288 if (!serial->rx_urb_filled[0]) {
30289 @@ -3098,7 +3097,7 @@ static int hso_resume(struct usb_interfa
30290 /* Start all serial ports */
30291 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
30292 if (serial_table[i] && (serial_table[i]->interface == iface)) {
30293 - if (dev2ser(serial_table[i])->open_count) {
30294 + if (local_read(&dev2ser(serial_table[i])->open_count)) {
30295 result =
30296 hso_start_serial_device(serial_table[i], GFP_NOIO);
30297 hso_kick_transmit(dev2ser(serial_table[i]));
30298 diff -urNp linux-3.0.4/drivers/net/vmxnet3/vmxnet3_ethtool.c linux-3.0.4/drivers/net/vmxnet3/vmxnet3_ethtool.c
30299 --- linux-3.0.4/drivers/net/vmxnet3/vmxnet3_ethtool.c 2011-07-21 22:17:23.000000000 -0400
30300 +++ linux-3.0.4/drivers/net/vmxnet3/vmxnet3_ethtool.c 2011-08-23 21:47:55.000000000 -0400
30301 @@ -594,8 +594,7 @@ vmxnet3_set_rss_indir(struct net_device
30302 * Return with error code if any of the queue indices
30303 * is out of range
30304 */
30305 - if (p->ring_index[i] < 0 ||
30306 - p->ring_index[i] >= adapter->num_rx_queues)
30307 + if (p->ring_index[i] >= adapter->num_rx_queues)
30308 return -EINVAL;
30309 }
30310
30311 diff -urNp linux-3.0.4/drivers/net/vxge/vxge-config.h linux-3.0.4/drivers/net/vxge/vxge-config.h
30312 --- linux-3.0.4/drivers/net/vxge/vxge-config.h 2011-07-21 22:17:23.000000000 -0400
30313 +++ linux-3.0.4/drivers/net/vxge/vxge-config.h 2011-08-23 21:47:55.000000000 -0400
30314 @@ -512,7 +512,7 @@ struct vxge_hw_uld_cbs {
30315 void (*link_down)(struct __vxge_hw_device *devh);
30316 void (*crit_err)(struct __vxge_hw_device *devh,
30317 enum vxge_hw_event type, u64 ext_data);
30318 -};
30319 +} __no_const;
30320
30321 /*
30322 * struct __vxge_hw_blockpool_entry - Block private data structure
30323 diff -urNp linux-3.0.4/drivers/net/vxge/vxge-main.c linux-3.0.4/drivers/net/vxge/vxge-main.c
30324 --- linux-3.0.4/drivers/net/vxge/vxge-main.c 2011-07-21 22:17:23.000000000 -0400
30325 +++ linux-3.0.4/drivers/net/vxge/vxge-main.c 2011-08-23 21:48:14.000000000 -0400
30326 @@ -98,6 +98,8 @@ static inline void VXGE_COMPLETE_VPATH_T
30327 struct sk_buff *completed[NR_SKB_COMPLETED];
30328 int more;
30329
30330 + pax_track_stack();
30331 +
30332 do {
30333 more = 0;
30334 skb_ptr = completed;
30335 @@ -1920,6 +1922,8 @@ static enum vxge_hw_status vxge_rth_conf
30336 u8 mtable[256] = {0}; /* CPU to vpath mapping */
30337 int index;
30338
30339 + pax_track_stack();
30340 +
30341 /*
30342 * Filling
30343 * - itable with bucket numbers
30344 diff -urNp linux-3.0.4/drivers/net/vxge/vxge-traffic.h linux-3.0.4/drivers/net/vxge/vxge-traffic.h
30345 --- linux-3.0.4/drivers/net/vxge/vxge-traffic.h 2011-07-21 22:17:23.000000000 -0400
30346 +++ linux-3.0.4/drivers/net/vxge/vxge-traffic.h 2011-08-23 21:47:55.000000000 -0400
30347 @@ -2088,7 +2088,7 @@ struct vxge_hw_mempool_cbs {
30348 struct vxge_hw_mempool_dma *dma_object,
30349 u32 index,
30350 u32 is_last);
30351 -};
30352 +} __no_const;
30353
30354 #define VXGE_HW_VIRTUAL_PATH_HANDLE(vpath) \
30355 ((struct __vxge_hw_vpath_handle *)(vpath)->vpath_handles.next)
30356 diff -urNp linux-3.0.4/drivers/net/wan/cycx_x25.c linux-3.0.4/drivers/net/wan/cycx_x25.c
30357 --- linux-3.0.4/drivers/net/wan/cycx_x25.c 2011-07-21 22:17:23.000000000 -0400
30358 +++ linux-3.0.4/drivers/net/wan/cycx_x25.c 2011-08-23 21:48:14.000000000 -0400
30359 @@ -1018,6 +1018,8 @@ static void hex_dump(char *msg, unsigned
30360 unsigned char hex[1024],
30361 * phex = hex;
30362
30363 + pax_track_stack();
30364 +
30365 if (len >= (sizeof(hex) / 2))
30366 len = (sizeof(hex) / 2) - 1;
30367
30368 diff -urNp linux-3.0.4/drivers/net/wan/hdlc_x25.c linux-3.0.4/drivers/net/wan/hdlc_x25.c
30369 --- linux-3.0.4/drivers/net/wan/hdlc_x25.c 2011-07-21 22:17:23.000000000 -0400
30370 +++ linux-3.0.4/drivers/net/wan/hdlc_x25.c 2011-08-23 21:47:55.000000000 -0400
30371 @@ -136,16 +136,16 @@ static netdev_tx_t x25_xmit(struct sk_bu
30372
30373 static int x25_open(struct net_device *dev)
30374 {
30375 - struct lapb_register_struct cb;
30376 + static struct lapb_register_struct cb = {
30377 + .connect_confirmation = x25_connected,
30378 + .connect_indication = x25_connected,
30379 + .disconnect_confirmation = x25_disconnected,
30380 + .disconnect_indication = x25_disconnected,
30381 + .data_indication = x25_data_indication,
30382 + .data_transmit = x25_data_transmit
30383 + };
30384 int result;
30385
30386 - cb.connect_confirmation = x25_connected;
30387 - cb.connect_indication = x25_connected;
30388 - cb.disconnect_confirmation = x25_disconnected;
30389 - cb.disconnect_indication = x25_disconnected;
30390 - cb.data_indication = x25_data_indication;
30391 - cb.data_transmit = x25_data_transmit;
30392 -
30393 result = lapb_register(dev, &cb);
30394 if (result != LAPB_OK)
30395 return result;
30396 diff -urNp linux-3.0.4/drivers/net/wimax/i2400m/usb-fw.c linux-3.0.4/drivers/net/wimax/i2400m/usb-fw.c
30397 --- linux-3.0.4/drivers/net/wimax/i2400m/usb-fw.c 2011-07-21 22:17:23.000000000 -0400
30398 +++ linux-3.0.4/drivers/net/wimax/i2400m/usb-fw.c 2011-08-23 21:48:14.000000000 -0400
30399 @@ -287,6 +287,8 @@ ssize_t i2400mu_bus_bm_wait_for_ack(stru
30400 int do_autopm = 1;
30401 DECLARE_COMPLETION_ONSTACK(notif_completion);
30402
30403 + pax_track_stack();
30404 +
30405 d_fnstart(8, dev, "(i2400m %p ack %p size %zu)\n",
30406 i2400m, ack, ack_size);
30407 BUG_ON(_ack == i2400m->bm_ack_buf);
30408 diff -urNp linux-3.0.4/drivers/net/wireless/airo.c linux-3.0.4/drivers/net/wireless/airo.c
30409 --- linux-3.0.4/drivers/net/wireless/airo.c 2011-09-02 18:11:21.000000000 -0400
30410 +++ linux-3.0.4/drivers/net/wireless/airo.c 2011-08-23 21:48:14.000000000 -0400
30411 @@ -3003,6 +3003,8 @@ static void airo_process_scan_results (s
30412 BSSListElement * loop_net;
30413 BSSListElement * tmp_net;
30414
30415 + pax_track_stack();
30416 +
30417 /* Blow away current list of scan results */
30418 list_for_each_entry_safe (loop_net, tmp_net, &ai->network_list, list) {
30419 list_move_tail (&loop_net->list, &ai->network_free_list);
30420 @@ -3794,6 +3796,8 @@ static u16 setup_card(struct airo_info *
30421 WepKeyRid wkr;
30422 int rc;
30423
30424 + pax_track_stack();
30425 +
30426 memset( &mySsid, 0, sizeof( mySsid ) );
30427 kfree (ai->flash);
30428 ai->flash = NULL;
30429 @@ -4753,6 +4757,8 @@ static int proc_stats_rid_open( struct i
30430 __le32 *vals = stats.vals;
30431 int len;
30432
30433 + pax_track_stack();
30434 +
30435 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
30436 return -ENOMEM;
30437 data = file->private_data;
30438 @@ -5476,6 +5482,8 @@ static int proc_BSSList_open( struct ino
30439 /* If doLoseSync is not 1, we won't do a Lose Sync */
30440 int doLoseSync = -1;
30441
30442 + pax_track_stack();
30443 +
30444 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
30445 return -ENOMEM;
30446 data = file->private_data;
30447 @@ -7181,6 +7189,8 @@ static int airo_get_aplist(struct net_de
30448 int i;
30449 int loseSync = capable(CAP_NET_ADMIN) ? 1: -1;
30450
30451 + pax_track_stack();
30452 +
30453 qual = kmalloc(IW_MAX_AP * sizeof(*qual), GFP_KERNEL);
30454 if (!qual)
30455 return -ENOMEM;
30456 @@ -7741,6 +7751,8 @@ static void airo_read_wireless_stats(str
30457 CapabilityRid cap_rid;
30458 __le32 *vals = stats_rid.vals;
30459
30460 + pax_track_stack();
30461 +
30462 /* Get stats out of the card */
30463 clear_bit(JOB_WSTATS, &local->jobs);
30464 if (local->power.event) {
30465 diff -urNp linux-3.0.4/drivers/net/wireless/ath/ath5k/debug.c linux-3.0.4/drivers/net/wireless/ath/ath5k/debug.c
30466 --- linux-3.0.4/drivers/net/wireless/ath/ath5k/debug.c 2011-07-21 22:17:23.000000000 -0400
30467 +++ linux-3.0.4/drivers/net/wireless/ath/ath5k/debug.c 2011-08-23 21:48:14.000000000 -0400
30468 @@ -204,6 +204,8 @@ static ssize_t read_file_beacon(struct f
30469 unsigned int v;
30470 u64 tsf;
30471
30472 + pax_track_stack();
30473 +
30474 v = ath5k_hw_reg_read(sc->ah, AR5K_BEACON);
30475 len += snprintf(buf+len, sizeof(buf)-len,
30476 "%-24s0x%08x\tintval: %d\tTIM: 0x%x\n",
30477 @@ -323,6 +325,8 @@ static ssize_t read_file_debug(struct fi
30478 unsigned int len = 0;
30479 unsigned int i;
30480
30481 + pax_track_stack();
30482 +
30483 len += snprintf(buf+len, sizeof(buf)-len,
30484 "DEBUG LEVEL: 0x%08x\n\n", sc->debug.level);
30485
30486 @@ -384,6 +388,8 @@ static ssize_t read_file_antenna(struct
30487 unsigned int i;
30488 unsigned int v;
30489
30490 + pax_track_stack();
30491 +
30492 len += snprintf(buf+len, sizeof(buf)-len, "antenna mode\t%d\n",
30493 sc->ah->ah_ant_mode);
30494 len += snprintf(buf+len, sizeof(buf)-len, "default antenna\t%d\n",
30495 @@ -494,6 +500,8 @@ static ssize_t read_file_misc(struct fil
30496 unsigned int len = 0;
30497 u32 filt = ath5k_hw_get_rx_filter(sc->ah);
30498
30499 + pax_track_stack();
30500 +
30501 len += snprintf(buf+len, sizeof(buf)-len, "bssid-mask: %pM\n",
30502 sc->bssidmask);
30503 len += snprintf(buf+len, sizeof(buf)-len, "filter-flags: 0x%x ",
30504 @@ -550,6 +558,8 @@ static ssize_t read_file_frameerrors(str
30505 unsigned int len = 0;
30506 int i;
30507
30508 + pax_track_stack();
30509 +
30510 len += snprintf(buf+len, sizeof(buf)-len,
30511 "RX\n---------------------\n");
30512 len += snprintf(buf+len, sizeof(buf)-len, "CRC\t%u\t(%u%%)\n",
30513 @@ -667,6 +677,8 @@ static ssize_t read_file_ani(struct file
30514 char buf[700];
30515 unsigned int len = 0;
30516
30517 + pax_track_stack();
30518 +
30519 len += snprintf(buf+len, sizeof(buf)-len,
30520 "HW has PHY error counters:\t%s\n",
30521 sc->ah->ah_capabilities.cap_has_phyerr_counters ?
30522 @@ -827,6 +839,8 @@ static ssize_t read_file_queue(struct fi
30523 struct ath5k_buf *bf, *bf0;
30524 int i, n;
30525
30526 + pax_track_stack();
30527 +
30528 len += snprintf(buf+len, sizeof(buf)-len,
30529 "available txbuffers: %d\n", sc->txbuf_len);
30530
30531 diff -urNp linux-3.0.4/drivers/net/wireless/ath/ath9k/ar9003_calib.c linux-3.0.4/drivers/net/wireless/ath/ath9k/ar9003_calib.c
30532 --- linux-3.0.4/drivers/net/wireless/ath/ath9k/ar9003_calib.c 2011-07-21 22:17:23.000000000 -0400
30533 +++ linux-3.0.4/drivers/net/wireless/ath/ath9k/ar9003_calib.c 2011-08-23 21:48:14.000000000 -0400
30534 @@ -757,6 +757,8 @@ static void ar9003_hw_tx_iq_cal_post_pro
30535 int i, im, j;
30536 int nmeasurement;
30537
30538 + pax_track_stack();
30539 +
30540 for (i = 0; i < AR9300_MAX_CHAINS; i++) {
30541 if (ah->txchainmask & (1 << i))
30542 num_chains++;
30543 diff -urNp linux-3.0.4/drivers/net/wireless/ath/ath9k/ar9003_paprd.c linux-3.0.4/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
30544 --- linux-3.0.4/drivers/net/wireless/ath/ath9k/ar9003_paprd.c 2011-07-21 22:17:23.000000000 -0400
30545 +++ linux-3.0.4/drivers/net/wireless/ath/ath9k/ar9003_paprd.c 2011-08-23 21:48:14.000000000 -0400
30546 @@ -356,6 +356,8 @@ static bool create_pa_curve(u32 *data_L,
30547 int theta_low_bin = 0;
30548 int i;
30549
30550 + pax_track_stack();
30551 +
30552 /* disregard any bin that contains <= 16 samples */
30553 thresh_accum_cnt = 16;
30554 scale_factor = 5;
30555 diff -urNp linux-3.0.4/drivers/net/wireless/ath/ath9k/debug.c linux-3.0.4/drivers/net/wireless/ath/ath9k/debug.c
30556 --- linux-3.0.4/drivers/net/wireless/ath/ath9k/debug.c 2011-07-21 22:17:23.000000000 -0400
30557 +++ linux-3.0.4/drivers/net/wireless/ath/ath9k/debug.c 2011-08-23 21:48:14.000000000 -0400
30558 @@ -337,6 +337,8 @@ static ssize_t read_file_interrupt(struc
30559 char buf[512];
30560 unsigned int len = 0;
30561
30562 + pax_track_stack();
30563 +
30564 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
30565 len += snprintf(buf + len, sizeof(buf) - len,
30566 "%8s: %10u\n", "RXLP", sc->debug.stats.istats.rxlp);
30567 @@ -427,6 +429,8 @@ static ssize_t read_file_wiphy(struct fi
30568 u8 addr[ETH_ALEN];
30569 u32 tmp;
30570
30571 + pax_track_stack();
30572 +
30573 len += snprintf(buf + len, sizeof(buf) - len,
30574 "%s (chan=%d center-freq: %d MHz channel-type: %d (%s))\n",
30575 wiphy_name(sc->hw->wiphy),
30576 diff -urNp linux-3.0.4/drivers/net/wireless/ath/ath9k/htc_drv_debug.c linux-3.0.4/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
30577 --- linux-3.0.4/drivers/net/wireless/ath/ath9k/htc_drv_debug.c 2011-07-21 22:17:23.000000000 -0400
30578 +++ linux-3.0.4/drivers/net/wireless/ath/ath9k/htc_drv_debug.c 2011-08-23 21:48:14.000000000 -0400
30579 @@ -31,6 +31,8 @@ static ssize_t read_file_tgt_int_stats(s
30580 unsigned int len = 0;
30581 int ret = 0;
30582
30583 + pax_track_stack();
30584 +
30585 memset(&cmd_rsp, 0, sizeof(cmd_rsp));
30586
30587 ath9k_htc_ps_wakeup(priv);
30588 @@ -89,6 +91,8 @@ static ssize_t read_file_tgt_tx_stats(st
30589 unsigned int len = 0;
30590 int ret = 0;
30591
30592 + pax_track_stack();
30593 +
30594 memset(&cmd_rsp, 0, sizeof(cmd_rsp));
30595
30596 ath9k_htc_ps_wakeup(priv);
30597 @@ -159,6 +163,8 @@ static ssize_t read_file_tgt_rx_stats(st
30598 unsigned int len = 0;
30599 int ret = 0;
30600
30601 + pax_track_stack();
30602 +
30603 memset(&cmd_rsp, 0, sizeof(cmd_rsp));
30604
30605 ath9k_htc_ps_wakeup(priv);
30606 @@ -203,6 +209,8 @@ static ssize_t read_file_xmit(struct fil
30607 char buf[512];
30608 unsigned int len = 0;
30609
30610 + pax_track_stack();
30611 +
30612 len += snprintf(buf + len, sizeof(buf) - len,
30613 "%20s : %10u\n", "Buffers queued",
30614 priv->debug.tx_stats.buf_queued);
30615 @@ -376,6 +384,8 @@ static ssize_t read_file_slot(struct fil
30616 char buf[512];
30617 unsigned int len = 0;
30618
30619 + pax_track_stack();
30620 +
30621 spin_lock_bh(&priv->tx.tx_lock);
30622
30623 len += snprintf(buf + len, sizeof(buf) - len, "TX slot bitmap : ");
30624 @@ -411,6 +421,8 @@ static ssize_t read_file_queue(struct fi
30625 char buf[512];
30626 unsigned int len = 0;
30627
30628 + pax_track_stack();
30629 +
30630 len += snprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
30631 "Mgmt endpoint", skb_queue_len(&priv->tx.mgmt_ep_queue));
30632
30633 diff -urNp linux-3.0.4/drivers/net/wireless/ath/ath9k/hw.h linux-3.0.4/drivers/net/wireless/ath/ath9k/hw.h
30634 --- linux-3.0.4/drivers/net/wireless/ath/ath9k/hw.h 2011-09-02 18:11:21.000000000 -0400
30635 +++ linux-3.0.4/drivers/net/wireless/ath/ath9k/hw.h 2011-08-23 21:47:55.000000000 -0400
30636 @@ -585,7 +585,7 @@ struct ath_hw_private_ops {
30637
30638 /* ANI */
30639 void (*ani_cache_ini_regs)(struct ath_hw *ah);
30640 -};
30641 +} __no_const;
30642
30643 /**
30644 * struct ath_hw_ops - callbacks used by hardware code and driver code
30645 @@ -637,7 +637,7 @@ struct ath_hw_ops {
30646 void (*antdiv_comb_conf_set)(struct ath_hw *ah,
30647 struct ath_hw_antcomb_conf *antconf);
30648
30649 -};
30650 +} __no_const;
30651
30652 struct ath_nf_limits {
30653 s16 max;
30654 @@ -650,7 +650,7 @@ struct ath_nf_limits {
30655 #define AH_UNPLUGGED 0x2 /* The card has been physically removed. */
30656
30657 struct ath_hw {
30658 - struct ath_ops reg_ops;
30659 + ath_ops_no_const reg_ops;
30660
30661 struct ieee80211_hw *hw;
30662 struct ath_common common;
30663 diff -urNp linux-3.0.4/drivers/net/wireless/ath/ath.h linux-3.0.4/drivers/net/wireless/ath/ath.h
30664 --- linux-3.0.4/drivers/net/wireless/ath/ath.h 2011-07-21 22:17:23.000000000 -0400
30665 +++ linux-3.0.4/drivers/net/wireless/ath/ath.h 2011-08-23 21:47:55.000000000 -0400
30666 @@ -121,6 +121,7 @@ struct ath_ops {
30667 void (*write_flush) (void *);
30668 u32 (*rmw)(void *, u32 reg_offset, u32 set, u32 clr);
30669 };
30670 +typedef struct ath_ops __no_const ath_ops_no_const;
30671
30672 struct ath_common;
30673 struct ath_bus_ops;
30674 diff -urNp linux-3.0.4/drivers/net/wireless/ipw2x00/ipw2100.c linux-3.0.4/drivers/net/wireless/ipw2x00/ipw2100.c
30675 --- linux-3.0.4/drivers/net/wireless/ipw2x00/ipw2100.c 2011-07-21 22:17:23.000000000 -0400
30676 +++ linux-3.0.4/drivers/net/wireless/ipw2x00/ipw2100.c 2011-08-23 21:48:14.000000000 -0400
30677 @@ -2100,6 +2100,8 @@ static int ipw2100_set_essid(struct ipw2
30678 int err;
30679 DECLARE_SSID_BUF(ssid);
30680
30681 + pax_track_stack();
30682 +
30683 IPW_DEBUG_HC("SSID: '%s'\n", print_ssid(ssid, essid, ssid_len));
30684
30685 if (ssid_len)
30686 @@ -5449,6 +5451,8 @@ static int ipw2100_set_key(struct ipw210
30687 struct ipw2100_wep_key *wep_key = (void *)cmd.host_command_parameters;
30688 int err;
30689
30690 + pax_track_stack();
30691 +
30692 IPW_DEBUG_HC("WEP_KEY_INFO: index = %d, len = %d/%d\n",
30693 idx, keylen, len);
30694
30695 diff -urNp linux-3.0.4/drivers/net/wireless/ipw2x00/libipw_rx.c linux-3.0.4/drivers/net/wireless/ipw2x00/libipw_rx.c
30696 --- linux-3.0.4/drivers/net/wireless/ipw2x00/libipw_rx.c 2011-07-21 22:17:23.000000000 -0400
30697 +++ linux-3.0.4/drivers/net/wireless/ipw2x00/libipw_rx.c 2011-08-23 21:48:14.000000000 -0400
30698 @@ -1565,6 +1565,8 @@ static void libipw_process_probe_respons
30699 unsigned long flags;
30700 DECLARE_SSID_BUF(ssid);
30701
30702 + pax_track_stack();
30703 +
30704 LIBIPW_DEBUG_SCAN("'%s' (%pM"
30705 "): %c%c%c%c %c%c%c%c-%c%c%c%c %c%c%c%c\n",
30706 print_ssid(ssid, info_element->data, info_element->len),
30707 diff -urNp linux-3.0.4/drivers/net/wireless/iwlegacy/iwl3945-base.c linux-3.0.4/drivers/net/wireless/iwlegacy/iwl3945-base.c
30708 --- linux-3.0.4/drivers/net/wireless/iwlegacy/iwl3945-base.c 2011-07-21 22:17:23.000000000 -0400
30709 +++ linux-3.0.4/drivers/net/wireless/iwlegacy/iwl3945-base.c 2011-08-23 21:47:55.000000000 -0400
30710 @@ -3962,7 +3962,9 @@ static int iwl3945_pci_probe(struct pci_
30711 */
30712 if (iwl3945_mod_params.disable_hw_scan) {
30713 IWL_DEBUG_INFO(priv, "Disabling hw_scan\n");
30714 - iwl3945_hw_ops.hw_scan = NULL;
30715 + pax_open_kernel();
30716 + *(void **)&iwl3945_hw_ops.hw_scan = NULL;
30717 + pax_close_kernel();
30718 }
30719
30720 IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
30721 diff -urNp linux-3.0.4/drivers/net/wireless/iwlwifi/iwl-agn-rs.c linux-3.0.4/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
30722 --- linux-3.0.4/drivers/net/wireless/iwlwifi/iwl-agn-rs.c 2011-07-21 22:17:23.000000000 -0400
30723 +++ linux-3.0.4/drivers/net/wireless/iwlwifi/iwl-agn-rs.c 2011-08-23 21:48:14.000000000 -0400
30724 @@ -910,6 +910,8 @@ static void rs_tx_status(void *priv_r, s
30725 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
30726 struct iwl_rxon_context *ctx = sta_priv->common.ctx;
30727
30728 + pax_track_stack();
30729 +
30730 IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n");
30731
30732 /* Treat uninitialized rate scaling data same as non-existing. */
30733 @@ -2918,6 +2920,8 @@ static void rs_fill_link_cmd(struct iwl_
30734 container_of(lq_sta, struct iwl_station_priv, lq_sta);
30735 struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;
30736
30737 + pax_track_stack();
30738 +
30739 /* Override starting rate (index 0) if needed for debug purposes */
30740 rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
30741
30742 diff -urNp linux-3.0.4/drivers/net/wireless/iwlwifi/iwl-debugfs.c linux-3.0.4/drivers/net/wireless/iwlwifi/iwl-debugfs.c
30743 --- linux-3.0.4/drivers/net/wireless/iwlwifi/iwl-debugfs.c 2011-07-21 22:17:23.000000000 -0400
30744 +++ linux-3.0.4/drivers/net/wireless/iwlwifi/iwl-debugfs.c 2011-08-23 21:48:14.000000000 -0400
30745 @@ -548,6 +548,8 @@ static ssize_t iwl_dbgfs_status_read(str
30746 int pos = 0;
30747 const size_t bufsz = sizeof(buf);
30748
30749 + pax_track_stack();
30750 +
30751 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
30752 test_bit(STATUS_HCMD_ACTIVE, &priv->status));
30753 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INT_ENABLED:\t %d\n",
30754 @@ -680,6 +682,8 @@ static ssize_t iwl_dbgfs_qos_read(struct
30755 char buf[256 * NUM_IWL_RXON_CTX];
30756 const size_t bufsz = sizeof(buf);
30757
30758 + pax_track_stack();
30759 +
30760 for_each_context(priv, ctx) {
30761 pos += scnprintf(buf + pos, bufsz - pos, "context %d:\n",
30762 ctx->ctxid);
30763 diff -urNp linux-3.0.4/drivers/net/wireless/iwlwifi/iwl-debug.h linux-3.0.4/drivers/net/wireless/iwlwifi/iwl-debug.h
30764 --- linux-3.0.4/drivers/net/wireless/iwlwifi/iwl-debug.h 2011-07-21 22:17:23.000000000 -0400
30765 +++ linux-3.0.4/drivers/net/wireless/iwlwifi/iwl-debug.h 2011-08-23 21:47:55.000000000 -0400
30766 @@ -68,8 +68,8 @@ do {
30767 } while (0)
30768
30769 #else
30770 -#define IWL_DEBUG(__priv, level, fmt, args...)
30771 -#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
30772 +#define IWL_DEBUG(__priv, level, fmt, args...) do {} while (0)
30773 +#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) do {} while (0)
30774 static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
30775 const void *p, u32 len)
30776 {}
30777 diff -urNp linux-3.0.4/drivers/net/wireless/iwmc3200wifi/debugfs.c linux-3.0.4/drivers/net/wireless/iwmc3200wifi/debugfs.c
30778 --- linux-3.0.4/drivers/net/wireless/iwmc3200wifi/debugfs.c 2011-07-21 22:17:23.000000000 -0400
30779 +++ linux-3.0.4/drivers/net/wireless/iwmc3200wifi/debugfs.c 2011-08-23 21:48:14.000000000 -0400
30780 @@ -327,6 +327,8 @@ static ssize_t iwm_debugfs_fw_err_read(s
30781 int buf_len = 512;
30782 size_t len = 0;
30783
30784 + pax_track_stack();
30785 +
30786 if (*ppos != 0)
30787 return 0;
30788 if (count < sizeof(buf))
30789 diff -urNp linux-3.0.4/drivers/net/wireless/mac80211_hwsim.c linux-3.0.4/drivers/net/wireless/mac80211_hwsim.c
30790 --- linux-3.0.4/drivers/net/wireless/mac80211_hwsim.c 2011-07-21 22:17:23.000000000 -0400
30791 +++ linux-3.0.4/drivers/net/wireless/mac80211_hwsim.c 2011-08-23 21:47:55.000000000 -0400
30792 @@ -1260,9 +1260,11 @@ static int __init init_mac80211_hwsim(vo
30793 return -EINVAL;
30794
30795 if (fake_hw_scan) {
30796 - mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
30797 - mac80211_hwsim_ops.sw_scan_start = NULL;
30798 - mac80211_hwsim_ops.sw_scan_complete = NULL;
30799 + pax_open_kernel();
30800 + *(void **)&mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
30801 + *(void **)&mac80211_hwsim_ops.sw_scan_start = NULL;
30802 + *(void **)&mac80211_hwsim_ops.sw_scan_complete = NULL;
30803 + pax_close_kernel();
30804 }
30805
30806 spin_lock_init(&hwsim_radio_lock);
30807 diff -urNp linux-3.0.4/drivers/net/wireless/rndis_wlan.c linux-3.0.4/drivers/net/wireless/rndis_wlan.c
30808 --- linux-3.0.4/drivers/net/wireless/rndis_wlan.c 2011-07-21 22:17:23.000000000 -0400
30809 +++ linux-3.0.4/drivers/net/wireless/rndis_wlan.c 2011-08-23 21:47:55.000000000 -0400
30810 @@ -1277,7 +1277,7 @@ static int set_rts_threshold(struct usbn
30811
30812 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
30813
30814 - if (rts_threshold < 0 || rts_threshold > 2347)
30815 + if (rts_threshold > 2347)
30816 rts_threshold = 2347;
30817
30818 tmp = cpu_to_le32(rts_threshold);
30819 diff -urNp linux-3.0.4/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c linux-3.0.4/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
30820 --- linux-3.0.4/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c 2011-07-21 22:17:23.000000000 -0400
30821 +++ linux-3.0.4/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c 2011-08-23 21:48:14.000000000 -0400
30822 @@ -837,6 +837,8 @@ bool _rtl92c_phy_sw_chnl_step_by_step(st
30823 u8 rfpath;
30824 u8 num_total_rfpath = rtlphy->num_total_rfpath;
30825
30826 + pax_track_stack();
30827 +
30828 precommoncmdcnt = 0;
30829 _rtl92c_phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++,
30830 MAX_PRECMD_CNT,
30831 diff -urNp linux-3.0.4/drivers/net/wireless/wl1251/wl1251.h linux-3.0.4/drivers/net/wireless/wl1251/wl1251.h
30832 --- linux-3.0.4/drivers/net/wireless/wl1251/wl1251.h 2011-07-21 22:17:23.000000000 -0400
30833 +++ linux-3.0.4/drivers/net/wireless/wl1251/wl1251.h 2011-08-23 21:47:55.000000000 -0400
30834 @@ -266,7 +266,7 @@ struct wl1251_if_operations {
30835 void (*reset)(struct wl1251 *wl);
30836 void (*enable_irq)(struct wl1251 *wl);
30837 void (*disable_irq)(struct wl1251 *wl);
30838 -};
30839 +} __no_const;
30840
30841 struct wl1251 {
30842 struct ieee80211_hw *hw;
30843 diff -urNp linux-3.0.4/drivers/net/wireless/wl12xx/spi.c linux-3.0.4/drivers/net/wireless/wl12xx/spi.c
30844 --- linux-3.0.4/drivers/net/wireless/wl12xx/spi.c 2011-07-21 22:17:23.000000000 -0400
30845 +++ linux-3.0.4/drivers/net/wireless/wl12xx/spi.c 2011-08-23 21:48:14.000000000 -0400
30846 @@ -280,6 +280,8 @@ static void wl1271_spi_raw_write(struct
30847 u32 chunk_len;
30848 int i;
30849
30850 + pax_track_stack();
30851 +
30852 WARN_ON(len > WL1271_AGGR_BUFFER_SIZE);
30853
30854 spi_message_init(&m);
30855 diff -urNp linux-3.0.4/drivers/oprofile/buffer_sync.c linux-3.0.4/drivers/oprofile/buffer_sync.c
30856 --- linux-3.0.4/drivers/oprofile/buffer_sync.c 2011-07-21 22:17:23.000000000 -0400
30857 +++ linux-3.0.4/drivers/oprofile/buffer_sync.c 2011-08-23 21:47:55.000000000 -0400
30858 @@ -343,7 +343,7 @@ static void add_data(struct op_entry *en
30859 if (cookie == NO_COOKIE)
30860 offset = pc;
30861 if (cookie == INVALID_COOKIE) {
30862 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
30863 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
30864 offset = pc;
30865 }
30866 if (cookie != last_cookie) {
30867 @@ -387,14 +387,14 @@ add_sample(struct mm_struct *mm, struct
30868 /* add userspace sample */
30869
30870 if (!mm) {
30871 - atomic_inc(&oprofile_stats.sample_lost_no_mm);
30872 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
30873 return 0;
30874 }
30875
30876 cookie = lookup_dcookie(mm, s->eip, &offset);
30877
30878 if (cookie == INVALID_COOKIE) {
30879 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
30880 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
30881 return 0;
30882 }
30883
30884 @@ -563,7 +563,7 @@ void sync_buffer(int cpu)
30885 /* ignore backtraces if failed to add a sample */
30886 if (state == sb_bt_start) {
30887 state = sb_bt_ignore;
30888 - atomic_inc(&oprofile_stats.bt_lost_no_mapping);
30889 + atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
30890 }
30891 }
30892 release_mm(mm);
30893 diff -urNp linux-3.0.4/drivers/oprofile/event_buffer.c linux-3.0.4/drivers/oprofile/event_buffer.c
30894 --- linux-3.0.4/drivers/oprofile/event_buffer.c 2011-07-21 22:17:23.000000000 -0400
30895 +++ linux-3.0.4/drivers/oprofile/event_buffer.c 2011-08-23 21:47:55.000000000 -0400
30896 @@ -53,7 +53,7 @@ void add_event_entry(unsigned long value
30897 }
30898
30899 if (buffer_pos == buffer_size) {
30900 - atomic_inc(&oprofile_stats.event_lost_overflow);
30901 + atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
30902 return;
30903 }
30904
30905 diff -urNp linux-3.0.4/drivers/oprofile/oprof.c linux-3.0.4/drivers/oprofile/oprof.c
30906 --- linux-3.0.4/drivers/oprofile/oprof.c 2011-07-21 22:17:23.000000000 -0400
30907 +++ linux-3.0.4/drivers/oprofile/oprof.c 2011-08-23 21:47:55.000000000 -0400
30908 @@ -110,7 +110,7 @@ static void switch_worker(struct work_st
30909 if (oprofile_ops.switch_events())
30910 return;
30911
30912 - atomic_inc(&oprofile_stats.multiplex_counter);
30913 + atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
30914 start_switch_worker();
30915 }
30916
30917 diff -urNp linux-3.0.4/drivers/oprofile/oprofilefs.c linux-3.0.4/drivers/oprofile/oprofilefs.c
30918 --- linux-3.0.4/drivers/oprofile/oprofilefs.c 2011-07-21 22:17:23.000000000 -0400
30919 +++ linux-3.0.4/drivers/oprofile/oprofilefs.c 2011-08-23 21:47:55.000000000 -0400
30920 @@ -186,7 +186,7 @@ static const struct file_operations atom
30921
30922
30923 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
30924 - char const *name, atomic_t *val)
30925 + char const *name, atomic_unchecked_t *val)
30926 {
30927 return __oprofilefs_create_file(sb, root, name,
30928 &atomic_ro_fops, 0444, val);
30929 diff -urNp linux-3.0.4/drivers/oprofile/oprofile_stats.c linux-3.0.4/drivers/oprofile/oprofile_stats.c
30930 --- linux-3.0.4/drivers/oprofile/oprofile_stats.c 2011-07-21 22:17:23.000000000 -0400
30931 +++ linux-3.0.4/drivers/oprofile/oprofile_stats.c 2011-08-23 21:47:55.000000000 -0400
30932 @@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
30933 cpu_buf->sample_invalid_eip = 0;
30934 }
30935
30936 - atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
30937 - atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
30938 - atomic_set(&oprofile_stats.event_lost_overflow, 0);
30939 - atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
30940 - atomic_set(&oprofile_stats.multiplex_counter, 0);
30941 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
30942 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
30943 + atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
30944 + atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
30945 + atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
30946 }
30947
30948
30949 diff -urNp linux-3.0.4/drivers/oprofile/oprofile_stats.h linux-3.0.4/drivers/oprofile/oprofile_stats.h
30950 --- linux-3.0.4/drivers/oprofile/oprofile_stats.h 2011-07-21 22:17:23.000000000 -0400
30951 +++ linux-3.0.4/drivers/oprofile/oprofile_stats.h 2011-08-23 21:47:55.000000000 -0400
30952 @@ -13,11 +13,11 @@
30953 #include <asm/atomic.h>
30954
30955 struct oprofile_stat_struct {
30956 - atomic_t sample_lost_no_mm;
30957 - atomic_t sample_lost_no_mapping;
30958 - atomic_t bt_lost_no_mapping;
30959 - atomic_t event_lost_overflow;
30960 - atomic_t multiplex_counter;
30961 + atomic_unchecked_t sample_lost_no_mm;
30962 + atomic_unchecked_t sample_lost_no_mapping;
30963 + atomic_unchecked_t bt_lost_no_mapping;
30964 + atomic_unchecked_t event_lost_overflow;
30965 + atomic_unchecked_t multiplex_counter;
30966 };
30967
30968 extern struct oprofile_stat_struct oprofile_stats;
30969 diff -urNp linux-3.0.4/drivers/parport/procfs.c linux-3.0.4/drivers/parport/procfs.c
30970 --- linux-3.0.4/drivers/parport/procfs.c 2011-07-21 22:17:23.000000000 -0400
30971 +++ linux-3.0.4/drivers/parport/procfs.c 2011-08-23 21:47:55.000000000 -0400
30972 @@ -64,7 +64,7 @@ static int do_active_device(ctl_table *t
30973
30974 *ppos += len;
30975
30976 - return copy_to_user(result, buffer, len) ? -EFAULT : 0;
30977 + return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
30978 }
30979
30980 #ifdef CONFIG_PARPORT_1284
30981 @@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table
30982
30983 *ppos += len;
30984
30985 - return copy_to_user (result, buffer, len) ? -EFAULT : 0;
30986 + return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
30987 }
30988 #endif /* IEEE1284.3 support. */
30989
30990 diff -urNp linux-3.0.4/drivers/pci/hotplug/cpci_hotplug.h linux-3.0.4/drivers/pci/hotplug/cpci_hotplug.h
30991 --- linux-3.0.4/drivers/pci/hotplug/cpci_hotplug.h 2011-07-21 22:17:23.000000000 -0400
30992 +++ linux-3.0.4/drivers/pci/hotplug/cpci_hotplug.h 2011-08-23 21:47:55.000000000 -0400
30993 @@ -59,7 +59,7 @@ struct cpci_hp_controller_ops {
30994 int (*hardware_test) (struct slot* slot, u32 value);
30995 u8 (*get_power) (struct slot* slot);
30996 int (*set_power) (struct slot* slot, int value);
30997 -};
30998 +} __no_const;
30999
31000 struct cpci_hp_controller {
31001 unsigned int irq;
31002 diff -urNp linux-3.0.4/drivers/pci/hotplug/cpqphp_nvram.c linux-3.0.4/drivers/pci/hotplug/cpqphp_nvram.c
31003 --- linux-3.0.4/drivers/pci/hotplug/cpqphp_nvram.c 2011-07-21 22:17:23.000000000 -0400
31004 +++ linux-3.0.4/drivers/pci/hotplug/cpqphp_nvram.c 2011-08-23 21:47:55.000000000 -0400
31005 @@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_
31006
31007 void compaq_nvram_init (void __iomem *rom_start)
31008 {
31009 +
31010 +#ifndef CONFIG_PAX_KERNEXEC
31011 if (rom_start) {
31012 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
31013 }
31014 +#endif
31015 +
31016 dbg("int15 entry = %p\n", compaq_int15_entry_point);
31017
31018 /* initialize our int15 lock */
31019 diff -urNp linux-3.0.4/drivers/pci/pcie/aspm.c linux-3.0.4/drivers/pci/pcie/aspm.c
31020 --- linux-3.0.4/drivers/pci/pcie/aspm.c 2011-07-21 22:17:23.000000000 -0400
31021 +++ linux-3.0.4/drivers/pci/pcie/aspm.c 2011-08-23 21:47:55.000000000 -0400
31022 @@ -27,9 +27,9 @@
31023 #define MODULE_PARAM_PREFIX "pcie_aspm."
31024
31025 /* Note: those are not register definitions */
31026 -#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
31027 -#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
31028 -#define ASPM_STATE_L1 (4) /* L1 state */
31029 +#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
31030 +#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
31031 +#define ASPM_STATE_L1 (4U) /* L1 state */
31032 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
31033 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
31034
31035 diff -urNp linux-3.0.4/drivers/pci/probe.c linux-3.0.4/drivers/pci/probe.c
31036 --- linux-3.0.4/drivers/pci/probe.c 2011-07-21 22:17:23.000000000 -0400
31037 +++ linux-3.0.4/drivers/pci/probe.c 2011-08-23 21:47:55.000000000 -0400
31038 @@ -129,7 +129,7 @@ int __pci_read_base(struct pci_dev *dev,
31039 u32 l, sz, mask;
31040 u16 orig_cmd;
31041
31042 - mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
31043 + mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
31044
31045 if (!dev->mmio_always_on) {
31046 pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
31047 diff -urNp linux-3.0.4/drivers/pci/proc.c linux-3.0.4/drivers/pci/proc.c
31048 --- linux-3.0.4/drivers/pci/proc.c 2011-07-21 22:17:23.000000000 -0400
31049 +++ linux-3.0.4/drivers/pci/proc.c 2011-08-23 21:48:14.000000000 -0400
31050 @@ -476,7 +476,16 @@ static const struct file_operations proc
31051 static int __init pci_proc_init(void)
31052 {
31053 struct pci_dev *dev = NULL;
31054 +
31055 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
31056 +#ifdef CONFIG_GRKERNSEC_PROC_USER
31057 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
31058 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
31059 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
31060 +#endif
31061 +#else
31062 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
31063 +#endif
31064 proc_create("devices", 0, proc_bus_pci_dir,
31065 &proc_bus_pci_dev_operations);
31066 proc_initialized = 1;
31067 diff -urNp linux-3.0.4/drivers/pci/xen-pcifront.c linux-3.0.4/drivers/pci/xen-pcifront.c
31068 --- linux-3.0.4/drivers/pci/xen-pcifront.c 2011-07-21 22:17:23.000000000 -0400
31069 +++ linux-3.0.4/drivers/pci/xen-pcifront.c 2011-08-23 21:48:14.000000000 -0400
31070 @@ -187,6 +187,8 @@ static int pcifront_bus_read(struct pci_
31071 struct pcifront_sd *sd = bus->sysdata;
31072 struct pcifront_device *pdev = pcifront_get_pdev(sd);
31073
31074 + pax_track_stack();
31075 +
31076 if (verbose_request)
31077 dev_info(&pdev->xdev->dev,
31078 "read dev=%04x:%02x:%02x.%01x - offset %x size %d\n",
31079 @@ -226,6 +228,8 @@ static int pcifront_bus_write(struct pci
31080 struct pcifront_sd *sd = bus->sysdata;
31081 struct pcifront_device *pdev = pcifront_get_pdev(sd);
31082
31083 + pax_track_stack();
31084 +
31085 if (verbose_request)
31086 dev_info(&pdev->xdev->dev,
31087 "write dev=%04x:%02x:%02x.%01x - "
31088 @@ -258,6 +262,8 @@ static int pci_frontend_enable_msix(stru
31089 struct pcifront_device *pdev = pcifront_get_pdev(sd);
31090 struct msi_desc *entry;
31091
31092 + pax_track_stack();
31093 +
31094 if (nvec > SH_INFO_MAX_VEC) {
31095 dev_err(&dev->dev, "too much vector for pci frontend: %x."
31096 " Increase SH_INFO_MAX_VEC.\n", nvec);
31097 @@ -309,6 +315,8 @@ static void pci_frontend_disable_msix(st
31098 struct pcifront_sd *sd = dev->bus->sysdata;
31099 struct pcifront_device *pdev = pcifront_get_pdev(sd);
31100
31101 + pax_track_stack();
31102 +
31103 err = do_pci_op(pdev, &op);
31104
31105 /* What should do for error ? */
31106 @@ -328,6 +336,8 @@ static int pci_frontend_enable_msi(struc
31107 struct pcifront_sd *sd = dev->bus->sysdata;
31108 struct pcifront_device *pdev = pcifront_get_pdev(sd);
31109
31110 + pax_track_stack();
31111 +
31112 err = do_pci_op(pdev, &op);
31113 if (likely(!err)) {
31114 vector[0] = op.value;
31115 diff -urNp linux-3.0.4/drivers/platform/x86/thinkpad_acpi.c linux-3.0.4/drivers/platform/x86/thinkpad_acpi.c
31116 --- linux-3.0.4/drivers/platform/x86/thinkpad_acpi.c 2011-07-21 22:17:23.000000000 -0400
31117 +++ linux-3.0.4/drivers/platform/x86/thinkpad_acpi.c 2011-08-23 21:47:55.000000000 -0400
31118 @@ -2094,7 +2094,7 @@ static int hotkey_mask_get(void)
31119 return 0;
31120 }
31121
31122 -void static hotkey_mask_warn_incomplete_mask(void)
31123 +static void hotkey_mask_warn_incomplete_mask(void)
31124 {
31125 /* log only what the user can fix... */
31126 const u32 wantedmask = hotkey_driver_mask &
31127 diff -urNp linux-3.0.4/drivers/pnp/pnpbios/bioscalls.c linux-3.0.4/drivers/pnp/pnpbios/bioscalls.c
31128 --- linux-3.0.4/drivers/pnp/pnpbios/bioscalls.c 2011-07-21 22:17:23.000000000 -0400
31129 +++ linux-3.0.4/drivers/pnp/pnpbios/bioscalls.c 2011-08-23 21:47:55.000000000 -0400
31130 @@ -59,7 +59,7 @@ do { \
31131 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
31132 } while(0)
31133
31134 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
31135 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
31136 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
31137
31138 /*
31139 @@ -96,7 +96,10 @@ static inline u16 call_pnp_bios(u16 func
31140
31141 cpu = get_cpu();
31142 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
31143 +
31144 + pax_open_kernel();
31145 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
31146 + pax_close_kernel();
31147
31148 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
31149 spin_lock_irqsave(&pnp_bios_lock, flags);
31150 @@ -134,7 +137,10 @@ static inline u16 call_pnp_bios(u16 func
31151 :"memory");
31152 spin_unlock_irqrestore(&pnp_bios_lock, flags);
31153
31154 + pax_open_kernel();
31155 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
31156 + pax_close_kernel();
31157 +
31158 put_cpu();
31159
31160 /* If we get here and this is set then the PnP BIOS faulted on us. */
31161 @@ -468,7 +474,7 @@ int pnp_bios_read_escd(char *data, u32 n
31162 return status;
31163 }
31164
31165 -void pnpbios_calls_init(union pnp_bios_install_struct *header)
31166 +void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
31167 {
31168 int i;
31169
31170 @@ -476,6 +482,8 @@ void pnpbios_calls_init(union pnp_bios_i
31171 pnp_bios_callpoint.offset = header->fields.pm16offset;
31172 pnp_bios_callpoint.segment = PNP_CS16;
31173
31174 + pax_open_kernel();
31175 +
31176 for_each_possible_cpu(i) {
31177 struct desc_struct *gdt = get_cpu_gdt_table(i);
31178 if (!gdt)
31179 @@ -487,4 +495,6 @@ void pnpbios_calls_init(union pnp_bios_i
31180 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
31181 (unsigned long)__va(header->fields.pm16dseg));
31182 }
31183 +
31184 + pax_close_kernel();
31185 }
31186 diff -urNp linux-3.0.4/drivers/pnp/resource.c linux-3.0.4/drivers/pnp/resource.c
31187 --- linux-3.0.4/drivers/pnp/resource.c 2011-07-21 22:17:23.000000000 -0400
31188 +++ linux-3.0.4/drivers/pnp/resource.c 2011-08-23 21:47:55.000000000 -0400
31189 @@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, s
31190 return 1;
31191
31192 /* check if the resource is valid */
31193 - if (*irq < 0 || *irq > 15)
31194 + if (*irq > 15)
31195 return 0;
31196
31197 /* check if the resource is reserved */
31198 @@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, s
31199 return 1;
31200
31201 /* check if the resource is valid */
31202 - if (*dma < 0 || *dma == 4 || *dma > 7)
31203 + if (*dma == 4 || *dma > 7)
31204 return 0;
31205
31206 /* check if the resource is reserved */
31207 diff -urNp linux-3.0.4/drivers/power/bq27x00_battery.c linux-3.0.4/drivers/power/bq27x00_battery.c
31208 --- linux-3.0.4/drivers/power/bq27x00_battery.c 2011-07-21 22:17:23.000000000 -0400
31209 +++ linux-3.0.4/drivers/power/bq27x00_battery.c 2011-08-23 21:47:55.000000000 -0400
31210 @@ -67,7 +67,7 @@
31211 struct bq27x00_device_info;
31212 struct bq27x00_access_methods {
31213 int (*read)(struct bq27x00_device_info *di, u8 reg, bool single);
31214 -};
31215 +} __no_const;
31216
31217 enum bq27x00_chip { BQ27000, BQ27500 };
31218
31219 diff -urNp linux-3.0.4/drivers/regulator/max8660.c linux-3.0.4/drivers/regulator/max8660.c
31220 --- linux-3.0.4/drivers/regulator/max8660.c 2011-07-21 22:17:23.000000000 -0400
31221 +++ linux-3.0.4/drivers/regulator/max8660.c 2011-08-23 21:47:55.000000000 -0400
31222 @@ -383,8 +383,10 @@ static int __devinit max8660_probe(struc
31223 max8660->shadow_regs[MAX8660_OVER1] = 5;
31224 } else {
31225 /* Otherwise devices can be toggled via software */
31226 - max8660_dcdc_ops.enable = max8660_dcdc_enable;
31227 - max8660_dcdc_ops.disable = max8660_dcdc_disable;
31228 + pax_open_kernel();
31229 + *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
31230 + *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
31231 + pax_close_kernel();
31232 }
31233
31234 /*
31235 diff -urNp linux-3.0.4/drivers/regulator/mc13892-regulator.c linux-3.0.4/drivers/regulator/mc13892-regulator.c
31236 --- linux-3.0.4/drivers/regulator/mc13892-regulator.c 2011-07-21 22:17:23.000000000 -0400
31237 +++ linux-3.0.4/drivers/regulator/mc13892-regulator.c 2011-08-23 21:47:55.000000000 -0400
31238 @@ -564,10 +564,12 @@ static int __devinit mc13892_regulator_p
31239 }
31240 mc13xxx_unlock(mc13892);
31241
31242 - mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
31243 + pax_open_kernel();
31244 + *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
31245 = mc13892_vcam_set_mode;
31246 - mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
31247 + *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
31248 = mc13892_vcam_get_mode;
31249 + pax_close_kernel();
31250 for (i = 0; i < pdata->num_regulators; i++) {
31251 init_data = &pdata->regulators[i];
31252 priv->regulators[i] = regulator_register(
31253 diff -urNp linux-3.0.4/drivers/rtc/rtc-dev.c linux-3.0.4/drivers/rtc/rtc-dev.c
31254 --- linux-3.0.4/drivers/rtc/rtc-dev.c 2011-07-21 22:17:23.000000000 -0400
31255 +++ linux-3.0.4/drivers/rtc/rtc-dev.c 2011-08-23 21:48:14.000000000 -0400
31256 @@ -14,6 +14,7 @@
31257 #include <linux/module.h>
31258 #include <linux/rtc.h>
31259 #include <linux/sched.h>
31260 +#include <linux/grsecurity.h>
31261 #include "rtc-core.h"
31262
31263 static dev_t rtc_devt;
31264 @@ -345,6 +346,8 @@ static long rtc_dev_ioctl(struct file *f
31265 if (copy_from_user(&tm, uarg, sizeof(tm)))
31266 return -EFAULT;
31267
31268 + gr_log_timechange();
31269 +
31270 return rtc_set_time(rtc, &tm);
31271
31272 case RTC_PIE_ON:
31273 diff -urNp linux-3.0.4/drivers/scsi/aacraid/aacraid.h linux-3.0.4/drivers/scsi/aacraid/aacraid.h
31274 --- linux-3.0.4/drivers/scsi/aacraid/aacraid.h 2011-07-21 22:17:23.000000000 -0400
31275 +++ linux-3.0.4/drivers/scsi/aacraid/aacraid.h 2011-08-23 21:47:55.000000000 -0400
31276 @@ -492,7 +492,7 @@ struct adapter_ops
31277 int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
31278 /* Administrative operations */
31279 int (*adapter_comm)(struct aac_dev * dev, int comm);
31280 -};
31281 +} __no_const;
31282
31283 /*
31284 * Define which interrupt handler needs to be installed
31285 diff -urNp linux-3.0.4/drivers/scsi/aacraid/commctrl.c linux-3.0.4/drivers/scsi/aacraid/commctrl.c
31286 --- linux-3.0.4/drivers/scsi/aacraid/commctrl.c 2011-07-21 22:17:23.000000000 -0400
31287 +++ linux-3.0.4/drivers/scsi/aacraid/commctrl.c 2011-08-23 21:48:14.000000000 -0400
31288 @@ -482,6 +482,7 @@ static int aac_send_raw_srb(struct aac_d
31289 u32 actual_fibsize64, actual_fibsize = 0;
31290 int i;
31291
31292 + pax_track_stack();
31293
31294 if (dev->in_reset) {
31295 dprintk((KERN_DEBUG"aacraid: send raw srb -EBUSY\n"));
31296 diff -urNp linux-3.0.4/drivers/scsi/bfa/bfad.c linux-3.0.4/drivers/scsi/bfa/bfad.c
31297 --- linux-3.0.4/drivers/scsi/bfa/bfad.c 2011-07-21 22:17:23.000000000 -0400
31298 +++ linux-3.0.4/drivers/scsi/bfa/bfad.c 2011-08-23 21:48:14.000000000 -0400
31299 @@ -1032,6 +1032,8 @@ bfad_start_ops(struct bfad_s *bfad) {
31300 struct bfad_vport_s *vport, *vport_new;
31301 struct bfa_fcs_driver_info_s driver_info;
31302
31303 + pax_track_stack();
31304 +
31305 /* Fill the driver_info info to fcs*/
31306 memset(&driver_info, 0, sizeof(driver_info));
31307 strncpy(driver_info.version, BFAD_DRIVER_VERSION,
31308 diff -urNp linux-3.0.4/drivers/scsi/bfa/bfa_fcs_lport.c linux-3.0.4/drivers/scsi/bfa/bfa_fcs_lport.c
31309 --- linux-3.0.4/drivers/scsi/bfa/bfa_fcs_lport.c 2011-07-21 22:17:23.000000000 -0400
31310 +++ linux-3.0.4/drivers/scsi/bfa/bfa_fcs_lport.c 2011-08-23 21:48:14.000000000 -0400
31311 @@ -1559,6 +1559,8 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struc
31312 u16 len, count;
31313 u16 templen;
31314
31315 + pax_track_stack();
31316 +
31317 /*
31318 * get hba attributes
31319 */
31320 @@ -1836,6 +1838,8 @@ bfa_fcs_lport_fdmi_build_portattr_block(
31321 u8 count = 0;
31322 u16 templen;
31323
31324 + pax_track_stack();
31325 +
31326 /*
31327 * get port attributes
31328 */
31329 diff -urNp linux-3.0.4/drivers/scsi/bfa/bfa_fcs_rport.c linux-3.0.4/drivers/scsi/bfa/bfa_fcs_rport.c
31330 --- linux-3.0.4/drivers/scsi/bfa/bfa_fcs_rport.c 2011-07-21 22:17:23.000000000 -0400
31331 +++ linux-3.0.4/drivers/scsi/bfa/bfa_fcs_rport.c 2011-08-23 21:48:14.000000000 -0400
31332 @@ -1844,6 +1844,8 @@ bfa_fcs_rport_process_rpsc(struct bfa_fc
31333 struct fc_rpsc_speed_info_s speeds;
31334 struct bfa_port_attr_s pport_attr;
31335
31336 + pax_track_stack();
31337 +
31338 bfa_trc(port->fcs, rx_fchs->s_id);
31339 bfa_trc(port->fcs, rx_fchs->d_id);
31340
31341 diff -urNp linux-3.0.4/drivers/scsi/bfa/bfa.h linux-3.0.4/drivers/scsi/bfa/bfa.h
31342 --- linux-3.0.4/drivers/scsi/bfa/bfa.h 2011-07-21 22:17:23.000000000 -0400
31343 +++ linux-3.0.4/drivers/scsi/bfa/bfa.h 2011-08-23 21:47:55.000000000 -0400
31344 @@ -238,7 +238,7 @@ struct bfa_hwif_s {
31345 u32 *nvecs, u32 *maxvec);
31346 void (*hw_msix_get_rme_range) (struct bfa_s *bfa, u32 *start,
31347 u32 *end);
31348 -};
31349 +} __no_const;
31350 typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
31351
31352 struct bfa_iocfc_s {
31353 diff -urNp linux-3.0.4/drivers/scsi/bfa/bfa_ioc.h linux-3.0.4/drivers/scsi/bfa/bfa_ioc.h
31354 --- linux-3.0.4/drivers/scsi/bfa/bfa_ioc.h 2011-07-21 22:17:23.000000000 -0400
31355 +++ linux-3.0.4/drivers/scsi/bfa/bfa_ioc.h 2011-08-23 21:47:55.000000000 -0400
31356 @@ -196,7 +196,7 @@ struct bfa_ioc_cbfn_s {
31357 bfa_ioc_disable_cbfn_t disable_cbfn;
31358 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
31359 bfa_ioc_reset_cbfn_t reset_cbfn;
31360 -};
31361 +} __no_const;
31362
31363 /*
31364 * Heartbeat failure notification queue element.
31365 @@ -268,7 +268,7 @@ struct bfa_ioc_hwif_s {
31366 void (*ioc_sync_leave) (struct bfa_ioc_s *ioc);
31367 void (*ioc_sync_ack) (struct bfa_ioc_s *ioc);
31368 bfa_boolean_t (*ioc_sync_complete) (struct bfa_ioc_s *ioc);
31369 -};
31370 +} __no_const;
31371
31372 #define bfa_ioc_pcifn(__ioc) ((__ioc)->pcidev.pci_func)
31373 #define bfa_ioc_devid(__ioc) ((__ioc)->pcidev.device_id)
31374 diff -urNp linux-3.0.4/drivers/scsi/BusLogic.c linux-3.0.4/drivers/scsi/BusLogic.c
31375 --- linux-3.0.4/drivers/scsi/BusLogic.c 2011-07-21 22:17:23.000000000 -0400
31376 +++ linux-3.0.4/drivers/scsi/BusLogic.c 2011-08-23 21:48:14.000000000 -0400
31377 @@ -962,6 +962,8 @@ static int __init BusLogic_InitializeFla
31378 static void __init BusLogic_InitializeProbeInfoList(struct BusLogic_HostAdapter
31379 *PrototypeHostAdapter)
31380 {
31381 + pax_track_stack();
31382 +
31383 /*
31384 If a PCI BIOS is present, interrogate it for MultiMaster and FlashPoint
31385 Host Adapters; otherwise, default to the standard ISA MultiMaster probe.
31386 diff -urNp linux-3.0.4/drivers/scsi/dpt_i2o.c linux-3.0.4/drivers/scsi/dpt_i2o.c
31387 --- linux-3.0.4/drivers/scsi/dpt_i2o.c 2011-07-21 22:17:23.000000000 -0400
31388 +++ linux-3.0.4/drivers/scsi/dpt_i2o.c 2011-08-23 21:48:14.000000000 -0400
31389 @@ -1811,6 +1811,8 @@ static int adpt_i2o_passthru(adpt_hba* p
31390 dma_addr_t addr;
31391 ulong flags = 0;
31392
31393 + pax_track_stack();
31394 +
31395 memset(&msg, 0, MAX_MESSAGE_SIZE*4);
31396 // get user msg size in u32s
31397 if(get_user(size, &user_msg[0])){
31398 @@ -2317,6 +2319,8 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pH
31399 s32 rcode;
31400 dma_addr_t addr;
31401
31402 + pax_track_stack();
31403 +
31404 memset(msg, 0 , sizeof(msg));
31405 len = scsi_bufflen(cmd);
31406 direction = 0x00000000;
31407 diff -urNp linux-3.0.4/drivers/scsi/eata.c linux-3.0.4/drivers/scsi/eata.c
31408 --- linux-3.0.4/drivers/scsi/eata.c 2011-07-21 22:17:23.000000000 -0400
31409 +++ linux-3.0.4/drivers/scsi/eata.c 2011-08-23 21:48:14.000000000 -0400
31410 @@ -1087,6 +1087,8 @@ static int port_detect(unsigned long por
31411 struct hostdata *ha;
31412 char name[16];
31413
31414 + pax_track_stack();
31415 +
31416 sprintf(name, "%s%d", driver_name, j);
31417
31418 if (!request_region(port_base, REGION_SIZE, driver_name)) {
31419 diff -urNp linux-3.0.4/drivers/scsi/fcoe/fcoe_ctlr.c linux-3.0.4/drivers/scsi/fcoe/fcoe_ctlr.c
31420 --- linux-3.0.4/drivers/scsi/fcoe/fcoe_ctlr.c 2011-07-21 22:17:23.000000000 -0400
31421 +++ linux-3.0.4/drivers/scsi/fcoe/fcoe_ctlr.c 2011-08-23 21:48:14.000000000 -0400
31422 @@ -2503,6 +2503,8 @@ static int fcoe_ctlr_vn_recv(struct fcoe
31423 } buf;
31424 int rc;
31425
31426 + pax_track_stack();
31427 +
31428 fiph = (struct fip_header *)skb->data;
31429 sub = fiph->fip_subcode;
31430
31431 diff -urNp linux-3.0.4/drivers/scsi/gdth.c linux-3.0.4/drivers/scsi/gdth.c
31432 --- linux-3.0.4/drivers/scsi/gdth.c 2011-07-21 22:17:23.000000000 -0400
31433 +++ linux-3.0.4/drivers/scsi/gdth.c 2011-08-23 21:48:14.000000000 -0400
31434 @@ -4107,6 +4107,8 @@ static int ioc_lockdrv(void __user *arg)
31435 unsigned long flags;
31436 gdth_ha_str *ha;
31437
31438 + pax_track_stack();
31439 +
31440 if (copy_from_user(&ldrv, arg, sizeof(gdth_ioctl_lockdrv)))
31441 return -EFAULT;
31442 ha = gdth_find_ha(ldrv.ionode);
31443 @@ -4139,6 +4141,8 @@ static int ioc_resetdrv(void __user *arg
31444 gdth_ha_str *ha;
31445 int rval;
31446
31447 + pax_track_stack();
31448 +
31449 if (copy_from_user(&res, arg, sizeof(gdth_ioctl_reset)) ||
31450 res.number >= MAX_HDRIVES)
31451 return -EFAULT;
31452 @@ -4174,6 +4178,8 @@ static int ioc_general(void __user *arg,
31453 gdth_ha_str *ha;
31454 int rval;
31455
31456 + pax_track_stack();
31457 +
31458 if (copy_from_user(&gen, arg, sizeof(gdth_ioctl_general)))
31459 return -EFAULT;
31460 ha = gdth_find_ha(gen.ionode);
31461 @@ -4642,6 +4648,9 @@ static void gdth_flush(gdth_ha_str *ha)
31462 int i;
31463 gdth_cmd_str gdtcmd;
31464 char cmnd[MAX_COMMAND_SIZE];
31465 +
31466 + pax_track_stack();
31467 +
31468 memset(cmnd, 0xff, MAX_COMMAND_SIZE);
31469
31470 TRACE2(("gdth_flush() hanum %d\n", ha->hanum));
31471 diff -urNp linux-3.0.4/drivers/scsi/gdth_proc.c linux-3.0.4/drivers/scsi/gdth_proc.c
31472 --- linux-3.0.4/drivers/scsi/gdth_proc.c 2011-07-21 22:17:23.000000000 -0400
31473 +++ linux-3.0.4/drivers/scsi/gdth_proc.c 2011-08-23 21:48:14.000000000 -0400
31474 @@ -47,6 +47,9 @@ static int gdth_set_asc_info(struct Scsi
31475 u64 paddr;
31476
31477 char cmnd[MAX_COMMAND_SIZE];
31478 +
31479 + pax_track_stack();
31480 +
31481 memset(cmnd, 0xff, 12);
31482 memset(&gdtcmd, 0, sizeof(gdth_cmd_str));
31483
31484 @@ -175,6 +178,8 @@ static int gdth_get_info(char *buffer,ch
31485 gdth_hget_str *phg;
31486 char cmnd[MAX_COMMAND_SIZE];
31487
31488 + pax_track_stack();
31489 +
31490 gdtcmd = kmalloc(sizeof(*gdtcmd), GFP_KERNEL);
31491 estr = kmalloc(sizeof(*estr), GFP_KERNEL);
31492 if (!gdtcmd || !estr)
31493 diff -urNp linux-3.0.4/drivers/scsi/hosts.c linux-3.0.4/drivers/scsi/hosts.c
31494 --- linux-3.0.4/drivers/scsi/hosts.c 2011-07-21 22:17:23.000000000 -0400
31495 +++ linux-3.0.4/drivers/scsi/hosts.c 2011-08-23 21:47:55.000000000 -0400
31496 @@ -42,7 +42,7 @@
31497 #include "scsi_logging.h"
31498
31499
31500 -static atomic_t scsi_host_next_hn; /* host_no for next new host */
31501 +static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
31502
31503
31504 static void scsi_host_cls_release(struct device *dev)
31505 @@ -354,7 +354,7 @@ struct Scsi_Host *scsi_host_alloc(struct
31506 * subtract one because we increment first then return, but we need to
31507 * know what the next host number was before increment
31508 */
31509 - shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
31510 + shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
31511 shost->dma_channel = 0xff;
31512
31513 /* These three are default values which can be overridden */
31514 diff -urNp linux-3.0.4/drivers/scsi/hpsa.c linux-3.0.4/drivers/scsi/hpsa.c
31515 --- linux-3.0.4/drivers/scsi/hpsa.c 2011-07-21 22:17:23.000000000 -0400
31516 +++ linux-3.0.4/drivers/scsi/hpsa.c 2011-08-23 21:47:55.000000000 -0400
31517 @@ -498,7 +498,7 @@ static inline u32 next_command(struct ct
31518 u32 a;
31519
31520 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
31521 - return h->access.command_completed(h);
31522 + return h->access->command_completed(h);
31523
31524 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
31525 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
31526 @@ -2938,7 +2938,7 @@ static void start_io(struct ctlr_info *h
31527 while (!list_empty(&h->reqQ)) {
31528 c = list_entry(h->reqQ.next, struct CommandList, list);
31529 /* can't do anything if fifo is full */
31530 - if ((h->access.fifo_full(h))) {
31531 + if ((h->access->fifo_full(h))) {
31532 dev_warn(&h->pdev->dev, "fifo full\n");
31533 break;
31534 }
31535 @@ -2948,7 +2948,7 @@ static void start_io(struct ctlr_info *h
31536 h->Qdepth--;
31537
31538 /* Tell the controller execute command */
31539 - h->access.submit_command(h, c);
31540 + h->access->submit_command(h, c);
31541
31542 /* Put job onto the completed Q */
31543 addQ(&h->cmpQ, c);
31544 @@ -2957,17 +2957,17 @@ static void start_io(struct ctlr_info *h
31545
31546 static inline unsigned long get_next_completion(struct ctlr_info *h)
31547 {
31548 - return h->access.command_completed(h);
31549 + return h->access->command_completed(h);
31550 }
31551
31552 static inline bool interrupt_pending(struct ctlr_info *h)
31553 {
31554 - return h->access.intr_pending(h);
31555 + return h->access->intr_pending(h);
31556 }
31557
31558 static inline long interrupt_not_for_us(struct ctlr_info *h)
31559 {
31560 - return (h->access.intr_pending(h) == 0) ||
31561 + return (h->access->intr_pending(h) == 0) ||
31562 (h->interrupts_enabled == 0);
31563 }
31564
31565 @@ -3857,7 +3857,7 @@ static int __devinit hpsa_pci_init(struc
31566 if (prod_index < 0)
31567 return -ENODEV;
31568 h->product_name = products[prod_index].product_name;
31569 - h->access = *(products[prod_index].access);
31570 + h->access = products[prod_index].access;
31571
31572 if (hpsa_board_disabled(h->pdev)) {
31573 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
31574 @@ -4134,7 +4134,7 @@ reinit_after_soft_reset:
31575 }
31576
31577 /* make sure the board interrupts are off */
31578 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
31579 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
31580
31581 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
31582 goto clean2;
31583 @@ -4168,7 +4168,7 @@ reinit_after_soft_reset:
31584 * fake ones to scoop up any residual completions.
31585 */
31586 spin_lock_irqsave(&h->lock, flags);
31587 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
31588 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
31589 spin_unlock_irqrestore(&h->lock, flags);
31590 free_irq(h->intr[h->intr_mode], h);
31591 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
31592 @@ -4187,9 +4187,9 @@ reinit_after_soft_reset:
31593 dev_info(&h->pdev->dev, "Board READY.\n");
31594 dev_info(&h->pdev->dev,
31595 "Waiting for stale completions to drain.\n");
31596 - h->access.set_intr_mask(h, HPSA_INTR_ON);
31597 + h->access->set_intr_mask(h, HPSA_INTR_ON);
31598 msleep(10000);
31599 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
31600 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
31601
31602 rc = controller_reset_failed(h->cfgtable);
31603 if (rc)
31604 @@ -4210,7 +4210,7 @@ reinit_after_soft_reset:
31605 }
31606
31607 /* Turn the interrupts on so we can service requests */
31608 - h->access.set_intr_mask(h, HPSA_INTR_ON);
31609 + h->access->set_intr_mask(h, HPSA_INTR_ON);
31610
31611 hpsa_hba_inquiry(h);
31612 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
31613 @@ -4263,7 +4263,7 @@ static void hpsa_shutdown(struct pci_dev
31614 * To write all data in the battery backed cache to disks
31615 */
31616 hpsa_flush_cache(h);
31617 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
31618 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
31619 free_irq(h->intr[h->intr_mode], h);
31620 #ifdef CONFIG_PCI_MSI
31621 if (h->msix_vector)
31622 @@ -4426,7 +4426,7 @@ static __devinit void hpsa_enter_perform
31623 return;
31624 }
31625 /* Change the access methods to the performant access methods */
31626 - h->access = SA5_performant_access;
31627 + h->access = &SA5_performant_access;
31628 h->transMethod = CFGTBL_Trans_Performant;
31629 }
31630
31631 diff -urNp linux-3.0.4/drivers/scsi/hpsa.h linux-3.0.4/drivers/scsi/hpsa.h
31632 --- linux-3.0.4/drivers/scsi/hpsa.h 2011-09-02 18:11:21.000000000 -0400
31633 +++ linux-3.0.4/drivers/scsi/hpsa.h 2011-08-23 21:47:55.000000000 -0400
31634 @@ -73,7 +73,7 @@ struct ctlr_info {
31635 unsigned int msix_vector;
31636 unsigned int msi_vector;
31637 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
31638 - struct access_method access;
31639 + struct access_method *access;
31640
31641 /* queue and queue Info */
31642 struct list_head reqQ;
31643 diff -urNp linux-3.0.4/drivers/scsi/ips.h linux-3.0.4/drivers/scsi/ips.h
31644 --- linux-3.0.4/drivers/scsi/ips.h 2011-07-21 22:17:23.000000000 -0400
31645 +++ linux-3.0.4/drivers/scsi/ips.h 2011-08-23 21:47:55.000000000 -0400
31646 @@ -1027,7 +1027,7 @@ typedef struct {
31647 int (*intr)(struct ips_ha *);
31648 void (*enableint)(struct ips_ha *);
31649 uint32_t (*statupd)(struct ips_ha *);
31650 -} ips_hw_func_t;
31651 +} __no_const ips_hw_func_t;
31652
31653 typedef struct ips_ha {
31654 uint8_t ha_id[IPS_MAX_CHANNELS+1];
31655 diff -urNp linux-3.0.4/drivers/scsi/libfc/fc_exch.c linux-3.0.4/drivers/scsi/libfc/fc_exch.c
31656 --- linux-3.0.4/drivers/scsi/libfc/fc_exch.c 2011-07-21 22:17:23.000000000 -0400
31657 +++ linux-3.0.4/drivers/scsi/libfc/fc_exch.c 2011-08-23 21:47:55.000000000 -0400
31658 @@ -105,12 +105,12 @@ struct fc_exch_mgr {
31659 * all together if not used XXX
31660 */
31661 struct {
31662 - atomic_t no_free_exch;
31663 - atomic_t no_free_exch_xid;
31664 - atomic_t xid_not_found;
31665 - atomic_t xid_busy;
31666 - atomic_t seq_not_found;
31667 - atomic_t non_bls_resp;
31668 + atomic_unchecked_t no_free_exch;
31669 + atomic_unchecked_t no_free_exch_xid;
31670 + atomic_unchecked_t xid_not_found;
31671 + atomic_unchecked_t xid_busy;
31672 + atomic_unchecked_t seq_not_found;
31673 + atomic_unchecked_t non_bls_resp;
31674 } stats;
31675 };
31676
31677 @@ -700,7 +700,7 @@ static struct fc_exch *fc_exch_em_alloc(
31678 /* allocate memory for exchange */
31679 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
31680 if (!ep) {
31681 - atomic_inc(&mp->stats.no_free_exch);
31682 + atomic_inc_unchecked(&mp->stats.no_free_exch);
31683 goto out;
31684 }
31685 memset(ep, 0, sizeof(*ep));
31686 @@ -761,7 +761,7 @@ out:
31687 return ep;
31688 err:
31689 spin_unlock_bh(&pool->lock);
31690 - atomic_inc(&mp->stats.no_free_exch_xid);
31691 + atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
31692 mempool_free(ep, mp->ep_pool);
31693 return NULL;
31694 }
31695 @@ -906,7 +906,7 @@ static enum fc_pf_rjt_reason fc_seq_look
31696 xid = ntohs(fh->fh_ox_id); /* we originated exch */
31697 ep = fc_exch_find(mp, xid);
31698 if (!ep) {
31699 - atomic_inc(&mp->stats.xid_not_found);
31700 + atomic_inc_unchecked(&mp->stats.xid_not_found);
31701 reject = FC_RJT_OX_ID;
31702 goto out;
31703 }
31704 @@ -936,7 +936,7 @@ static enum fc_pf_rjt_reason fc_seq_look
31705 ep = fc_exch_find(mp, xid);
31706 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
31707 if (ep) {
31708 - atomic_inc(&mp->stats.xid_busy);
31709 + atomic_inc_unchecked(&mp->stats.xid_busy);
31710 reject = FC_RJT_RX_ID;
31711 goto rel;
31712 }
31713 @@ -947,7 +947,7 @@ static enum fc_pf_rjt_reason fc_seq_look
31714 }
31715 xid = ep->xid; /* get our XID */
31716 } else if (!ep) {
31717 - atomic_inc(&mp->stats.xid_not_found);
31718 + atomic_inc_unchecked(&mp->stats.xid_not_found);
31719 reject = FC_RJT_RX_ID; /* XID not found */
31720 goto out;
31721 }
31722 @@ -964,7 +964,7 @@ static enum fc_pf_rjt_reason fc_seq_look
31723 } else {
31724 sp = &ep->seq;
31725 if (sp->id != fh->fh_seq_id) {
31726 - atomic_inc(&mp->stats.seq_not_found);
31727 + atomic_inc_unchecked(&mp->stats.seq_not_found);
31728 reject = FC_RJT_SEQ_ID; /* sequence/exch should exist */
31729 goto rel;
31730 }
31731 @@ -1392,22 +1392,22 @@ static void fc_exch_recv_seq_resp(struct
31732
31733 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
31734 if (!ep) {
31735 - atomic_inc(&mp->stats.xid_not_found);
31736 + atomic_inc_unchecked(&mp->stats.xid_not_found);
31737 goto out;
31738 }
31739 if (ep->esb_stat & ESB_ST_COMPLETE) {
31740 - atomic_inc(&mp->stats.xid_not_found);
31741 + atomic_inc_unchecked(&mp->stats.xid_not_found);
31742 goto rel;
31743 }
31744 if (ep->rxid == FC_XID_UNKNOWN)
31745 ep->rxid = ntohs(fh->fh_rx_id);
31746 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
31747 - atomic_inc(&mp->stats.xid_not_found);
31748 + atomic_inc_unchecked(&mp->stats.xid_not_found);
31749 goto rel;
31750 }
31751 if (ep->did != ntoh24(fh->fh_s_id) &&
31752 ep->did != FC_FID_FLOGI) {
31753 - atomic_inc(&mp->stats.xid_not_found);
31754 + atomic_inc_unchecked(&mp->stats.xid_not_found);
31755 goto rel;
31756 }
31757 sof = fr_sof(fp);
31758 @@ -1416,7 +1416,7 @@ static void fc_exch_recv_seq_resp(struct
31759 sp->ssb_stat |= SSB_ST_RESP;
31760 sp->id = fh->fh_seq_id;
31761 } else if (sp->id != fh->fh_seq_id) {
31762 - atomic_inc(&mp->stats.seq_not_found);
31763 + atomic_inc_unchecked(&mp->stats.seq_not_found);
31764 goto rel;
31765 }
31766
31767 @@ -1480,9 +1480,9 @@ static void fc_exch_recv_resp(struct fc_
31768 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
31769
31770 if (!sp)
31771 - atomic_inc(&mp->stats.xid_not_found);
31772 + atomic_inc_unchecked(&mp->stats.xid_not_found);
31773 else
31774 - atomic_inc(&mp->stats.non_bls_resp);
31775 + atomic_inc_unchecked(&mp->stats.non_bls_resp);
31776
31777 fc_frame_free(fp);
31778 }
31779 diff -urNp linux-3.0.4/drivers/scsi/libsas/sas_ata.c linux-3.0.4/drivers/scsi/libsas/sas_ata.c
31780 --- linux-3.0.4/drivers/scsi/libsas/sas_ata.c 2011-07-21 22:17:23.000000000 -0400
31781 +++ linux-3.0.4/drivers/scsi/libsas/sas_ata.c 2011-08-23 21:47:55.000000000 -0400
31782 @@ -368,7 +368,7 @@ static struct ata_port_operations sas_sa
31783 .postreset = ata_std_postreset,
31784 .error_handler = ata_std_error_handler,
31785 .post_internal_cmd = sas_ata_post_internal,
31786 - .qc_defer = ata_std_qc_defer,
31787 + .qc_defer = ata_std_qc_defer,
31788 .qc_prep = ata_noop_qc_prep,
31789 .qc_issue = sas_ata_qc_issue,
31790 .qc_fill_rtf = sas_ata_qc_fill_rtf,
31791 diff -urNp linux-3.0.4/drivers/scsi/lpfc/lpfc_debugfs.c linux-3.0.4/drivers/scsi/lpfc/lpfc_debugfs.c
31792 --- linux-3.0.4/drivers/scsi/lpfc/lpfc_debugfs.c 2011-07-21 22:17:23.000000000 -0400
31793 +++ linux-3.0.4/drivers/scsi/lpfc/lpfc_debugfs.c 2011-08-23 21:48:14.000000000 -0400
31794 @@ -104,7 +104,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_
31795
31796 #include <linux/debugfs.h>
31797
31798 -static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
31799 +static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
31800 static unsigned long lpfc_debugfs_start_time = 0L;
31801
31802 /* iDiag */
31803 @@ -141,7 +141,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_v
31804 lpfc_debugfs_enable = 0;
31805
31806 len = 0;
31807 - index = (atomic_read(&vport->disc_trc_cnt) + 1) &
31808 + index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
31809 (lpfc_debugfs_max_disc_trc - 1);
31810 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
31811 dtp = vport->disc_trc + i;
31812 @@ -202,7 +202,7 @@ lpfc_debugfs_slow_ring_trc_data(struct l
31813 lpfc_debugfs_enable = 0;
31814
31815 len = 0;
31816 - index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
31817 + index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
31818 (lpfc_debugfs_max_slow_ring_trc - 1);
31819 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
31820 dtp = phba->slow_ring_trc + i;
31821 @@ -380,6 +380,8 @@ lpfc_debugfs_dumpHBASlim_data(struct lpf
31822 uint32_t *ptr;
31823 char buffer[1024];
31824
31825 + pax_track_stack();
31826 +
31827 off = 0;
31828 spin_lock_irq(&phba->hbalock);
31829
31830 @@ -617,14 +619,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport
31831 !vport || !vport->disc_trc)
31832 return;
31833
31834 - index = atomic_inc_return(&vport->disc_trc_cnt) &
31835 + index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
31836 (lpfc_debugfs_max_disc_trc - 1);
31837 dtp = vport->disc_trc + index;
31838 dtp->fmt = fmt;
31839 dtp->data1 = data1;
31840 dtp->data2 = data2;
31841 dtp->data3 = data3;
31842 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
31843 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
31844 dtp->jif = jiffies;
31845 #endif
31846 return;
31847 @@ -655,14 +657,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_h
31848 !phba || !phba->slow_ring_trc)
31849 return;
31850
31851 - index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
31852 + index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
31853 (lpfc_debugfs_max_slow_ring_trc - 1);
31854 dtp = phba->slow_ring_trc + index;
31855 dtp->fmt = fmt;
31856 dtp->data1 = data1;
31857 dtp->data2 = data2;
31858 dtp->data3 = data3;
31859 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
31860 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
31861 dtp->jif = jiffies;
31862 #endif
31863 return;
31864 @@ -2606,7 +2608,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
31865 "slow_ring buffer\n");
31866 goto debug_failed;
31867 }
31868 - atomic_set(&phba->slow_ring_trc_cnt, 0);
31869 + atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
31870 memset(phba->slow_ring_trc, 0,
31871 (sizeof(struct lpfc_debugfs_trc) *
31872 lpfc_debugfs_max_slow_ring_trc));
31873 @@ -2652,7 +2654,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
31874 "buffer\n");
31875 goto debug_failed;
31876 }
31877 - atomic_set(&vport->disc_trc_cnt, 0);
31878 + atomic_set_unchecked(&vport->disc_trc_cnt, 0);
31879
31880 snprintf(name, sizeof(name), "discovery_trace");
31881 vport->debug_disc_trc =
31882 diff -urNp linux-3.0.4/drivers/scsi/lpfc/lpfc.h linux-3.0.4/drivers/scsi/lpfc/lpfc.h
31883 --- linux-3.0.4/drivers/scsi/lpfc/lpfc.h 2011-07-21 22:17:23.000000000 -0400
31884 +++ linux-3.0.4/drivers/scsi/lpfc/lpfc.h 2011-08-23 21:47:55.000000000 -0400
31885 @@ -420,7 +420,7 @@ struct lpfc_vport {
31886 struct dentry *debug_nodelist;
31887 struct dentry *vport_debugfs_root;
31888 struct lpfc_debugfs_trc *disc_trc;
31889 - atomic_t disc_trc_cnt;
31890 + atomic_unchecked_t disc_trc_cnt;
31891 #endif
31892 uint8_t stat_data_enabled;
31893 uint8_t stat_data_blocked;
31894 @@ -826,8 +826,8 @@ struct lpfc_hba {
31895 struct timer_list fabric_block_timer;
31896 unsigned long bit_flags;
31897 #define FABRIC_COMANDS_BLOCKED 0
31898 - atomic_t num_rsrc_err;
31899 - atomic_t num_cmd_success;
31900 + atomic_unchecked_t num_rsrc_err;
31901 + atomic_unchecked_t num_cmd_success;
31902 unsigned long last_rsrc_error_time;
31903 unsigned long last_ramp_down_time;
31904 unsigned long last_ramp_up_time;
31905 @@ -841,7 +841,7 @@ struct lpfc_hba {
31906 struct dentry *debug_dumpDif; /* BlockGuard BPL*/
31907 struct dentry *debug_slow_ring_trc;
31908 struct lpfc_debugfs_trc *slow_ring_trc;
31909 - atomic_t slow_ring_trc_cnt;
31910 + atomic_unchecked_t slow_ring_trc_cnt;
31911 /* iDiag debugfs sub-directory */
31912 struct dentry *idiag_root;
31913 struct dentry *idiag_pci_cfg;
31914 diff -urNp linux-3.0.4/drivers/scsi/lpfc/lpfc_init.c linux-3.0.4/drivers/scsi/lpfc/lpfc_init.c
31915 --- linux-3.0.4/drivers/scsi/lpfc/lpfc_init.c 2011-07-21 22:17:23.000000000 -0400
31916 +++ linux-3.0.4/drivers/scsi/lpfc/lpfc_init.c 2011-08-23 21:47:56.000000000 -0400
31917 @@ -9923,8 +9923,10 @@ lpfc_init(void)
31918 printk(LPFC_COPYRIGHT "\n");
31919
31920 if (lpfc_enable_npiv) {
31921 - lpfc_transport_functions.vport_create = lpfc_vport_create;
31922 - lpfc_transport_functions.vport_delete = lpfc_vport_delete;
31923 + pax_open_kernel();
31924 + *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
31925 + *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
31926 + pax_close_kernel();
31927 }
31928 lpfc_transport_template =
31929 fc_attach_transport(&lpfc_transport_functions);
31930 diff -urNp linux-3.0.4/drivers/scsi/lpfc/lpfc_scsi.c linux-3.0.4/drivers/scsi/lpfc/lpfc_scsi.c
31931 --- linux-3.0.4/drivers/scsi/lpfc/lpfc_scsi.c 2011-07-21 22:17:23.000000000 -0400
31932 +++ linux-3.0.4/drivers/scsi/lpfc/lpfc_scsi.c 2011-08-23 21:47:56.000000000 -0400
31933 @@ -297,7 +297,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hb
31934 uint32_t evt_posted;
31935
31936 spin_lock_irqsave(&phba->hbalock, flags);
31937 - atomic_inc(&phba->num_rsrc_err);
31938 + atomic_inc_unchecked(&phba->num_rsrc_err);
31939 phba->last_rsrc_error_time = jiffies;
31940
31941 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
31942 @@ -338,7 +338,7 @@ lpfc_rampup_queue_depth(struct lpfc_vpor
31943 unsigned long flags;
31944 struct lpfc_hba *phba = vport->phba;
31945 uint32_t evt_posted;
31946 - atomic_inc(&phba->num_cmd_success);
31947 + atomic_inc_unchecked(&phba->num_cmd_success);
31948
31949 if (vport->cfg_lun_queue_depth <= queue_depth)
31950 return;
31951 @@ -382,8 +382,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
31952 unsigned long num_rsrc_err, num_cmd_success;
31953 int i;
31954
31955 - num_rsrc_err = atomic_read(&phba->num_rsrc_err);
31956 - num_cmd_success = atomic_read(&phba->num_cmd_success);
31957 + num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
31958 + num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
31959
31960 vports = lpfc_create_vport_work_array(phba);
31961 if (vports != NULL)
31962 @@ -403,8 +403,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
31963 }
31964 }
31965 lpfc_destroy_vport_work_array(phba, vports);
31966 - atomic_set(&phba->num_rsrc_err, 0);
31967 - atomic_set(&phba->num_cmd_success, 0);
31968 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
31969 + atomic_set_unchecked(&phba->num_cmd_success, 0);
31970 }
31971
31972 /**
31973 @@ -438,8 +438,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_h
31974 }
31975 }
31976 lpfc_destroy_vport_work_array(phba, vports);
31977 - atomic_set(&phba->num_rsrc_err, 0);
31978 - atomic_set(&phba->num_cmd_success, 0);
31979 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
31980 + atomic_set_unchecked(&phba->num_cmd_success, 0);
31981 }
31982
31983 /**
31984 diff -urNp linux-3.0.4/drivers/scsi/megaraid/megaraid_mbox.c linux-3.0.4/drivers/scsi/megaraid/megaraid_mbox.c
31985 --- linux-3.0.4/drivers/scsi/megaraid/megaraid_mbox.c 2011-07-21 22:17:23.000000000 -0400
31986 +++ linux-3.0.4/drivers/scsi/megaraid/megaraid_mbox.c 2011-08-23 21:48:14.000000000 -0400
31987 @@ -3503,6 +3503,8 @@ megaraid_cmm_register(adapter_t *adapter
31988 int rval;
31989 int i;
31990
31991 + pax_track_stack();
31992 +
31993 // Allocate memory for the base list of scb for management module.
31994 adapter->uscb_list = kcalloc(MBOX_MAX_USER_CMDS, sizeof(scb_t), GFP_KERNEL);
31995
31996 diff -urNp linux-3.0.4/drivers/scsi/osd/osd_initiator.c linux-3.0.4/drivers/scsi/osd/osd_initiator.c
31997 --- linux-3.0.4/drivers/scsi/osd/osd_initiator.c 2011-07-21 22:17:23.000000000 -0400
31998 +++ linux-3.0.4/drivers/scsi/osd/osd_initiator.c 2011-08-23 21:48:14.000000000 -0400
31999 @@ -97,6 +97,8 @@ static int _osd_get_print_system_info(st
32000 int nelem = ARRAY_SIZE(get_attrs), a = 0;
32001 int ret;
32002
32003 + pax_track_stack();
32004 +
32005 or = osd_start_request(od, GFP_KERNEL);
32006 if (!or)
32007 return -ENOMEM;
32008 diff -urNp linux-3.0.4/drivers/scsi/pmcraid.c linux-3.0.4/drivers/scsi/pmcraid.c
32009 --- linux-3.0.4/drivers/scsi/pmcraid.c 2011-09-02 18:11:21.000000000 -0400
32010 +++ linux-3.0.4/drivers/scsi/pmcraid.c 2011-08-23 21:47:56.000000000 -0400
32011 @@ -201,8 +201,8 @@ static int pmcraid_slave_alloc(struct sc
32012 res->scsi_dev = scsi_dev;
32013 scsi_dev->hostdata = res;
32014 res->change_detected = 0;
32015 - atomic_set(&res->read_failures, 0);
32016 - atomic_set(&res->write_failures, 0);
32017 + atomic_set_unchecked(&res->read_failures, 0);
32018 + atomic_set_unchecked(&res->write_failures, 0);
32019 rc = 0;
32020 }
32021 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
32022 @@ -2677,9 +2677,9 @@ static int pmcraid_error_handler(struct
32023
32024 /* If this was a SCSI read/write command keep count of errors */
32025 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
32026 - atomic_inc(&res->read_failures);
32027 + atomic_inc_unchecked(&res->read_failures);
32028 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
32029 - atomic_inc(&res->write_failures);
32030 + atomic_inc_unchecked(&res->write_failures);
32031
32032 if (!RES_IS_GSCSI(res->cfg_entry) &&
32033 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
32034 @@ -3535,7 +3535,7 @@ static int pmcraid_queuecommand_lck(
32035 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
32036 * hrrq_id assigned here in queuecommand
32037 */
32038 - ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
32039 + ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
32040 pinstance->num_hrrq;
32041 cmd->cmd_done = pmcraid_io_done;
32042
32043 @@ -3860,7 +3860,7 @@ static long pmcraid_ioctl_passthrough(
32044 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
32045 * hrrq_id assigned here in queuecommand
32046 */
32047 - ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
32048 + ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
32049 pinstance->num_hrrq;
32050
32051 if (request_size) {
32052 @@ -4498,7 +4498,7 @@ static void pmcraid_worker_function(stru
32053
32054 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
32055 /* add resources only after host is added into system */
32056 - if (!atomic_read(&pinstance->expose_resources))
32057 + if (!atomic_read_unchecked(&pinstance->expose_resources))
32058 return;
32059
32060 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
32061 @@ -5332,8 +5332,8 @@ static int __devinit pmcraid_init_instan
32062 init_waitqueue_head(&pinstance->reset_wait_q);
32063
32064 atomic_set(&pinstance->outstanding_cmds, 0);
32065 - atomic_set(&pinstance->last_message_id, 0);
32066 - atomic_set(&pinstance->expose_resources, 0);
32067 + atomic_set_unchecked(&pinstance->last_message_id, 0);
32068 + atomic_set_unchecked(&pinstance->expose_resources, 0);
32069
32070 INIT_LIST_HEAD(&pinstance->free_res_q);
32071 INIT_LIST_HEAD(&pinstance->used_res_q);
32072 @@ -6048,7 +6048,7 @@ static int __devinit pmcraid_probe(
32073 /* Schedule worker thread to handle CCN and take care of adding and
32074 * removing devices to OS
32075 */
32076 - atomic_set(&pinstance->expose_resources, 1);
32077 + atomic_set_unchecked(&pinstance->expose_resources, 1);
32078 schedule_work(&pinstance->worker_q);
32079 return rc;
32080
32081 diff -urNp linux-3.0.4/drivers/scsi/pmcraid.h linux-3.0.4/drivers/scsi/pmcraid.h
32082 --- linux-3.0.4/drivers/scsi/pmcraid.h 2011-07-21 22:17:23.000000000 -0400
32083 +++ linux-3.0.4/drivers/scsi/pmcraid.h 2011-08-23 21:47:56.000000000 -0400
32084 @@ -749,7 +749,7 @@ struct pmcraid_instance {
32085 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
32086
32087 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
32088 - atomic_t last_message_id;
32089 + atomic_unchecked_t last_message_id;
32090
32091 /* configuration table */
32092 struct pmcraid_config_table *cfg_table;
32093 @@ -778,7 +778,7 @@ struct pmcraid_instance {
32094 atomic_t outstanding_cmds;
32095
32096 /* should add/delete resources to mid-layer now ?*/
32097 - atomic_t expose_resources;
32098 + atomic_unchecked_t expose_resources;
32099
32100
32101
32102 @@ -814,8 +814,8 @@ struct pmcraid_resource_entry {
32103 struct pmcraid_config_table_entry_ext cfg_entry_ext;
32104 };
32105 struct scsi_device *scsi_dev; /* Link scsi_device structure */
32106 - atomic_t read_failures; /* count of failed READ commands */
32107 - atomic_t write_failures; /* count of failed WRITE commands */
32108 + atomic_unchecked_t read_failures; /* count of failed READ commands */
32109 + atomic_unchecked_t write_failures; /* count of failed WRITE commands */
32110
32111 /* To indicate add/delete/modify during CCN */
32112 u8 change_detected;
32113 diff -urNp linux-3.0.4/drivers/scsi/qla2xxx/qla_def.h linux-3.0.4/drivers/scsi/qla2xxx/qla_def.h
32114 --- linux-3.0.4/drivers/scsi/qla2xxx/qla_def.h 2011-07-21 22:17:23.000000000 -0400
32115 +++ linux-3.0.4/drivers/scsi/qla2xxx/qla_def.h 2011-08-23 21:47:56.000000000 -0400
32116 @@ -2244,7 +2244,7 @@ struct isp_operations {
32117 int (*get_flash_version) (struct scsi_qla_host *, void *);
32118 int (*start_scsi) (srb_t *);
32119 int (*abort_isp) (struct scsi_qla_host *);
32120 -};
32121 +} __no_const;
32122
32123 /* MSI-X Support *************************************************************/
32124
32125 diff -urNp linux-3.0.4/drivers/scsi/qla4xxx/ql4_def.h linux-3.0.4/drivers/scsi/qla4xxx/ql4_def.h
32126 --- linux-3.0.4/drivers/scsi/qla4xxx/ql4_def.h 2011-07-21 22:17:23.000000000 -0400
32127 +++ linux-3.0.4/drivers/scsi/qla4xxx/ql4_def.h 2011-08-23 21:47:56.000000000 -0400
32128 @@ -256,7 +256,7 @@ struct ddb_entry {
32129 atomic_t retry_relogin_timer; /* Min Time between relogins
32130 * (4000 only) */
32131 atomic_t relogin_timer; /* Max Time to wait for relogin to complete */
32132 - atomic_t relogin_retry_count; /* Num of times relogin has been
32133 + atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
32134 * retried */
32135
32136 uint16_t port;
32137 diff -urNp linux-3.0.4/drivers/scsi/qla4xxx/ql4_init.c linux-3.0.4/drivers/scsi/qla4xxx/ql4_init.c
32138 --- linux-3.0.4/drivers/scsi/qla4xxx/ql4_init.c 2011-07-21 22:17:23.000000000 -0400
32139 +++ linux-3.0.4/drivers/scsi/qla4xxx/ql4_init.c 2011-08-23 21:47:56.000000000 -0400
32140 @@ -680,7 +680,7 @@ static struct ddb_entry * qla4xxx_alloc_
32141 ddb_entry->fw_ddb_index = fw_ddb_index;
32142 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
32143 atomic_set(&ddb_entry->relogin_timer, 0);
32144 - atomic_set(&ddb_entry->relogin_retry_count, 0);
32145 + atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
32146 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
32147 list_add_tail(&ddb_entry->list, &ha->ddb_list);
32148 ha->fw_ddb_index_map[fw_ddb_index] = ddb_entry;
32149 @@ -1433,7 +1433,7 @@ int qla4xxx_process_ddb_changed(struct s
32150 if ((ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) &&
32151 (atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE)) {
32152 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
32153 - atomic_set(&ddb_entry->relogin_retry_count, 0);
32154 + atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
32155 atomic_set(&ddb_entry->relogin_timer, 0);
32156 clear_bit(DF_RELOGIN, &ddb_entry->flags);
32157 iscsi_unblock_session(ddb_entry->sess);
32158 diff -urNp linux-3.0.4/drivers/scsi/qla4xxx/ql4_os.c linux-3.0.4/drivers/scsi/qla4xxx/ql4_os.c
32159 --- linux-3.0.4/drivers/scsi/qla4xxx/ql4_os.c 2011-07-21 22:17:23.000000000 -0400
32160 +++ linux-3.0.4/drivers/scsi/qla4xxx/ql4_os.c 2011-08-23 21:47:56.000000000 -0400
32161 @@ -811,13 +811,13 @@ static void qla4xxx_timer(struct scsi_ql
32162 ddb_entry->fw_ddb_device_state ==
32163 DDB_DS_SESSION_FAILED) {
32164 /* Reset retry relogin timer */
32165 - atomic_inc(&ddb_entry->relogin_retry_count);
32166 + atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
32167 DEBUG2(printk("scsi%ld: ddb [%d] relogin"
32168 " timed out-retrying"
32169 " relogin (%d)\n",
32170 ha->host_no,
32171 ddb_entry->fw_ddb_index,
32172 - atomic_read(&ddb_entry->
32173 + atomic_read_unchecked(&ddb_entry->
32174 relogin_retry_count))
32175 );
32176 start_dpc++;
32177 diff -urNp linux-3.0.4/drivers/scsi/scsi.c linux-3.0.4/drivers/scsi/scsi.c
32178 --- linux-3.0.4/drivers/scsi/scsi.c 2011-07-21 22:17:23.000000000 -0400
32179 +++ linux-3.0.4/drivers/scsi/scsi.c 2011-08-23 21:47:56.000000000 -0400
32180 @@ -655,7 +655,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *
32181 unsigned long timeout;
32182 int rtn = 0;
32183
32184 - atomic_inc(&cmd->device->iorequest_cnt);
32185 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
32186
32187 /* check if the device is still usable */
32188 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
32189 diff -urNp linux-3.0.4/drivers/scsi/scsi_debug.c linux-3.0.4/drivers/scsi/scsi_debug.c
32190 --- linux-3.0.4/drivers/scsi/scsi_debug.c 2011-07-21 22:17:23.000000000 -0400
32191 +++ linux-3.0.4/drivers/scsi/scsi_debug.c 2011-08-23 21:48:14.000000000 -0400
32192 @@ -1493,6 +1493,8 @@ static int resp_mode_select(struct scsi_
32193 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
32194 unsigned char *cmd = (unsigned char *)scp->cmnd;
32195
32196 + pax_track_stack();
32197 +
32198 if ((errsts = check_readiness(scp, 1, devip)))
32199 return errsts;
32200 memset(arr, 0, sizeof(arr));
32201 @@ -1590,6 +1592,8 @@ static int resp_log_sense(struct scsi_cm
32202 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
32203 unsigned char *cmd = (unsigned char *)scp->cmnd;
32204
32205 + pax_track_stack();
32206 +
32207 if ((errsts = check_readiness(scp, 1, devip)))
32208 return errsts;
32209 memset(arr, 0, sizeof(arr));
32210 diff -urNp linux-3.0.4/drivers/scsi/scsi_lib.c linux-3.0.4/drivers/scsi/scsi_lib.c
32211 --- linux-3.0.4/drivers/scsi/scsi_lib.c 2011-09-02 18:11:21.000000000 -0400
32212 +++ linux-3.0.4/drivers/scsi/scsi_lib.c 2011-08-23 21:47:56.000000000 -0400
32213 @@ -1412,7 +1412,7 @@ static void scsi_kill_request(struct req
32214 shost = sdev->host;
32215 scsi_init_cmd_errh(cmd);
32216 cmd->result = DID_NO_CONNECT << 16;
32217 - atomic_inc(&cmd->device->iorequest_cnt);
32218 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
32219
32220 /*
32221 * SCSI request completion path will do scsi_device_unbusy(),
32222 @@ -1438,9 +1438,9 @@ static void scsi_softirq_done(struct req
32223
32224 INIT_LIST_HEAD(&cmd->eh_entry);
32225
32226 - atomic_inc(&cmd->device->iodone_cnt);
32227 + atomic_inc_unchecked(&cmd->device->iodone_cnt);
32228 if (cmd->result)
32229 - atomic_inc(&cmd->device->ioerr_cnt);
32230 + atomic_inc_unchecked(&cmd->device->ioerr_cnt);
32231
32232 disposition = scsi_decide_disposition(cmd);
32233 if (disposition != SUCCESS &&
32234 diff -urNp linux-3.0.4/drivers/scsi/scsi_sysfs.c linux-3.0.4/drivers/scsi/scsi_sysfs.c
32235 --- linux-3.0.4/drivers/scsi/scsi_sysfs.c 2011-07-21 22:17:23.000000000 -0400
32236 +++ linux-3.0.4/drivers/scsi/scsi_sysfs.c 2011-08-23 21:47:56.000000000 -0400
32237 @@ -622,7 +622,7 @@ show_iostat_##field(struct device *dev,
32238 char *buf) \
32239 { \
32240 struct scsi_device *sdev = to_scsi_device(dev); \
32241 - unsigned long long count = atomic_read(&sdev->field); \
32242 + unsigned long long count = atomic_read_unchecked(&sdev->field); \
32243 return snprintf(buf, 20, "0x%llx\n", count); \
32244 } \
32245 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
32246 diff -urNp linux-3.0.4/drivers/scsi/scsi_transport_fc.c linux-3.0.4/drivers/scsi/scsi_transport_fc.c
32247 --- linux-3.0.4/drivers/scsi/scsi_transport_fc.c 2011-07-21 22:17:23.000000000 -0400
32248 +++ linux-3.0.4/drivers/scsi/scsi_transport_fc.c 2011-08-23 21:47:56.000000000 -0400
32249 @@ -484,7 +484,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_
32250 * Netlink Infrastructure
32251 */
32252
32253 -static atomic_t fc_event_seq;
32254 +static atomic_unchecked_t fc_event_seq;
32255
32256 /**
32257 * fc_get_event_number - Obtain the next sequential FC event number
32258 @@ -497,7 +497,7 @@ static atomic_t fc_event_seq;
32259 u32
32260 fc_get_event_number(void)
32261 {
32262 - return atomic_add_return(1, &fc_event_seq);
32263 + return atomic_add_return_unchecked(1, &fc_event_seq);
32264 }
32265 EXPORT_SYMBOL(fc_get_event_number);
32266
32267 @@ -645,7 +645,7 @@ static __init int fc_transport_init(void
32268 {
32269 int error;
32270
32271 - atomic_set(&fc_event_seq, 0);
32272 + atomic_set_unchecked(&fc_event_seq, 0);
32273
32274 error = transport_class_register(&fc_host_class);
32275 if (error)
32276 @@ -835,7 +835,7 @@ static int fc_str_to_dev_loss(const char
32277 char *cp;
32278
32279 *val = simple_strtoul(buf, &cp, 0);
32280 - if ((*cp && (*cp != '\n')) || (*val < 0))
32281 + if (*cp && (*cp != '\n'))
32282 return -EINVAL;
32283 /*
32284 * Check for overflow; dev_loss_tmo is u32
32285 diff -urNp linux-3.0.4/drivers/scsi/scsi_transport_iscsi.c linux-3.0.4/drivers/scsi/scsi_transport_iscsi.c
32286 --- linux-3.0.4/drivers/scsi/scsi_transport_iscsi.c 2011-07-21 22:17:23.000000000 -0400
32287 +++ linux-3.0.4/drivers/scsi/scsi_transport_iscsi.c 2011-08-23 21:47:56.000000000 -0400
32288 @@ -83,7 +83,7 @@ struct iscsi_internal {
32289 struct device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1];
32290 };
32291
32292 -static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
32293 +static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
32294 static struct workqueue_struct *iscsi_eh_timer_workq;
32295
32296 /*
32297 @@ -761,7 +761,7 @@ int iscsi_add_session(struct iscsi_cls_s
32298 int err;
32299
32300 ihost = shost->shost_data;
32301 - session->sid = atomic_add_return(1, &iscsi_session_nr);
32302 + session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
32303
32304 if (id == ISCSI_MAX_TARGET) {
32305 for (id = 0; id < ISCSI_MAX_TARGET; id++) {
32306 @@ -2200,7 +2200,7 @@ static __init int iscsi_transport_init(v
32307 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
32308 ISCSI_TRANSPORT_VERSION);
32309
32310 - atomic_set(&iscsi_session_nr, 0);
32311 + atomic_set_unchecked(&iscsi_session_nr, 0);
32312
32313 err = class_register(&iscsi_transport_class);
32314 if (err)
32315 diff -urNp linux-3.0.4/drivers/scsi/scsi_transport_srp.c linux-3.0.4/drivers/scsi/scsi_transport_srp.c
32316 --- linux-3.0.4/drivers/scsi/scsi_transport_srp.c 2011-07-21 22:17:23.000000000 -0400
32317 +++ linux-3.0.4/drivers/scsi/scsi_transport_srp.c 2011-08-23 21:47:56.000000000 -0400
32318 @@ -33,7 +33,7 @@
32319 #include "scsi_transport_srp_internal.h"
32320
32321 struct srp_host_attrs {
32322 - atomic_t next_port_id;
32323 + atomic_unchecked_t next_port_id;
32324 };
32325 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
32326
32327 @@ -62,7 +62,7 @@ static int srp_host_setup(struct transpo
32328 struct Scsi_Host *shost = dev_to_shost(dev);
32329 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
32330
32331 - atomic_set(&srp_host->next_port_id, 0);
32332 + atomic_set_unchecked(&srp_host->next_port_id, 0);
32333 return 0;
32334 }
32335
32336 @@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct S
32337 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
32338 rport->roles = ids->roles;
32339
32340 - id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
32341 + id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
32342 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
32343
32344 transport_setup_device(&rport->dev);
32345 diff -urNp linux-3.0.4/drivers/scsi/sg.c linux-3.0.4/drivers/scsi/sg.c
32346 --- linux-3.0.4/drivers/scsi/sg.c 2011-07-21 22:17:23.000000000 -0400
32347 +++ linux-3.0.4/drivers/scsi/sg.c 2011-08-23 21:47:56.000000000 -0400
32348 @@ -2310,7 +2310,7 @@ struct sg_proc_leaf {
32349 const struct file_operations * fops;
32350 };
32351
32352 -static struct sg_proc_leaf sg_proc_leaf_arr[] = {
32353 +static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
32354 {"allow_dio", &adio_fops},
32355 {"debug", &debug_fops},
32356 {"def_reserved_size", &dressz_fops},
32357 @@ -2325,7 +2325,7 @@ sg_proc_init(void)
32358 {
32359 int k, mask;
32360 int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
32361 - struct sg_proc_leaf * leaf;
32362 + const struct sg_proc_leaf * leaf;
32363
32364 sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
32365 if (!sg_proc_sgp)
32366 diff -urNp linux-3.0.4/drivers/scsi/sym53c8xx_2/sym_glue.c linux-3.0.4/drivers/scsi/sym53c8xx_2/sym_glue.c
32367 --- linux-3.0.4/drivers/scsi/sym53c8xx_2/sym_glue.c 2011-07-21 22:17:23.000000000 -0400
32368 +++ linux-3.0.4/drivers/scsi/sym53c8xx_2/sym_glue.c 2011-08-23 21:48:14.000000000 -0400
32369 @@ -1756,6 +1756,8 @@ static int __devinit sym2_probe(struct p
32370 int do_iounmap = 0;
32371 int do_disable_device = 1;
32372
32373 + pax_track_stack();
32374 +
32375 memset(&sym_dev, 0, sizeof(sym_dev));
32376 memset(&nvram, 0, sizeof(nvram));
32377 sym_dev.pdev = pdev;
32378 diff -urNp linux-3.0.4/drivers/scsi/vmw_pvscsi.c linux-3.0.4/drivers/scsi/vmw_pvscsi.c
32379 --- linux-3.0.4/drivers/scsi/vmw_pvscsi.c 2011-07-21 22:17:23.000000000 -0400
32380 +++ linux-3.0.4/drivers/scsi/vmw_pvscsi.c 2011-08-23 21:48:14.000000000 -0400
32381 @@ -447,6 +447,8 @@ static void pvscsi_setup_all_rings(const
32382 dma_addr_t base;
32383 unsigned i;
32384
32385 + pax_track_stack();
32386 +
32387 cmd.ringsStatePPN = adapter->ringStatePA >> PAGE_SHIFT;
32388 cmd.reqRingNumPages = adapter->req_pages;
32389 cmd.cmpRingNumPages = adapter->cmp_pages;
32390 diff -urNp linux-3.0.4/drivers/spi/spi.c linux-3.0.4/drivers/spi/spi.c
32391 --- linux-3.0.4/drivers/spi/spi.c 2011-07-21 22:17:23.000000000 -0400
32392 +++ linux-3.0.4/drivers/spi/spi.c 2011-08-23 21:47:56.000000000 -0400
32393 @@ -1023,7 +1023,7 @@ int spi_bus_unlock(struct spi_master *ma
32394 EXPORT_SYMBOL_GPL(spi_bus_unlock);
32395
32396 /* portable code must never pass more than 32 bytes */
32397 -#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
32398 +#define SPI_BUFSIZ max(32UL,SMP_CACHE_BYTES)
32399
32400 static u8 *buf;
32401
32402 diff -urNp linux-3.0.4/drivers/staging/ath6kl/os/linux/ar6000_drv.c linux-3.0.4/drivers/staging/ath6kl/os/linux/ar6000_drv.c
32403 --- linux-3.0.4/drivers/staging/ath6kl/os/linux/ar6000_drv.c 2011-09-02 18:11:21.000000000 -0400
32404 +++ linux-3.0.4/drivers/staging/ath6kl/os/linux/ar6000_drv.c 2011-08-23 21:48:14.000000000 -0400
32405 @@ -362,7 +362,7 @@ static struct ar_cookie s_ar_cookie_mem[
32406 (((ar)->arTargetType == TARGET_TYPE_AR6003) ? AR6003_HOST_INTEREST_ITEM_ADDRESS(item) : 0))
32407
32408
32409 -static struct net_device_ops ar6000_netdev_ops = {
32410 +static net_device_ops_no_const ar6000_netdev_ops = {
32411 .ndo_init = NULL,
32412 .ndo_open = ar6000_open,
32413 .ndo_stop = ar6000_close,
32414 diff -urNp linux-3.0.4/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h linux-3.0.4/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h
32415 --- linux-3.0.4/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h 2011-07-21 22:17:23.000000000 -0400
32416 +++ linux-3.0.4/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h 2011-08-23 21:47:56.000000000 -0400
32417 @@ -30,7 +30,7 @@ typedef bool (*ar6k_pal_recv_pkt_t)(void
32418 typedef struct ar6k_pal_config_s
32419 {
32420 ar6k_pal_recv_pkt_t fpar6k_pal_recv_pkt;
32421 -}ar6k_pal_config_t;
32422 +} __no_const ar6k_pal_config_t;
32423
32424 void register_pal_cb(ar6k_pal_config_t *palConfig_p);
32425 #endif /* _AR6K_PAL_H_ */
32426 diff -urNp linux-3.0.4/drivers/staging/brcm80211/brcmfmac/dhd_linux.c linux-3.0.4/drivers/staging/brcm80211/brcmfmac/dhd_linux.c
32427 --- linux-3.0.4/drivers/staging/brcm80211/brcmfmac/dhd_linux.c 2011-07-21 22:17:23.000000000 -0400
32428 +++ linux-3.0.4/drivers/staging/brcm80211/brcmfmac/dhd_linux.c 2011-08-23 21:47:56.000000000 -0400
32429 @@ -853,14 +853,14 @@ static void dhd_op_if(dhd_if_t *ifp)
32430 free_netdev(ifp->net);
32431 }
32432 /* Allocate etherdev, including space for private structure */
32433 - ifp->net = alloc_etherdev(sizeof(dhd));
32434 + ifp->net = alloc_etherdev(sizeof(*dhd));
32435 if (!ifp->net) {
32436 DHD_ERROR(("%s: OOM - alloc_etherdev\n", __func__));
32437 ret = -ENOMEM;
32438 }
32439 if (ret == 0) {
32440 strcpy(ifp->net->name, ifp->name);
32441 - memcpy(netdev_priv(ifp->net), &dhd, sizeof(dhd));
32442 + memcpy(netdev_priv(ifp->net), dhd, sizeof(*dhd));
32443 err = dhd_net_attach(&dhd->pub, ifp->idx);
32444 if (err != 0) {
32445 DHD_ERROR(("%s: dhd_net_attach failed, "
32446 @@ -1872,7 +1872,7 @@ dhd_pub_t *dhd_attach(struct dhd_bus *bu
32447 strcpy(nv_path, nvram_path);
32448
32449 /* Allocate etherdev, including space for private structure */
32450 - net = alloc_etherdev(sizeof(dhd));
32451 + net = alloc_etherdev(sizeof(*dhd));
32452 if (!net) {
32453 DHD_ERROR(("%s: OOM - alloc_etherdev\n", __func__));
32454 goto fail;
32455 @@ -1888,7 +1888,7 @@ dhd_pub_t *dhd_attach(struct dhd_bus *bu
32456 /*
32457 * Save the dhd_info into the priv
32458 */
32459 - memcpy(netdev_priv(net), &dhd, sizeof(dhd));
32460 + memcpy(netdev_priv(net), dhd, sizeof(*dhd));
32461
32462 /* Set network interface name if it was provided as module parameter */
32463 if (iface_name[0]) {
32464 @@ -2004,7 +2004,7 @@ dhd_pub_t *dhd_attach(struct dhd_bus *bu
32465 /*
32466 * Save the dhd_info into the priv
32467 */
32468 - memcpy(netdev_priv(net), &dhd, sizeof(dhd));
32469 + memcpy(netdev_priv(net), dhd, sizeof(*dhd));
32470
32471 #if defined(CUSTOMER_HW2) && defined(CONFIG_WIFI_CONTROL_FUNC)
32472 g_bus = bus;
32473 diff -urNp linux-3.0.4/drivers/staging/brcm80211/brcmsmac/phy/wlc_phy_int.h linux-3.0.4/drivers/staging/brcm80211/brcmsmac/phy/wlc_phy_int.h
32474 --- linux-3.0.4/drivers/staging/brcm80211/brcmsmac/phy/wlc_phy_int.h 2011-07-21 22:17:23.000000000 -0400
32475 +++ linux-3.0.4/drivers/staging/brcm80211/brcmsmac/phy/wlc_phy_int.h 2011-08-23 21:47:56.000000000 -0400
32476 @@ -593,7 +593,7 @@ struct phy_func_ptr {
32477 initfn_t carrsuppr;
32478 rxsigpwrfn_t rxsigpwr;
32479 detachfn_t detach;
32480 -};
32481 +} __no_const;
32482 typedef struct phy_func_ptr phy_func_ptr_t;
32483
32484 struct phy_info {
32485 diff -urNp linux-3.0.4/drivers/staging/brcm80211/include/bcmsdh.h linux-3.0.4/drivers/staging/brcm80211/include/bcmsdh.h
32486 --- linux-3.0.4/drivers/staging/brcm80211/include/bcmsdh.h 2011-07-21 22:17:23.000000000 -0400
32487 +++ linux-3.0.4/drivers/staging/brcm80211/include/bcmsdh.h 2011-08-23 21:47:56.000000000 -0400
32488 @@ -185,7 +185,7 @@ typedef struct {
32489 u16 func, uint bustype, void *regsva, void *param);
32490 /* detach from device */
32491 void (*detach) (void *ch);
32492 -} bcmsdh_driver_t;
32493 +} __no_const bcmsdh_driver_t;
32494
32495 /* platform specific/high level functions */
32496 extern int bcmsdh_register(bcmsdh_driver_t *driver);
32497 diff -urNp linux-3.0.4/drivers/staging/et131x/et1310_tx.c linux-3.0.4/drivers/staging/et131x/et1310_tx.c
32498 --- linux-3.0.4/drivers/staging/et131x/et1310_tx.c 2011-07-21 22:17:23.000000000 -0400
32499 +++ linux-3.0.4/drivers/staging/et131x/et1310_tx.c 2011-08-23 21:47:56.000000000 -0400
32500 @@ -635,11 +635,11 @@ inline void et131x_free_send_packet(stru
32501 struct net_device_stats *stats = &etdev->net_stats;
32502
32503 if (tcb->flags & fMP_DEST_BROAD)
32504 - atomic_inc(&etdev->Stats.brdcstxmt);
32505 + atomic_inc_unchecked(&etdev->Stats.brdcstxmt);
32506 else if (tcb->flags & fMP_DEST_MULTI)
32507 - atomic_inc(&etdev->Stats.multixmt);
32508 + atomic_inc_unchecked(&etdev->Stats.multixmt);
32509 else
32510 - atomic_inc(&etdev->Stats.unixmt);
32511 + atomic_inc_unchecked(&etdev->Stats.unixmt);
32512
32513 if (tcb->skb) {
32514 stats->tx_bytes += tcb->skb->len;
32515 diff -urNp linux-3.0.4/drivers/staging/et131x/et131x_adapter.h linux-3.0.4/drivers/staging/et131x/et131x_adapter.h
32516 --- linux-3.0.4/drivers/staging/et131x/et131x_adapter.h 2011-07-21 22:17:23.000000000 -0400
32517 +++ linux-3.0.4/drivers/staging/et131x/et131x_adapter.h 2011-08-23 21:47:56.000000000 -0400
32518 @@ -110,11 +110,11 @@ typedef struct _ce_stats_t {
32519 * operations
32520 */
32521 u32 unircv; /* # multicast packets received */
32522 - atomic_t unixmt; /* # multicast packets for Tx */
32523 + atomic_unchecked_t unixmt; /* # multicast packets for Tx */
32524 u32 multircv; /* # multicast packets received */
32525 - atomic_t multixmt; /* # multicast packets for Tx */
32526 + atomic_unchecked_t multixmt; /* # multicast packets for Tx */
32527 u32 brdcstrcv; /* # broadcast packets received */
32528 - atomic_t brdcstxmt; /* # broadcast packets for Tx */
32529 + atomic_unchecked_t brdcstxmt; /* # broadcast packets for Tx */
32530 u32 norcvbuf; /* # Rx packets discarded */
32531 u32 noxmtbuf; /* # Tx packets discarded */
32532
32533 diff -urNp linux-3.0.4/drivers/staging/hv/channel.c linux-3.0.4/drivers/staging/hv/channel.c
32534 --- linux-3.0.4/drivers/staging/hv/channel.c 2011-09-02 18:11:21.000000000 -0400
32535 +++ linux-3.0.4/drivers/staging/hv/channel.c 2011-08-23 21:47:56.000000000 -0400
32536 @@ -433,8 +433,8 @@ int vmbus_establish_gpadl(struct vmbus_c
32537 int ret = 0;
32538 int t;
32539
32540 - next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
32541 - atomic_inc(&vmbus_connection.next_gpadl_handle);
32542 + next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
32543 + atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
32544
32545 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
32546 if (ret)
32547 diff -urNp linux-3.0.4/drivers/staging/hv/hv.c linux-3.0.4/drivers/staging/hv/hv.c
32548 --- linux-3.0.4/drivers/staging/hv/hv.c 2011-07-21 22:17:23.000000000 -0400
32549 +++ linux-3.0.4/drivers/staging/hv/hv.c 2011-08-23 21:47:56.000000000 -0400
32550 @@ -132,7 +132,7 @@ static u64 do_hypercall(u64 control, voi
32551 u64 output_address = (output) ? virt_to_phys(output) : 0;
32552 u32 output_address_hi = output_address >> 32;
32553 u32 output_address_lo = output_address & 0xFFFFFFFF;
32554 - volatile void *hypercall_page = hv_context.hypercall_page;
32555 + volatile void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
32556
32557 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
32558 "=a"(hv_status_lo) : "d" (control_hi),
32559 diff -urNp linux-3.0.4/drivers/staging/hv/hv_mouse.c linux-3.0.4/drivers/staging/hv/hv_mouse.c
32560 --- linux-3.0.4/drivers/staging/hv/hv_mouse.c 2011-07-21 22:17:23.000000000 -0400
32561 +++ linux-3.0.4/drivers/staging/hv/hv_mouse.c 2011-08-23 21:47:56.000000000 -0400
32562 @@ -879,8 +879,10 @@ static void reportdesc_callback(struct h
32563 if (hid_dev) {
32564 DPRINT_INFO(INPUTVSC_DRV, "hid_device created");
32565
32566 - hid_dev->ll_driver->open = mousevsc_hid_open;
32567 - hid_dev->ll_driver->close = mousevsc_hid_close;
32568 + pax_open_kernel();
32569 + *(void **)&hid_dev->ll_driver->open = mousevsc_hid_open;
32570 + *(void **)&hid_dev->ll_driver->close = mousevsc_hid_close;
32571 + pax_close_kernel();
32572
32573 hid_dev->bus = BUS_VIRTUAL;
32574 hid_dev->vendor = input_device_ctx->device_info.vendor;
32575 diff -urNp linux-3.0.4/drivers/staging/hv/hyperv_vmbus.h linux-3.0.4/drivers/staging/hv/hyperv_vmbus.h
32576 --- linux-3.0.4/drivers/staging/hv/hyperv_vmbus.h 2011-07-21 22:17:23.000000000 -0400
32577 +++ linux-3.0.4/drivers/staging/hv/hyperv_vmbus.h 2011-08-23 21:47:56.000000000 -0400
32578 @@ -559,7 +559,7 @@ enum vmbus_connect_state {
32579 struct vmbus_connection {
32580 enum vmbus_connect_state conn_state;
32581
32582 - atomic_t next_gpadl_handle;
32583 + atomic_unchecked_t next_gpadl_handle;
32584
32585 /*
32586 * Represents channel interrupts. Each bit position represents a
32587 diff -urNp linux-3.0.4/drivers/staging/hv/rndis_filter.c linux-3.0.4/drivers/staging/hv/rndis_filter.c
32588 --- linux-3.0.4/drivers/staging/hv/rndis_filter.c 2011-09-02 18:11:21.000000000 -0400
32589 +++ linux-3.0.4/drivers/staging/hv/rndis_filter.c 2011-08-23 21:47:56.000000000 -0400
32590 @@ -43,7 +43,7 @@ struct rndis_device {
32591
32592 enum rndis_device_state state;
32593 u32 link_stat;
32594 - atomic_t new_req_id;
32595 + atomic_unchecked_t new_req_id;
32596
32597 spinlock_t request_lock;
32598 struct list_head req_list;
32599 @@ -117,7 +117,7 @@ static struct rndis_request *get_rndis_r
32600 * template
32601 */
32602 set = &rndis_msg->msg.set_req;
32603 - set->req_id = atomic_inc_return(&dev->new_req_id);
32604 + set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
32605
32606 /* Add to the request list */
32607 spin_lock_irqsave(&dev->request_lock, flags);
32608 @@ -637,7 +637,7 @@ static void rndis_filter_halt_device(str
32609
32610 /* Setup the rndis set */
32611 halt = &request->request_msg.msg.halt_req;
32612 - halt->req_id = atomic_inc_return(&dev->new_req_id);
32613 + halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
32614
32615 /* Ignore return since this msg is optional. */
32616 rndis_filter_send_request(dev, request);
32617 diff -urNp linux-3.0.4/drivers/staging/hv/vmbus_drv.c linux-3.0.4/drivers/staging/hv/vmbus_drv.c
32618 --- linux-3.0.4/drivers/staging/hv/vmbus_drv.c 2011-07-21 22:17:23.000000000 -0400
32619 +++ linux-3.0.4/drivers/staging/hv/vmbus_drv.c 2011-08-23 21:47:56.000000000 -0400
32620 @@ -668,11 +668,11 @@ int vmbus_child_device_register(struct h
32621 {
32622 int ret = 0;
32623
32624 - static atomic_t device_num = ATOMIC_INIT(0);
32625 + static atomic_unchecked_t device_num = ATOMIC_INIT(0);
32626
32627 /* Set the device name. Otherwise, device_register() will fail. */
32628 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
32629 - atomic_inc_return(&device_num));
32630 + atomic_inc_return_unchecked(&device_num));
32631
32632 /* The new device belongs to this bus */
32633 child_device_obj->device.bus = &hv_bus; /* device->dev.bus; */
32634 diff -urNp linux-3.0.4/drivers/staging/iio/ring_generic.h linux-3.0.4/drivers/staging/iio/ring_generic.h
32635 --- linux-3.0.4/drivers/staging/iio/ring_generic.h 2011-07-21 22:17:23.000000000 -0400
32636 +++ linux-3.0.4/drivers/staging/iio/ring_generic.h 2011-08-23 21:47:56.000000000 -0400
32637 @@ -62,7 +62,7 @@ struct iio_ring_access_funcs {
32638
32639 int (*is_enabled)(struct iio_ring_buffer *ring);
32640 int (*enable)(struct iio_ring_buffer *ring);
32641 -};
32642 +} __no_const;
32643
32644 struct iio_ring_setup_ops {
32645 int (*preenable)(struct iio_dev *);
32646 diff -urNp linux-3.0.4/drivers/staging/octeon/ethernet.c linux-3.0.4/drivers/staging/octeon/ethernet.c
32647 --- linux-3.0.4/drivers/staging/octeon/ethernet.c 2011-07-21 22:17:23.000000000 -0400
32648 +++ linux-3.0.4/drivers/staging/octeon/ethernet.c 2011-08-23 21:47:56.000000000 -0400
32649 @@ -258,11 +258,11 @@ static struct net_device_stats *cvm_oct_
32650 * since the RX tasklet also increments it.
32651 */
32652 #ifdef CONFIG_64BIT
32653 - atomic64_add(rx_status.dropped_packets,
32654 - (atomic64_t *)&priv->stats.rx_dropped);
32655 + atomic64_add_unchecked(rx_status.dropped_packets,
32656 + (atomic64_unchecked_t *)&priv->stats.rx_dropped);
32657 #else
32658 - atomic_add(rx_status.dropped_packets,
32659 - (atomic_t *)&priv->stats.rx_dropped);
32660 + atomic_add_unchecked(rx_status.dropped_packets,
32661 + (atomic_unchecked_t *)&priv->stats.rx_dropped);
32662 #endif
32663 }
32664
32665 diff -urNp linux-3.0.4/drivers/staging/octeon/ethernet-rx.c linux-3.0.4/drivers/staging/octeon/ethernet-rx.c
32666 --- linux-3.0.4/drivers/staging/octeon/ethernet-rx.c 2011-07-21 22:17:23.000000000 -0400
32667 +++ linux-3.0.4/drivers/staging/octeon/ethernet-rx.c 2011-08-23 21:47:56.000000000 -0400
32668 @@ -417,11 +417,11 @@ static int cvm_oct_napi_poll(struct napi
32669 /* Increment RX stats for virtual ports */
32670 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
32671 #ifdef CONFIG_64BIT
32672 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
32673 - atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
32674 + atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
32675 + atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
32676 #else
32677 - atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
32678 - atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
32679 + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
32680 + atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
32681 #endif
32682 }
32683 netif_receive_skb(skb);
32684 @@ -433,9 +433,9 @@ static int cvm_oct_napi_poll(struct napi
32685 dev->name);
32686 */
32687 #ifdef CONFIG_64BIT
32688 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
32689 + atomic64_unchecked_add(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
32690 #else
32691 - atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
32692 + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
32693 #endif
32694 dev_kfree_skb_irq(skb);
32695 }
32696 diff -urNp linux-3.0.4/drivers/staging/pohmelfs/inode.c linux-3.0.4/drivers/staging/pohmelfs/inode.c
32697 --- linux-3.0.4/drivers/staging/pohmelfs/inode.c 2011-07-21 22:17:23.000000000 -0400
32698 +++ linux-3.0.4/drivers/staging/pohmelfs/inode.c 2011-08-23 21:47:56.000000000 -0400
32699 @@ -1856,7 +1856,7 @@ static int pohmelfs_fill_super(struct su
32700 mutex_init(&psb->mcache_lock);
32701 psb->mcache_root = RB_ROOT;
32702 psb->mcache_timeout = msecs_to_jiffies(5000);
32703 - atomic_long_set(&psb->mcache_gen, 0);
32704 + atomic_long_set_unchecked(&psb->mcache_gen, 0);
32705
32706 psb->trans_max_pages = 100;
32707
32708 @@ -1871,7 +1871,7 @@ static int pohmelfs_fill_super(struct su
32709 INIT_LIST_HEAD(&psb->crypto_ready_list);
32710 INIT_LIST_HEAD(&psb->crypto_active_list);
32711
32712 - atomic_set(&psb->trans_gen, 1);
32713 + atomic_set_unchecked(&psb->trans_gen, 1);
32714 atomic_long_set(&psb->total_inodes, 0);
32715
32716 mutex_init(&psb->state_lock);
32717 diff -urNp linux-3.0.4/drivers/staging/pohmelfs/mcache.c linux-3.0.4/drivers/staging/pohmelfs/mcache.c
32718 --- linux-3.0.4/drivers/staging/pohmelfs/mcache.c 2011-07-21 22:17:23.000000000 -0400
32719 +++ linux-3.0.4/drivers/staging/pohmelfs/mcache.c 2011-08-23 21:47:56.000000000 -0400
32720 @@ -121,7 +121,7 @@ struct pohmelfs_mcache *pohmelfs_mcache_
32721 m->data = data;
32722 m->start = start;
32723 m->size = size;
32724 - m->gen = atomic_long_inc_return(&psb->mcache_gen);
32725 + m->gen = atomic_long_inc_return_unchecked(&psb->mcache_gen);
32726
32727 mutex_lock(&psb->mcache_lock);
32728 err = pohmelfs_mcache_insert(psb, m);
32729 diff -urNp linux-3.0.4/drivers/staging/pohmelfs/netfs.h linux-3.0.4/drivers/staging/pohmelfs/netfs.h
32730 --- linux-3.0.4/drivers/staging/pohmelfs/netfs.h 2011-07-21 22:17:23.000000000 -0400
32731 +++ linux-3.0.4/drivers/staging/pohmelfs/netfs.h 2011-08-23 21:47:56.000000000 -0400
32732 @@ -571,14 +571,14 @@ struct pohmelfs_config;
32733 struct pohmelfs_sb {
32734 struct rb_root mcache_root;
32735 struct mutex mcache_lock;
32736 - atomic_long_t mcache_gen;
32737 + atomic_long_unchecked_t mcache_gen;
32738 unsigned long mcache_timeout;
32739
32740 unsigned int idx;
32741
32742 unsigned int trans_retries;
32743
32744 - atomic_t trans_gen;
32745 + atomic_unchecked_t trans_gen;
32746
32747 unsigned int crypto_attached_size;
32748 unsigned int crypto_align_size;
32749 diff -urNp linux-3.0.4/drivers/staging/pohmelfs/trans.c linux-3.0.4/drivers/staging/pohmelfs/trans.c
32750 --- linux-3.0.4/drivers/staging/pohmelfs/trans.c 2011-07-21 22:17:23.000000000 -0400
32751 +++ linux-3.0.4/drivers/staging/pohmelfs/trans.c 2011-08-23 21:47:56.000000000 -0400
32752 @@ -492,7 +492,7 @@ int netfs_trans_finish(struct netfs_tran
32753 int err;
32754 struct netfs_cmd *cmd = t->iovec.iov_base;
32755
32756 - t->gen = atomic_inc_return(&psb->trans_gen);
32757 + t->gen = atomic_inc_return_unchecked(&psb->trans_gen);
32758
32759 cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) +
32760 t->attached_size + t->attached_pages * sizeof(struct netfs_cmd);
32761 diff -urNp linux-3.0.4/drivers/staging/rtl8712/rtl871x_io.h linux-3.0.4/drivers/staging/rtl8712/rtl871x_io.h
32762 --- linux-3.0.4/drivers/staging/rtl8712/rtl871x_io.h 2011-07-21 22:17:23.000000000 -0400
32763 +++ linux-3.0.4/drivers/staging/rtl8712/rtl871x_io.h 2011-08-23 21:47:56.000000000 -0400
32764 @@ -83,7 +83,7 @@ struct _io_ops {
32765 u8 *pmem);
32766 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
32767 u8 *pmem);
32768 -};
32769 +} __no_const;
32770
32771 struct io_req {
32772 struct list_head list;
32773 diff -urNp linux-3.0.4/drivers/staging/sbe-2t3e3/netdev.c linux-3.0.4/drivers/staging/sbe-2t3e3/netdev.c
32774 --- linux-3.0.4/drivers/staging/sbe-2t3e3/netdev.c 2011-07-21 22:17:23.000000000 -0400
32775 +++ linux-3.0.4/drivers/staging/sbe-2t3e3/netdev.c 2011-08-24 18:21:41.000000000 -0400
32776 @@ -51,7 +51,7 @@ int t3e3_ioctl(struct net_device *dev, s
32777 t3e3_if_config(sc, cmd_2t3e3, (char *)&param, &resp, &rlen);
32778
32779 if (rlen)
32780 - if (copy_to_user(data, &resp, rlen))
32781 + if (rlen > sizeof resp || copy_to_user(data, &resp, rlen))
32782 return -EFAULT;
32783
32784 return 0;
32785 diff -urNp linux-3.0.4/drivers/staging/tty/stallion.c linux-3.0.4/drivers/staging/tty/stallion.c
32786 --- linux-3.0.4/drivers/staging/tty/stallion.c 2011-07-21 22:17:23.000000000 -0400
32787 +++ linux-3.0.4/drivers/staging/tty/stallion.c 2011-08-23 21:48:14.000000000 -0400
32788 @@ -2406,6 +2406,8 @@ static int stl_getportstruct(struct stlp
32789 struct stlport stl_dummyport;
32790 struct stlport *portp;
32791
32792 + pax_track_stack();
32793 +
32794 if (copy_from_user(&stl_dummyport, arg, sizeof(struct stlport)))
32795 return -EFAULT;
32796 portp = stl_getport(stl_dummyport.brdnr, stl_dummyport.panelnr,
32797 diff -urNp linux-3.0.4/drivers/staging/usbip/usbip_common.h linux-3.0.4/drivers/staging/usbip/usbip_common.h
32798 --- linux-3.0.4/drivers/staging/usbip/usbip_common.h 2011-07-21 22:17:23.000000000 -0400
32799 +++ linux-3.0.4/drivers/staging/usbip/usbip_common.h 2011-08-23 21:47:56.000000000 -0400
32800 @@ -315,7 +315,7 @@ struct usbip_device {
32801 void (*shutdown)(struct usbip_device *);
32802 void (*reset)(struct usbip_device *);
32803 void (*unusable)(struct usbip_device *);
32804 - } eh_ops;
32805 + } __no_const eh_ops;
32806 };
32807
32808 void usbip_pack_pdu(struct usbip_header *pdu, struct urb *urb, int cmd,
32809 diff -urNp linux-3.0.4/drivers/staging/usbip/vhci.h linux-3.0.4/drivers/staging/usbip/vhci.h
32810 --- linux-3.0.4/drivers/staging/usbip/vhci.h 2011-07-21 22:17:23.000000000 -0400
32811 +++ linux-3.0.4/drivers/staging/usbip/vhci.h 2011-08-23 21:47:56.000000000 -0400
32812 @@ -94,7 +94,7 @@ struct vhci_hcd {
32813 unsigned resuming:1;
32814 unsigned long re_timeout;
32815
32816 - atomic_t seqnum;
32817 + atomic_unchecked_t seqnum;
32818
32819 /*
32820 * NOTE:
32821 diff -urNp linux-3.0.4/drivers/staging/usbip/vhci_hcd.c linux-3.0.4/drivers/staging/usbip/vhci_hcd.c
32822 --- linux-3.0.4/drivers/staging/usbip/vhci_hcd.c 2011-09-02 18:11:21.000000000 -0400
32823 +++ linux-3.0.4/drivers/staging/usbip/vhci_hcd.c 2011-08-23 21:47:56.000000000 -0400
32824 @@ -511,7 +511,7 @@ static void vhci_tx_urb(struct urb *urb)
32825 return;
32826 }
32827
32828 - priv->seqnum = atomic_inc_return(&the_controller->seqnum);
32829 + priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
32830 if (priv->seqnum == 0xffff)
32831 dev_info(&urb->dev->dev, "seqnum max\n");
32832
32833 @@ -765,7 +765,7 @@ static int vhci_urb_dequeue(struct usb_h
32834 return -ENOMEM;
32835 }
32836
32837 - unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
32838 + unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
32839 if (unlink->seqnum == 0xffff)
32840 pr_info("seqnum max\n");
32841
32842 @@ -955,7 +955,7 @@ static int vhci_start(struct usb_hcd *hc
32843 vdev->rhport = rhport;
32844 }
32845
32846 - atomic_set(&vhci->seqnum, 0);
32847 + atomic_set_unchecked(&vhci->seqnum, 0);
32848 spin_lock_init(&vhci->lock);
32849
32850 hcd->power_budget = 0; /* no limit */
32851 diff -urNp linux-3.0.4/drivers/staging/usbip/vhci_rx.c linux-3.0.4/drivers/staging/usbip/vhci_rx.c
32852 --- linux-3.0.4/drivers/staging/usbip/vhci_rx.c 2011-07-21 22:17:23.000000000 -0400
32853 +++ linux-3.0.4/drivers/staging/usbip/vhci_rx.c 2011-08-23 21:47:56.000000000 -0400
32854 @@ -76,7 +76,7 @@ static void vhci_recv_ret_submit(struct
32855 if (!urb) {
32856 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
32857 pr_info("max seqnum %d\n",
32858 - atomic_read(&the_controller->seqnum));
32859 + atomic_read_unchecked(&the_controller->seqnum));
32860 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
32861 return;
32862 }
32863 diff -urNp linux-3.0.4/drivers/staging/vt6655/hostap.c linux-3.0.4/drivers/staging/vt6655/hostap.c
32864 --- linux-3.0.4/drivers/staging/vt6655/hostap.c 2011-07-21 22:17:23.000000000 -0400
32865 +++ linux-3.0.4/drivers/staging/vt6655/hostap.c 2011-08-23 21:47:56.000000000 -0400
32866 @@ -79,14 +79,13 @@ static int msglevel
32867 *
32868 */
32869
32870 +static net_device_ops_no_const apdev_netdev_ops;
32871 +
32872 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
32873 {
32874 PSDevice apdev_priv;
32875 struct net_device *dev = pDevice->dev;
32876 int ret;
32877 - const struct net_device_ops apdev_netdev_ops = {
32878 - .ndo_start_xmit = pDevice->tx_80211,
32879 - };
32880
32881 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
32882
32883 @@ -98,6 +97,8 @@ static int hostap_enable_hostapd(PSDevic
32884 *apdev_priv = *pDevice;
32885 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
32886
32887 + /* only half broken now */
32888 + apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
32889 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
32890
32891 pDevice->apdev->type = ARPHRD_IEEE80211;
32892 diff -urNp linux-3.0.4/drivers/staging/vt6656/hostap.c linux-3.0.4/drivers/staging/vt6656/hostap.c
32893 --- linux-3.0.4/drivers/staging/vt6656/hostap.c 2011-07-21 22:17:23.000000000 -0400
32894 +++ linux-3.0.4/drivers/staging/vt6656/hostap.c 2011-08-23 21:47:56.000000000 -0400
32895 @@ -80,14 +80,13 @@ static int msglevel
32896 *
32897 */
32898
32899 +static net_device_ops_no_const apdev_netdev_ops;
32900 +
32901 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
32902 {
32903 PSDevice apdev_priv;
32904 struct net_device *dev = pDevice->dev;
32905 int ret;
32906 - const struct net_device_ops apdev_netdev_ops = {
32907 - .ndo_start_xmit = pDevice->tx_80211,
32908 - };
32909
32910 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
32911
32912 @@ -99,6 +98,8 @@ static int hostap_enable_hostapd(PSDevic
32913 *apdev_priv = *pDevice;
32914 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
32915
32916 + /* only half broken now */
32917 + apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
32918 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
32919
32920 pDevice->apdev->type = ARPHRD_IEEE80211;
32921 diff -urNp linux-3.0.4/drivers/staging/wlan-ng/hfa384x_usb.c linux-3.0.4/drivers/staging/wlan-ng/hfa384x_usb.c
32922 --- linux-3.0.4/drivers/staging/wlan-ng/hfa384x_usb.c 2011-07-21 22:17:23.000000000 -0400
32923 +++ linux-3.0.4/drivers/staging/wlan-ng/hfa384x_usb.c 2011-08-23 21:47:56.000000000 -0400
32924 @@ -204,7 +204,7 @@ static void unlocked_usbctlx_complete(hf
32925
32926 struct usbctlx_completor {
32927 int (*complete) (struct usbctlx_completor *);
32928 -};
32929 +} __no_const;
32930
32931 static int
32932 hfa384x_usbctlx_complete_sync(hfa384x_t *hw,
32933 diff -urNp linux-3.0.4/drivers/staging/zcache/tmem.c linux-3.0.4/drivers/staging/zcache/tmem.c
32934 --- linux-3.0.4/drivers/staging/zcache/tmem.c 2011-07-21 22:17:23.000000000 -0400
32935 +++ linux-3.0.4/drivers/staging/zcache/tmem.c 2011-08-23 21:47:56.000000000 -0400
32936 @@ -39,7 +39,7 @@
32937 * A tmem host implementation must use this function to register callbacks
32938 * for memory allocation.
32939 */
32940 -static struct tmem_hostops tmem_hostops;
32941 +static tmem_hostops_no_const tmem_hostops;
32942
32943 static void tmem_objnode_tree_init(void);
32944
32945 @@ -53,7 +53,7 @@ void tmem_register_hostops(struct tmem_h
32946 * A tmem host implementation must use this function to register
32947 * callbacks for a page-accessible memory (PAM) implementation
32948 */
32949 -static struct tmem_pamops tmem_pamops;
32950 +static tmem_pamops_no_const tmem_pamops;
32951
32952 void tmem_register_pamops(struct tmem_pamops *m)
32953 {
32954 diff -urNp linux-3.0.4/drivers/staging/zcache/tmem.h linux-3.0.4/drivers/staging/zcache/tmem.h
32955 --- linux-3.0.4/drivers/staging/zcache/tmem.h 2011-07-21 22:17:23.000000000 -0400
32956 +++ linux-3.0.4/drivers/staging/zcache/tmem.h 2011-08-23 21:47:56.000000000 -0400
32957 @@ -171,6 +171,7 @@ struct tmem_pamops {
32958 int (*get_data)(struct page *, void *, struct tmem_pool *);
32959 void (*free)(void *, struct tmem_pool *);
32960 };
32961 +typedef struct tmem_pamops __no_const tmem_pamops_no_const;
32962 extern void tmem_register_pamops(struct tmem_pamops *m);
32963
32964 /* memory allocation methods provided by the host implementation */
32965 @@ -180,6 +181,7 @@ struct tmem_hostops {
32966 struct tmem_objnode *(*objnode_alloc)(struct tmem_pool *);
32967 void (*objnode_free)(struct tmem_objnode *, struct tmem_pool *);
32968 };
32969 +typedef struct tmem_hostops __no_const tmem_hostops_no_const;
32970 extern void tmem_register_hostops(struct tmem_hostops *m);
32971
32972 /* core tmem accessor functions */
32973 diff -urNp linux-3.0.4/drivers/target/target_core_alua.c linux-3.0.4/drivers/target/target_core_alua.c
32974 --- linux-3.0.4/drivers/target/target_core_alua.c 2011-07-21 22:17:23.000000000 -0400
32975 +++ linux-3.0.4/drivers/target/target_core_alua.c 2011-08-23 21:48:14.000000000 -0400
32976 @@ -675,6 +675,8 @@ static int core_alua_update_tpg_primary_
32977 char path[ALUA_METADATA_PATH_LEN];
32978 int len;
32979
32980 + pax_track_stack();
32981 +
32982 memset(path, 0, ALUA_METADATA_PATH_LEN);
32983
32984 len = snprintf(md_buf, tg_pt_gp->tg_pt_gp_md_buf_len,
32985 @@ -938,6 +940,8 @@ static int core_alua_update_tpg_secondar
32986 char path[ALUA_METADATA_PATH_LEN], wwn[ALUA_SECONDARY_METADATA_WWN_LEN];
32987 int len;
32988
32989 + pax_track_stack();
32990 +
32991 memset(path, 0, ALUA_METADATA_PATH_LEN);
32992 memset(wwn, 0, ALUA_SECONDARY_METADATA_WWN_LEN);
32993
32994 diff -urNp linux-3.0.4/drivers/target/target_core_cdb.c linux-3.0.4/drivers/target/target_core_cdb.c
32995 --- linux-3.0.4/drivers/target/target_core_cdb.c 2011-07-21 22:17:23.000000000 -0400
32996 +++ linux-3.0.4/drivers/target/target_core_cdb.c 2011-08-23 21:48:14.000000000 -0400
32997 @@ -838,6 +838,8 @@ target_emulate_modesense(struct se_cmd *
32998 int length = 0;
32999 unsigned char buf[SE_MODE_PAGE_BUF];
33000
33001 + pax_track_stack();
33002 +
33003 memset(buf, 0, SE_MODE_PAGE_BUF);
33004
33005 switch (cdb[2] & 0x3f) {
33006 diff -urNp linux-3.0.4/drivers/target/target_core_configfs.c linux-3.0.4/drivers/target/target_core_configfs.c
33007 --- linux-3.0.4/drivers/target/target_core_configfs.c 2011-07-21 22:17:23.000000000 -0400
33008 +++ linux-3.0.4/drivers/target/target_core_configfs.c 2011-08-23 21:48:14.000000000 -0400
33009 @@ -1276,6 +1276,8 @@ static ssize_t target_core_dev_pr_show_a
33010 ssize_t len = 0;
33011 int reg_count = 0, prf_isid;
33012
33013 + pax_track_stack();
33014 +
33015 if (!(su_dev->se_dev_ptr))
33016 return -ENODEV;
33017
33018 diff -urNp linux-3.0.4/drivers/target/target_core_pr.c linux-3.0.4/drivers/target/target_core_pr.c
33019 --- linux-3.0.4/drivers/target/target_core_pr.c 2011-07-21 22:17:23.000000000 -0400
33020 +++ linux-3.0.4/drivers/target/target_core_pr.c 2011-08-23 21:48:14.000000000 -0400
33021 @@ -918,6 +918,8 @@ static int __core_scsi3_check_aptpl_regi
33022 unsigned char t_port[PR_APTPL_MAX_TPORT_LEN];
33023 u16 tpgt;
33024
33025 + pax_track_stack();
33026 +
33027 memset(i_port, 0, PR_APTPL_MAX_IPORT_LEN);
33028 memset(t_port, 0, PR_APTPL_MAX_TPORT_LEN);
33029 /*
33030 @@ -1861,6 +1863,8 @@ static int __core_scsi3_update_aptpl_buf
33031 ssize_t len = 0;
33032 int reg_count = 0;
33033
33034 + pax_track_stack();
33035 +
33036 memset(buf, 0, pr_aptpl_buf_len);
33037 /*
33038 * Called to clear metadata once APTPL has been deactivated.
33039 @@ -1983,6 +1987,8 @@ static int __core_scsi3_write_aptpl_to_f
33040 char path[512];
33041 int ret;
33042
33043 + pax_track_stack();
33044 +
33045 memset(iov, 0, sizeof(struct iovec));
33046 memset(path, 0, 512);
33047
33048 diff -urNp linux-3.0.4/drivers/target/target_core_tmr.c linux-3.0.4/drivers/target/target_core_tmr.c
33049 --- linux-3.0.4/drivers/target/target_core_tmr.c 2011-07-21 22:17:23.000000000 -0400
33050 +++ linux-3.0.4/drivers/target/target_core_tmr.c 2011-08-23 21:47:56.000000000 -0400
33051 @@ -269,7 +269,7 @@ int core_tmr_lun_reset(
33052 CMD_TFO(cmd)->get_task_tag(cmd), cmd->pr_res_key,
33053 T_TASK(cmd)->t_task_cdbs,
33054 atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
33055 - atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
33056 + atomic_read_unchecked(&T_TASK(cmd)->t_task_cdbs_sent),
33057 atomic_read(&T_TASK(cmd)->t_transport_active),
33058 atomic_read(&T_TASK(cmd)->t_transport_stop),
33059 atomic_read(&T_TASK(cmd)->t_transport_sent));
33060 @@ -311,7 +311,7 @@ int core_tmr_lun_reset(
33061 DEBUG_LR("LUN_RESET: got t_transport_active = 1 for"
33062 " task: %p, t_fe_count: %d dev: %p\n", task,
33063 fe_count, dev);
33064 - atomic_set(&T_TASK(cmd)->t_transport_aborted, 1);
33065 + atomic_set_unchecked(&T_TASK(cmd)->t_transport_aborted, 1);
33066 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
33067 flags);
33068 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
33069 @@ -321,7 +321,7 @@ int core_tmr_lun_reset(
33070 }
33071 DEBUG_LR("LUN_RESET: Got t_transport_active = 0 for task: %p,"
33072 " t_fe_count: %d dev: %p\n", task, fe_count, dev);
33073 - atomic_set(&T_TASK(cmd)->t_transport_aborted, 1);
33074 + atomic_set_unchecked(&T_TASK(cmd)->t_transport_aborted, 1);
33075 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
33076 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
33077
33078 diff -urNp linux-3.0.4/drivers/target/target_core_transport.c linux-3.0.4/drivers/target/target_core_transport.c
33079 --- linux-3.0.4/drivers/target/target_core_transport.c 2011-07-21 22:17:23.000000000 -0400
33080 +++ linux-3.0.4/drivers/target/target_core_transport.c 2011-08-23 21:47:56.000000000 -0400
33081 @@ -1681,7 +1681,7 @@ struct se_device *transport_add_device_t
33082
33083 dev->queue_depth = dev_limits->queue_depth;
33084 atomic_set(&dev->depth_left, dev->queue_depth);
33085 - atomic_set(&dev->dev_ordered_id, 0);
33086 + atomic_set_unchecked(&dev->dev_ordered_id, 0);
33087
33088 se_dev_set_default_attribs(dev, dev_limits);
33089
33090 @@ -1882,7 +1882,7 @@ static int transport_check_alloc_task_at
33091 * Used to determine when ORDERED commands should go from
33092 * Dormant to Active status.
33093 */
33094 - cmd->se_ordered_id = atomic_inc_return(&SE_DEV(cmd)->dev_ordered_id);
33095 + cmd->se_ordered_id = atomic_inc_return_unchecked(&SE_DEV(cmd)->dev_ordered_id);
33096 smp_mb__after_atomic_inc();
33097 DEBUG_STA("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
33098 cmd->se_ordered_id, cmd->sam_task_attr,
33099 @@ -2169,7 +2169,7 @@ static void transport_generic_request_fa
33100 " t_transport_active: %d t_transport_stop: %d"
33101 " t_transport_sent: %d\n", T_TASK(cmd)->t_task_cdbs,
33102 atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
33103 - atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
33104 + atomic_read_unchecked(&T_TASK(cmd)->t_task_cdbs_sent),
33105 atomic_read(&T_TASK(cmd)->t_task_cdbs_ex_left),
33106 atomic_read(&T_TASK(cmd)->t_transport_active),
33107 atomic_read(&T_TASK(cmd)->t_transport_stop),
33108 @@ -2673,9 +2673,9 @@ check_depth:
33109 spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
33110 atomic_set(&task->task_active, 1);
33111 atomic_set(&task->task_sent, 1);
33112 - atomic_inc(&T_TASK(cmd)->t_task_cdbs_sent);
33113 + atomic_inc_unchecked(&T_TASK(cmd)->t_task_cdbs_sent);
33114
33115 - if (atomic_read(&T_TASK(cmd)->t_task_cdbs_sent) ==
33116 + if (atomic_read_unchecked(&T_TASK(cmd)->t_task_cdbs_sent) ==
33117 T_TASK(cmd)->t_task_cdbs)
33118 atomic_set(&cmd->transport_sent, 1);
33119
33120 @@ -5568,7 +5568,7 @@ static void transport_generic_wait_for_t
33121 atomic_set(&T_TASK(cmd)->transport_lun_stop, 0);
33122 }
33123 if (!atomic_read(&T_TASK(cmd)->t_transport_active) ||
33124 - atomic_read(&T_TASK(cmd)->t_transport_aborted))
33125 + atomic_read_unchecked(&T_TASK(cmd)->t_transport_aborted))
33126 goto remove;
33127
33128 atomic_set(&T_TASK(cmd)->t_transport_stop, 1);
33129 @@ -5797,7 +5797,7 @@ int transport_check_aborted_status(struc
33130 {
33131 int ret = 0;
33132
33133 - if (atomic_read(&T_TASK(cmd)->t_transport_aborted) != 0) {
33134 + if (atomic_read_unchecked(&T_TASK(cmd)->t_transport_aborted) != 0) {
33135 if (!(send_status) ||
33136 (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
33137 return 1;
33138 @@ -5825,7 +5825,7 @@ void transport_send_task_abort(struct se
33139 */
33140 if (cmd->data_direction == DMA_TO_DEVICE) {
33141 if (CMD_TFO(cmd)->write_pending_status(cmd) != 0) {
33142 - atomic_inc(&T_TASK(cmd)->t_transport_aborted);
33143 + atomic_inc_unchecked(&T_TASK(cmd)->t_transport_aborted);
33144 smp_mb__after_atomic_inc();
33145 cmd->scsi_status = SAM_STAT_TASK_ABORTED;
33146 transport_new_cmd_failure(cmd);
33147 @@ -5949,7 +5949,7 @@ static void transport_processing_shutdow
33148 CMD_TFO(cmd)->get_task_tag(cmd),
33149 T_TASK(cmd)->t_task_cdbs,
33150 atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
33151 - atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
33152 + atomic_read_unchecked(&T_TASK(cmd)->t_task_cdbs_sent),
33153 atomic_read(&T_TASK(cmd)->t_transport_active),
33154 atomic_read(&T_TASK(cmd)->t_transport_stop),
33155 atomic_read(&T_TASK(cmd)->t_transport_sent));
33156 diff -urNp linux-3.0.4/drivers/telephony/ixj.c linux-3.0.4/drivers/telephony/ixj.c
33157 --- linux-3.0.4/drivers/telephony/ixj.c 2011-07-21 22:17:23.000000000 -0400
33158 +++ linux-3.0.4/drivers/telephony/ixj.c 2011-08-23 21:48:14.000000000 -0400
33159 @@ -4976,6 +4976,8 @@ static int ixj_daa_cid_read(IXJ *j)
33160 bool mContinue;
33161 char *pIn, *pOut;
33162
33163 + pax_track_stack();
33164 +
33165 if (!SCI_Prepare(j))
33166 return 0;
33167
33168 diff -urNp linux-3.0.4/drivers/tty/hvc/hvcs.c linux-3.0.4/drivers/tty/hvc/hvcs.c
33169 --- linux-3.0.4/drivers/tty/hvc/hvcs.c 2011-07-21 22:17:23.000000000 -0400
33170 +++ linux-3.0.4/drivers/tty/hvc/hvcs.c 2011-08-23 21:47:56.000000000 -0400
33171 @@ -83,6 +83,7 @@
33172 #include <asm/hvcserver.h>
33173 #include <asm/uaccess.h>
33174 #include <asm/vio.h>
33175 +#include <asm/local.h>
33176
33177 /*
33178 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
33179 @@ -270,7 +271,7 @@ struct hvcs_struct {
33180 unsigned int index;
33181
33182 struct tty_struct *tty;
33183 - int open_count;
33184 + local_t open_count;
33185
33186 /*
33187 * Used to tell the driver kernel_thread what operations need to take
33188 @@ -422,7 +423,7 @@ static ssize_t hvcs_vterm_state_store(st
33189
33190 spin_lock_irqsave(&hvcsd->lock, flags);
33191
33192 - if (hvcsd->open_count > 0) {
33193 + if (local_read(&hvcsd->open_count) > 0) {
33194 spin_unlock_irqrestore(&hvcsd->lock, flags);
33195 printk(KERN_INFO "HVCS: vterm state unchanged. "
33196 "The hvcs device node is still in use.\n");
33197 @@ -1145,7 +1146,7 @@ static int hvcs_open(struct tty_struct *
33198 if ((retval = hvcs_partner_connect(hvcsd)))
33199 goto error_release;
33200
33201 - hvcsd->open_count = 1;
33202 + local_set(&hvcsd->open_count, 1);
33203 hvcsd->tty = tty;
33204 tty->driver_data = hvcsd;
33205
33206 @@ -1179,7 +1180,7 @@ fast_open:
33207
33208 spin_lock_irqsave(&hvcsd->lock, flags);
33209 kref_get(&hvcsd->kref);
33210 - hvcsd->open_count++;
33211 + local_inc(&hvcsd->open_count);
33212 hvcsd->todo_mask |= HVCS_SCHED_READ;
33213 spin_unlock_irqrestore(&hvcsd->lock, flags);
33214
33215 @@ -1223,7 +1224,7 @@ static void hvcs_close(struct tty_struct
33216 hvcsd = tty->driver_data;
33217
33218 spin_lock_irqsave(&hvcsd->lock, flags);
33219 - if (--hvcsd->open_count == 0) {
33220 + if (local_dec_and_test(&hvcsd->open_count)) {
33221
33222 vio_disable_interrupts(hvcsd->vdev);
33223
33224 @@ -1249,10 +1250,10 @@ static void hvcs_close(struct tty_struct
33225 free_irq(irq, hvcsd);
33226 kref_put(&hvcsd->kref, destroy_hvcs_struct);
33227 return;
33228 - } else if (hvcsd->open_count < 0) {
33229 + } else if (local_read(&hvcsd->open_count) < 0) {
33230 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
33231 " is missmanaged.\n",
33232 - hvcsd->vdev->unit_address, hvcsd->open_count);
33233 + hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
33234 }
33235
33236 spin_unlock_irqrestore(&hvcsd->lock, flags);
33237 @@ -1268,7 +1269,7 @@ static void hvcs_hangup(struct tty_struc
33238
33239 spin_lock_irqsave(&hvcsd->lock, flags);
33240 /* Preserve this so that we know how many kref refs to put */
33241 - temp_open_count = hvcsd->open_count;
33242 + temp_open_count = local_read(&hvcsd->open_count);
33243
33244 /*
33245 * Don't kref put inside the spinlock because the destruction
33246 @@ -1283,7 +1284,7 @@ static void hvcs_hangup(struct tty_struc
33247 hvcsd->tty->driver_data = NULL;
33248 hvcsd->tty = NULL;
33249
33250 - hvcsd->open_count = 0;
33251 + local_set(&hvcsd->open_count, 0);
33252
33253 /* This will drop any buffered data on the floor which is OK in a hangup
33254 * scenario. */
33255 @@ -1354,7 +1355,7 @@ static int hvcs_write(struct tty_struct
33256 * the middle of a write operation? This is a crummy place to do this
33257 * but we want to keep it all in the spinlock.
33258 */
33259 - if (hvcsd->open_count <= 0) {
33260 + if (local_read(&hvcsd->open_count) <= 0) {
33261 spin_unlock_irqrestore(&hvcsd->lock, flags);
33262 return -ENODEV;
33263 }
33264 @@ -1428,7 +1429,7 @@ static int hvcs_write_room(struct tty_st
33265 {
33266 struct hvcs_struct *hvcsd = tty->driver_data;
33267
33268 - if (!hvcsd || hvcsd->open_count <= 0)
33269 + if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
33270 return 0;
33271
33272 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
33273 diff -urNp linux-3.0.4/drivers/tty/ipwireless/tty.c linux-3.0.4/drivers/tty/ipwireless/tty.c
33274 --- linux-3.0.4/drivers/tty/ipwireless/tty.c 2011-07-21 22:17:23.000000000 -0400
33275 +++ linux-3.0.4/drivers/tty/ipwireless/tty.c 2011-08-23 21:47:56.000000000 -0400
33276 @@ -29,6 +29,7 @@
33277 #include <linux/tty_driver.h>
33278 #include <linux/tty_flip.h>
33279 #include <linux/uaccess.h>
33280 +#include <asm/local.h>
33281
33282 #include "tty.h"
33283 #include "network.h"
33284 @@ -51,7 +52,7 @@ struct ipw_tty {
33285 int tty_type;
33286 struct ipw_network *network;
33287 struct tty_struct *linux_tty;
33288 - int open_count;
33289 + local_t open_count;
33290 unsigned int control_lines;
33291 struct mutex ipw_tty_mutex;
33292 int tx_bytes_queued;
33293 @@ -127,10 +128,10 @@ static int ipw_open(struct tty_struct *l
33294 mutex_unlock(&tty->ipw_tty_mutex);
33295 return -ENODEV;
33296 }
33297 - if (tty->open_count == 0)
33298 + if (local_read(&tty->open_count) == 0)
33299 tty->tx_bytes_queued = 0;
33300
33301 - tty->open_count++;
33302 + local_inc(&tty->open_count);
33303
33304 tty->linux_tty = linux_tty;
33305 linux_tty->driver_data = tty;
33306 @@ -146,9 +147,7 @@ static int ipw_open(struct tty_struct *l
33307
33308 static void do_ipw_close(struct ipw_tty *tty)
33309 {
33310 - tty->open_count--;
33311 -
33312 - if (tty->open_count == 0) {
33313 + if (local_dec_return(&tty->open_count) == 0) {
33314 struct tty_struct *linux_tty = tty->linux_tty;
33315
33316 if (linux_tty != NULL) {
33317 @@ -169,7 +168,7 @@ static void ipw_hangup(struct tty_struct
33318 return;
33319
33320 mutex_lock(&tty->ipw_tty_mutex);
33321 - if (tty->open_count == 0) {
33322 + if (local_read(&tty->open_count) == 0) {
33323 mutex_unlock(&tty->ipw_tty_mutex);
33324 return;
33325 }
33326 @@ -198,7 +197,7 @@ void ipwireless_tty_received(struct ipw_
33327 return;
33328 }
33329
33330 - if (!tty->open_count) {
33331 + if (!local_read(&tty->open_count)) {
33332 mutex_unlock(&tty->ipw_tty_mutex);
33333 return;
33334 }
33335 @@ -240,7 +239,7 @@ static int ipw_write(struct tty_struct *
33336 return -ENODEV;
33337
33338 mutex_lock(&tty->ipw_tty_mutex);
33339 - if (!tty->open_count) {
33340 + if (!local_read(&tty->open_count)) {
33341 mutex_unlock(&tty->ipw_tty_mutex);
33342 return -EINVAL;
33343 }
33344 @@ -280,7 +279,7 @@ static int ipw_write_room(struct tty_str
33345 if (!tty)
33346 return -ENODEV;
33347
33348 - if (!tty->open_count)
33349 + if (!local_read(&tty->open_count))
33350 return -EINVAL;
33351
33352 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
33353 @@ -322,7 +321,7 @@ static int ipw_chars_in_buffer(struct tt
33354 if (!tty)
33355 return 0;
33356
33357 - if (!tty->open_count)
33358 + if (!local_read(&tty->open_count))
33359 return 0;
33360
33361 return tty->tx_bytes_queued;
33362 @@ -403,7 +402,7 @@ static int ipw_tiocmget(struct tty_struc
33363 if (!tty)
33364 return -ENODEV;
33365
33366 - if (!tty->open_count)
33367 + if (!local_read(&tty->open_count))
33368 return -EINVAL;
33369
33370 return get_control_lines(tty);
33371 @@ -419,7 +418,7 @@ ipw_tiocmset(struct tty_struct *linux_tt
33372 if (!tty)
33373 return -ENODEV;
33374
33375 - if (!tty->open_count)
33376 + if (!local_read(&tty->open_count))
33377 return -EINVAL;
33378
33379 return set_control_lines(tty, set, clear);
33380 @@ -433,7 +432,7 @@ static int ipw_ioctl(struct tty_struct *
33381 if (!tty)
33382 return -ENODEV;
33383
33384 - if (!tty->open_count)
33385 + if (!local_read(&tty->open_count))
33386 return -EINVAL;
33387
33388 /* FIXME: Exactly how is the tty object locked here .. */
33389 @@ -582,7 +581,7 @@ void ipwireless_tty_free(struct ipw_tty
33390 against a parallel ioctl etc */
33391 mutex_lock(&ttyj->ipw_tty_mutex);
33392 }
33393 - while (ttyj->open_count)
33394 + while (local_read(&ttyj->open_count))
33395 do_ipw_close(ttyj);
33396 ipwireless_disassociate_network_ttys(network,
33397 ttyj->channel_idx);
33398 diff -urNp linux-3.0.4/drivers/tty/n_gsm.c linux-3.0.4/drivers/tty/n_gsm.c
33399 --- linux-3.0.4/drivers/tty/n_gsm.c 2011-09-02 18:11:21.000000000 -0400
33400 +++ linux-3.0.4/drivers/tty/n_gsm.c 2011-08-23 21:47:56.000000000 -0400
33401 @@ -1589,7 +1589,7 @@ static struct gsm_dlci *gsm_dlci_alloc(s
33402 return NULL;
33403 spin_lock_init(&dlci->lock);
33404 dlci->fifo = &dlci->_fifo;
33405 - if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
33406 + if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
33407 kfree(dlci);
33408 return NULL;
33409 }
33410 diff -urNp linux-3.0.4/drivers/tty/n_tty.c linux-3.0.4/drivers/tty/n_tty.c
33411 --- linux-3.0.4/drivers/tty/n_tty.c 2011-07-21 22:17:23.000000000 -0400
33412 +++ linux-3.0.4/drivers/tty/n_tty.c 2011-08-23 21:47:56.000000000 -0400
33413 @@ -2123,6 +2123,7 @@ void n_tty_inherit_ops(struct tty_ldisc_
33414 {
33415 *ops = tty_ldisc_N_TTY;
33416 ops->owner = NULL;
33417 - ops->refcount = ops->flags = 0;
33418 + atomic_set(&ops->refcount, 0);
33419 + ops->flags = 0;
33420 }
33421 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
33422 diff -urNp linux-3.0.4/drivers/tty/pty.c linux-3.0.4/drivers/tty/pty.c
33423 --- linux-3.0.4/drivers/tty/pty.c 2011-07-21 22:17:23.000000000 -0400
33424 +++ linux-3.0.4/drivers/tty/pty.c 2011-08-23 21:47:56.000000000 -0400
33425 @@ -754,8 +754,10 @@ static void __init unix98_pty_init(void)
33426 register_sysctl_table(pty_root_table);
33427
33428 /* Now create the /dev/ptmx special device */
33429 + pax_open_kernel();
33430 tty_default_fops(&ptmx_fops);
33431 - ptmx_fops.open = ptmx_open;
33432 + *(void **)&ptmx_fops.open = ptmx_open;
33433 + pax_close_kernel();
33434
33435 cdev_init(&ptmx_cdev, &ptmx_fops);
33436 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
33437 diff -urNp linux-3.0.4/drivers/tty/rocket.c linux-3.0.4/drivers/tty/rocket.c
33438 --- linux-3.0.4/drivers/tty/rocket.c 2011-07-21 22:17:23.000000000 -0400
33439 +++ linux-3.0.4/drivers/tty/rocket.c 2011-08-23 21:48:14.000000000 -0400
33440 @@ -1277,6 +1277,8 @@ static int get_ports(struct r_port *info
33441 struct rocket_ports tmp;
33442 int board;
33443
33444 + pax_track_stack();
33445 +
33446 if (!retports)
33447 return -EFAULT;
33448 memset(&tmp, 0, sizeof (tmp));
33449 diff -urNp linux-3.0.4/drivers/tty/serial/kgdboc.c linux-3.0.4/drivers/tty/serial/kgdboc.c
33450 --- linux-3.0.4/drivers/tty/serial/kgdboc.c 2011-07-21 22:17:23.000000000 -0400
33451 +++ linux-3.0.4/drivers/tty/serial/kgdboc.c 2011-08-23 21:47:56.000000000 -0400
33452 @@ -23,8 +23,9 @@
33453 #define MAX_CONFIG_LEN 40
33454
33455 static struct kgdb_io kgdboc_io_ops;
33456 +static struct kgdb_io kgdboc_io_ops_console;
33457
33458 -/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
33459 +/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
33460 static int configured = -1;
33461
33462 static char config[MAX_CONFIG_LEN];
33463 @@ -147,6 +148,8 @@ static void cleanup_kgdboc(void)
33464 kgdboc_unregister_kbd();
33465 if (configured == 1)
33466 kgdb_unregister_io_module(&kgdboc_io_ops);
33467 + else if (configured == 2)
33468 + kgdb_unregister_io_module(&kgdboc_io_ops_console);
33469 }
33470
33471 static int configure_kgdboc(void)
33472 @@ -156,13 +159,13 @@ static int configure_kgdboc(void)
33473 int err;
33474 char *cptr = config;
33475 struct console *cons;
33476 + int is_console = 0;
33477
33478 err = kgdboc_option_setup(config);
33479 if (err || !strlen(config) || isspace(config[0]))
33480 goto noconfig;
33481
33482 err = -ENODEV;
33483 - kgdboc_io_ops.is_console = 0;
33484 kgdb_tty_driver = NULL;
33485
33486 kgdboc_use_kms = 0;
33487 @@ -183,7 +186,7 @@ static int configure_kgdboc(void)
33488 int idx;
33489 if (cons->device && cons->device(cons, &idx) == p &&
33490 idx == tty_line) {
33491 - kgdboc_io_ops.is_console = 1;
33492 + is_console = 1;
33493 break;
33494 }
33495 cons = cons->next;
33496 @@ -193,12 +196,16 @@ static int configure_kgdboc(void)
33497 kgdb_tty_line = tty_line;
33498
33499 do_register:
33500 - err = kgdb_register_io_module(&kgdboc_io_ops);
33501 + if (is_console) {
33502 + err = kgdb_register_io_module(&kgdboc_io_ops_console);
33503 + configured = 2;
33504 + } else {
33505 + err = kgdb_register_io_module(&kgdboc_io_ops);
33506 + configured = 1;
33507 + }
33508 if (err)
33509 goto noconfig;
33510
33511 - configured = 1;
33512 -
33513 return 0;
33514
33515 noconfig:
33516 @@ -212,7 +219,7 @@ noconfig:
33517 static int __init init_kgdboc(void)
33518 {
33519 /* Already configured? */
33520 - if (configured == 1)
33521 + if (configured >= 1)
33522 return 0;
33523
33524 return configure_kgdboc();
33525 @@ -261,7 +268,7 @@ static int param_set_kgdboc_var(const ch
33526 if (config[len - 1] == '\n')
33527 config[len - 1] = '\0';
33528
33529 - if (configured == 1)
33530 + if (configured >= 1)
33531 cleanup_kgdboc();
33532
33533 /* Go and configure with the new params. */
33534 @@ -301,6 +308,15 @@ static struct kgdb_io kgdboc_io_ops = {
33535 .post_exception = kgdboc_post_exp_handler,
33536 };
33537
33538 +static struct kgdb_io kgdboc_io_ops_console = {
33539 + .name = "kgdboc",
33540 + .read_char = kgdboc_get_char,
33541 + .write_char = kgdboc_put_char,
33542 + .pre_exception = kgdboc_pre_exp_handler,
33543 + .post_exception = kgdboc_post_exp_handler,
33544 + .is_console = 1
33545 +};
33546 +
33547 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
33548 /* This is only available if kgdboc is a built in for early debugging */
33549 static int __init kgdboc_early_init(char *opt)
33550 diff -urNp linux-3.0.4/drivers/tty/serial/mrst_max3110.c linux-3.0.4/drivers/tty/serial/mrst_max3110.c
33551 --- linux-3.0.4/drivers/tty/serial/mrst_max3110.c 2011-07-21 22:17:23.000000000 -0400
33552 +++ linux-3.0.4/drivers/tty/serial/mrst_max3110.c 2011-08-23 21:48:14.000000000 -0400
33553 @@ -393,6 +393,8 @@ static void max3110_con_receive(struct u
33554 int loop = 1, num, total = 0;
33555 u8 recv_buf[512], *pbuf;
33556
33557 + pax_track_stack();
33558 +
33559 pbuf = recv_buf;
33560 do {
33561 num = max3110_read_multi(max, pbuf);
33562 diff -urNp linux-3.0.4/drivers/tty/tty_io.c linux-3.0.4/drivers/tty/tty_io.c
33563 --- linux-3.0.4/drivers/tty/tty_io.c 2011-07-21 22:17:23.000000000 -0400
33564 +++ linux-3.0.4/drivers/tty/tty_io.c 2011-08-23 21:47:56.000000000 -0400
33565 @@ -3215,7 +3215,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
33566
33567 void tty_default_fops(struct file_operations *fops)
33568 {
33569 - *fops = tty_fops;
33570 + memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
33571 }
33572
33573 /*
33574 diff -urNp linux-3.0.4/drivers/tty/tty_ldisc.c linux-3.0.4/drivers/tty/tty_ldisc.c
33575 --- linux-3.0.4/drivers/tty/tty_ldisc.c 2011-07-21 22:17:23.000000000 -0400
33576 +++ linux-3.0.4/drivers/tty/tty_ldisc.c 2011-08-23 21:47:56.000000000 -0400
33577 @@ -74,7 +74,7 @@ static void put_ldisc(struct tty_ldisc *
33578 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
33579 struct tty_ldisc_ops *ldo = ld->ops;
33580
33581 - ldo->refcount--;
33582 + atomic_dec(&ldo->refcount);
33583 module_put(ldo->owner);
33584 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
33585
33586 @@ -109,7 +109,7 @@ int tty_register_ldisc(int disc, struct
33587 spin_lock_irqsave(&tty_ldisc_lock, flags);
33588 tty_ldiscs[disc] = new_ldisc;
33589 new_ldisc->num = disc;
33590 - new_ldisc->refcount = 0;
33591 + atomic_set(&new_ldisc->refcount, 0);
33592 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
33593
33594 return ret;
33595 @@ -137,7 +137,7 @@ int tty_unregister_ldisc(int disc)
33596 return -EINVAL;
33597
33598 spin_lock_irqsave(&tty_ldisc_lock, flags);
33599 - if (tty_ldiscs[disc]->refcount)
33600 + if (atomic_read(&tty_ldiscs[disc]->refcount))
33601 ret = -EBUSY;
33602 else
33603 tty_ldiscs[disc] = NULL;
33604 @@ -158,7 +158,7 @@ static struct tty_ldisc_ops *get_ldops(i
33605 if (ldops) {
33606 ret = ERR_PTR(-EAGAIN);
33607 if (try_module_get(ldops->owner)) {
33608 - ldops->refcount++;
33609 + atomic_inc(&ldops->refcount);
33610 ret = ldops;
33611 }
33612 }
33613 @@ -171,7 +171,7 @@ static void put_ldops(struct tty_ldisc_o
33614 unsigned long flags;
33615
33616 spin_lock_irqsave(&tty_ldisc_lock, flags);
33617 - ldops->refcount--;
33618 + atomic_dec(&ldops->refcount);
33619 module_put(ldops->owner);
33620 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
33621 }
33622 diff -urNp linux-3.0.4/drivers/tty/vt/keyboard.c linux-3.0.4/drivers/tty/vt/keyboard.c
33623 --- linux-3.0.4/drivers/tty/vt/keyboard.c 2011-07-21 22:17:23.000000000 -0400
33624 +++ linux-3.0.4/drivers/tty/vt/keyboard.c 2011-08-23 21:48:14.000000000 -0400
33625 @@ -656,6 +656,16 @@ static void k_spec(struct vc_data *vc, u
33626 kbd->kbdmode == VC_OFF) &&
33627 value != KVAL(K_SAK))
33628 return; /* SAK is allowed even in raw mode */
33629 +
33630 +#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
33631 + {
33632 + void *func = fn_handler[value];
33633 + if (func == fn_show_state || func == fn_show_ptregs ||
33634 + func == fn_show_mem)
33635 + return;
33636 + }
33637 +#endif
33638 +
33639 fn_handler[value](vc);
33640 }
33641
33642 diff -urNp linux-3.0.4/drivers/tty/vt/vt.c linux-3.0.4/drivers/tty/vt/vt.c
33643 --- linux-3.0.4/drivers/tty/vt/vt.c 2011-07-21 22:17:23.000000000 -0400
33644 +++ linux-3.0.4/drivers/tty/vt/vt.c 2011-08-23 21:47:56.000000000 -0400
33645 @@ -259,7 +259,7 @@ EXPORT_SYMBOL_GPL(unregister_vt_notifier
33646
33647 static void notify_write(struct vc_data *vc, unsigned int unicode)
33648 {
33649 - struct vt_notifier_param param = { .vc = vc, unicode = unicode };
33650 + struct vt_notifier_param param = { .vc = vc, .c = unicode };
33651 atomic_notifier_call_chain(&vt_notifier_list, VT_WRITE, &param);
33652 }
33653
33654 diff -urNp linux-3.0.4/drivers/tty/vt/vt_ioctl.c linux-3.0.4/drivers/tty/vt/vt_ioctl.c
33655 --- linux-3.0.4/drivers/tty/vt/vt_ioctl.c 2011-07-21 22:17:23.000000000 -0400
33656 +++ linux-3.0.4/drivers/tty/vt/vt_ioctl.c 2011-08-23 21:48:14.000000000 -0400
33657 @@ -207,9 +207,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __
33658 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
33659 return -EFAULT;
33660
33661 - if (!capable(CAP_SYS_TTY_CONFIG))
33662 - perm = 0;
33663 -
33664 switch (cmd) {
33665 case KDGKBENT:
33666 key_map = key_maps[s];
33667 @@ -221,6 +218,9 @@ do_kdsk_ioctl(int cmd, struct kbentry __
33668 val = (i ? K_HOLE : K_NOSUCHMAP);
33669 return put_user(val, &user_kbe->kb_value);
33670 case KDSKBENT:
33671 + if (!capable(CAP_SYS_TTY_CONFIG))
33672 + perm = 0;
33673 +
33674 if (!perm)
33675 return -EPERM;
33676 if (!i && v == K_NOSUCHMAP) {
33677 @@ -322,9 +322,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
33678 int i, j, k;
33679 int ret;
33680
33681 - if (!capable(CAP_SYS_TTY_CONFIG))
33682 - perm = 0;
33683 -
33684 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
33685 if (!kbs) {
33686 ret = -ENOMEM;
33687 @@ -358,6 +355,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
33688 kfree(kbs);
33689 return ((p && *p) ? -EOVERFLOW : 0);
33690 case KDSKBSENT:
33691 + if (!capable(CAP_SYS_TTY_CONFIG))
33692 + perm = 0;
33693 +
33694 if (!perm) {
33695 ret = -EPERM;
33696 goto reterr;
33697 diff -urNp linux-3.0.4/drivers/uio/uio.c linux-3.0.4/drivers/uio/uio.c
33698 --- linux-3.0.4/drivers/uio/uio.c 2011-07-21 22:17:23.000000000 -0400
33699 +++ linux-3.0.4/drivers/uio/uio.c 2011-08-23 21:47:56.000000000 -0400
33700 @@ -25,6 +25,7 @@
33701 #include <linux/kobject.h>
33702 #include <linux/cdev.h>
33703 #include <linux/uio_driver.h>
33704 +#include <asm/local.h>
33705
33706 #define UIO_MAX_DEVICES (1U << MINORBITS)
33707
33708 @@ -32,10 +33,10 @@ struct uio_device {
33709 struct module *owner;
33710 struct device *dev;
33711 int minor;
33712 - atomic_t event;
33713 + atomic_unchecked_t event;
33714 struct fasync_struct *async_queue;
33715 wait_queue_head_t wait;
33716 - int vma_count;
33717 + local_t vma_count;
33718 struct uio_info *info;
33719 struct kobject *map_dir;
33720 struct kobject *portio_dir;
33721 @@ -242,7 +243,7 @@ static ssize_t show_event(struct device
33722 struct device_attribute *attr, char *buf)
33723 {
33724 struct uio_device *idev = dev_get_drvdata(dev);
33725 - return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
33726 + return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
33727 }
33728
33729 static struct device_attribute uio_class_attributes[] = {
33730 @@ -408,7 +409,7 @@ void uio_event_notify(struct uio_info *i
33731 {
33732 struct uio_device *idev = info->uio_dev;
33733
33734 - atomic_inc(&idev->event);
33735 + atomic_inc_unchecked(&idev->event);
33736 wake_up_interruptible(&idev->wait);
33737 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
33738 }
33739 @@ -461,7 +462,7 @@ static int uio_open(struct inode *inode,
33740 }
33741
33742 listener->dev = idev;
33743 - listener->event_count = atomic_read(&idev->event);
33744 + listener->event_count = atomic_read_unchecked(&idev->event);
33745 filep->private_data = listener;
33746
33747 if (idev->info->open) {
33748 @@ -512,7 +513,7 @@ static unsigned int uio_poll(struct file
33749 return -EIO;
33750
33751 poll_wait(filep, &idev->wait, wait);
33752 - if (listener->event_count != atomic_read(&idev->event))
33753 + if (listener->event_count != atomic_read_unchecked(&idev->event))
33754 return POLLIN | POLLRDNORM;
33755 return 0;
33756 }
33757 @@ -537,7 +538,7 @@ static ssize_t uio_read(struct file *fil
33758 do {
33759 set_current_state(TASK_INTERRUPTIBLE);
33760
33761 - event_count = atomic_read(&idev->event);
33762 + event_count = atomic_read_unchecked(&idev->event);
33763 if (event_count != listener->event_count) {
33764 if (copy_to_user(buf, &event_count, count))
33765 retval = -EFAULT;
33766 @@ -606,13 +607,13 @@ static int uio_find_mem_index(struct vm_
33767 static void uio_vma_open(struct vm_area_struct *vma)
33768 {
33769 struct uio_device *idev = vma->vm_private_data;
33770 - idev->vma_count++;
33771 + local_inc(&idev->vma_count);
33772 }
33773
33774 static void uio_vma_close(struct vm_area_struct *vma)
33775 {
33776 struct uio_device *idev = vma->vm_private_data;
33777 - idev->vma_count--;
33778 + local_dec(&idev->vma_count);
33779 }
33780
33781 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
33782 @@ -823,7 +824,7 @@ int __uio_register_device(struct module
33783 idev->owner = owner;
33784 idev->info = info;
33785 init_waitqueue_head(&idev->wait);
33786 - atomic_set(&idev->event, 0);
33787 + atomic_set_unchecked(&idev->event, 0);
33788
33789 ret = uio_get_minor(idev);
33790 if (ret)
33791 diff -urNp linux-3.0.4/drivers/usb/atm/cxacru.c linux-3.0.4/drivers/usb/atm/cxacru.c
33792 --- linux-3.0.4/drivers/usb/atm/cxacru.c 2011-07-21 22:17:23.000000000 -0400
33793 +++ linux-3.0.4/drivers/usb/atm/cxacru.c 2011-08-23 21:47:56.000000000 -0400
33794 @@ -473,7 +473,7 @@ static ssize_t cxacru_sysfs_store_adsl_c
33795 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
33796 if (ret < 2)
33797 return -EINVAL;
33798 - if (index < 0 || index > 0x7f)
33799 + if (index > 0x7f)
33800 return -EINVAL;
33801 pos += tmp;
33802
33803 diff -urNp linux-3.0.4/drivers/usb/atm/usbatm.c linux-3.0.4/drivers/usb/atm/usbatm.c
33804 --- linux-3.0.4/drivers/usb/atm/usbatm.c 2011-07-21 22:17:23.000000000 -0400
33805 +++ linux-3.0.4/drivers/usb/atm/usbatm.c 2011-08-23 21:47:56.000000000 -0400
33806 @@ -332,7 +332,7 @@ static void usbatm_extract_one_cell(stru
33807 if (printk_ratelimit())
33808 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
33809 __func__, vpi, vci);
33810 - atomic_inc(&vcc->stats->rx_err);
33811 + atomic_inc_unchecked(&vcc->stats->rx_err);
33812 return;
33813 }
33814
33815 @@ -360,7 +360,7 @@ static void usbatm_extract_one_cell(stru
33816 if (length > ATM_MAX_AAL5_PDU) {
33817 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
33818 __func__, length, vcc);
33819 - atomic_inc(&vcc->stats->rx_err);
33820 + atomic_inc_unchecked(&vcc->stats->rx_err);
33821 goto out;
33822 }
33823
33824 @@ -369,14 +369,14 @@ static void usbatm_extract_one_cell(stru
33825 if (sarb->len < pdu_length) {
33826 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
33827 __func__, pdu_length, sarb->len, vcc);
33828 - atomic_inc(&vcc->stats->rx_err);
33829 + atomic_inc_unchecked(&vcc->stats->rx_err);
33830 goto out;
33831 }
33832
33833 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
33834 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
33835 __func__, vcc);
33836 - atomic_inc(&vcc->stats->rx_err);
33837 + atomic_inc_unchecked(&vcc->stats->rx_err);
33838 goto out;
33839 }
33840
33841 @@ -386,7 +386,7 @@ static void usbatm_extract_one_cell(stru
33842 if (printk_ratelimit())
33843 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
33844 __func__, length);
33845 - atomic_inc(&vcc->stats->rx_drop);
33846 + atomic_inc_unchecked(&vcc->stats->rx_drop);
33847 goto out;
33848 }
33849
33850 @@ -411,7 +411,7 @@ static void usbatm_extract_one_cell(stru
33851
33852 vcc->push(vcc, skb);
33853
33854 - atomic_inc(&vcc->stats->rx);
33855 + atomic_inc_unchecked(&vcc->stats->rx);
33856 out:
33857 skb_trim(sarb, 0);
33858 }
33859 @@ -614,7 +614,7 @@ static void usbatm_tx_process(unsigned l
33860 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
33861
33862 usbatm_pop(vcc, skb);
33863 - atomic_inc(&vcc->stats->tx);
33864 + atomic_inc_unchecked(&vcc->stats->tx);
33865
33866 skb = skb_dequeue(&instance->sndqueue);
33867 }
33868 @@ -773,11 +773,11 @@ static int usbatm_atm_proc_read(struct a
33869 if (!left--)
33870 return sprintf(page,
33871 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
33872 - atomic_read(&atm_dev->stats.aal5.tx),
33873 - atomic_read(&atm_dev->stats.aal5.tx_err),
33874 - atomic_read(&atm_dev->stats.aal5.rx),
33875 - atomic_read(&atm_dev->stats.aal5.rx_err),
33876 - atomic_read(&atm_dev->stats.aal5.rx_drop));
33877 + atomic_read_unchecked(&atm_dev->stats.aal5.tx),
33878 + atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
33879 + atomic_read_unchecked(&atm_dev->stats.aal5.rx),
33880 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
33881 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
33882
33883 if (!left--) {
33884 if (instance->disconnected)
33885 diff -urNp linux-3.0.4/drivers/usb/core/devices.c linux-3.0.4/drivers/usb/core/devices.c
33886 --- linux-3.0.4/drivers/usb/core/devices.c 2011-07-21 22:17:23.000000000 -0400
33887 +++ linux-3.0.4/drivers/usb/core/devices.c 2011-08-23 21:47:56.000000000 -0400
33888 @@ -126,7 +126,7 @@ static const char format_endpt[] =
33889 * time it gets called.
33890 */
33891 static struct device_connect_event {
33892 - atomic_t count;
33893 + atomic_unchecked_t count;
33894 wait_queue_head_t wait;
33895 } device_event = {
33896 .count = ATOMIC_INIT(1),
33897 @@ -164,7 +164,7 @@ static const struct class_info clas_info
33898
33899 void usbfs_conn_disc_event(void)
33900 {
33901 - atomic_add(2, &device_event.count);
33902 + atomic_add_unchecked(2, &device_event.count);
33903 wake_up(&device_event.wait);
33904 }
33905
33906 @@ -648,7 +648,7 @@ static unsigned int usb_device_poll(stru
33907
33908 poll_wait(file, &device_event.wait, wait);
33909
33910 - event_count = atomic_read(&device_event.count);
33911 + event_count = atomic_read_unchecked(&device_event.count);
33912 if (file->f_version != event_count) {
33913 file->f_version = event_count;
33914 return POLLIN | POLLRDNORM;
33915 diff -urNp linux-3.0.4/drivers/usb/core/message.c linux-3.0.4/drivers/usb/core/message.c
33916 --- linux-3.0.4/drivers/usb/core/message.c 2011-07-21 22:17:23.000000000 -0400
33917 +++ linux-3.0.4/drivers/usb/core/message.c 2011-08-23 21:47:56.000000000 -0400
33918 @@ -869,8 +869,8 @@ char *usb_cache_string(struct usb_device
33919 buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO);
33920 if (buf) {
33921 len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE);
33922 - if (len > 0) {
33923 - smallbuf = kmalloc(++len, GFP_NOIO);
33924 + if (len++ > 0) {
33925 + smallbuf = kmalloc(len, GFP_NOIO);
33926 if (!smallbuf)
33927 return buf;
33928 memcpy(smallbuf, buf, len);
33929 diff -urNp linux-3.0.4/drivers/usb/early/ehci-dbgp.c linux-3.0.4/drivers/usb/early/ehci-dbgp.c
33930 --- linux-3.0.4/drivers/usb/early/ehci-dbgp.c 2011-07-21 22:17:23.000000000 -0400
33931 +++ linux-3.0.4/drivers/usb/early/ehci-dbgp.c 2011-08-23 21:47:56.000000000 -0400
33932 @@ -97,7 +97,8 @@ static inline u32 dbgp_len_update(u32 x,
33933
33934 #ifdef CONFIG_KGDB
33935 static struct kgdb_io kgdbdbgp_io_ops;
33936 -#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
33937 +static struct kgdb_io kgdbdbgp_io_ops_console;
33938 +#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
33939 #else
33940 #define dbgp_kgdb_mode (0)
33941 #endif
33942 @@ -1035,6 +1036,13 @@ static struct kgdb_io kgdbdbgp_io_ops =
33943 .write_char = kgdbdbgp_write_char,
33944 };
33945
33946 +static struct kgdb_io kgdbdbgp_io_ops_console = {
33947 + .name = "kgdbdbgp",
33948 + .read_char = kgdbdbgp_read_char,
33949 + .write_char = kgdbdbgp_write_char,
33950 + .is_console = 1
33951 +};
33952 +
33953 static int kgdbdbgp_wait_time;
33954
33955 static int __init kgdbdbgp_parse_config(char *str)
33956 @@ -1050,8 +1058,10 @@ static int __init kgdbdbgp_parse_config(
33957 ptr++;
33958 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
33959 }
33960 - kgdb_register_io_module(&kgdbdbgp_io_ops);
33961 - kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
33962 + if (early_dbgp_console.index != -1)
33963 + kgdb_register_io_module(&kgdbdbgp_io_ops_console);
33964 + else
33965 + kgdb_register_io_module(&kgdbdbgp_io_ops);
33966
33967 return 0;
33968 }
33969 diff -urNp linux-3.0.4/drivers/usb/host/xhci-mem.c linux-3.0.4/drivers/usb/host/xhci-mem.c
33970 --- linux-3.0.4/drivers/usb/host/xhci-mem.c 2011-07-21 22:17:23.000000000 -0400
33971 +++ linux-3.0.4/drivers/usb/host/xhci-mem.c 2011-08-23 21:48:14.000000000 -0400
33972 @@ -1685,6 +1685,8 @@ static int xhci_check_trb_in_td_math(str
33973 unsigned int num_tests;
33974 int i, ret;
33975
33976 + pax_track_stack();
33977 +
33978 num_tests = ARRAY_SIZE(simple_test_vector);
33979 for (i = 0; i < num_tests; i++) {
33980 ret = xhci_test_trb_in_td(xhci,
33981 diff -urNp linux-3.0.4/drivers/usb/wusbcore/wa-hc.h linux-3.0.4/drivers/usb/wusbcore/wa-hc.h
33982 --- linux-3.0.4/drivers/usb/wusbcore/wa-hc.h 2011-07-21 22:17:23.000000000 -0400
33983 +++ linux-3.0.4/drivers/usb/wusbcore/wa-hc.h 2011-08-23 21:47:56.000000000 -0400
33984 @@ -192,7 +192,7 @@ struct wahc {
33985 struct list_head xfer_delayed_list;
33986 spinlock_t xfer_list_lock;
33987 struct work_struct xfer_work;
33988 - atomic_t xfer_id_count;
33989 + atomic_unchecked_t xfer_id_count;
33990 };
33991
33992
33993 @@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *
33994 INIT_LIST_HEAD(&wa->xfer_delayed_list);
33995 spin_lock_init(&wa->xfer_list_lock);
33996 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
33997 - atomic_set(&wa->xfer_id_count, 1);
33998 + atomic_set_unchecked(&wa->xfer_id_count, 1);
33999 }
34000
34001 /**
34002 diff -urNp linux-3.0.4/drivers/usb/wusbcore/wa-xfer.c linux-3.0.4/drivers/usb/wusbcore/wa-xfer.c
34003 --- linux-3.0.4/drivers/usb/wusbcore/wa-xfer.c 2011-07-21 22:17:23.000000000 -0400
34004 +++ linux-3.0.4/drivers/usb/wusbcore/wa-xfer.c 2011-08-23 21:47:56.000000000 -0400
34005 @@ -294,7 +294,7 @@ out:
34006 */
34007 static void wa_xfer_id_init(struct wa_xfer *xfer)
34008 {
34009 - xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
34010 + xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
34011 }
34012
34013 /*
34014 diff -urNp linux-3.0.4/drivers/vhost/vhost.c linux-3.0.4/drivers/vhost/vhost.c
34015 --- linux-3.0.4/drivers/vhost/vhost.c 2011-07-21 22:17:23.000000000 -0400
34016 +++ linux-3.0.4/drivers/vhost/vhost.c 2011-08-23 21:47:56.000000000 -0400
34017 @@ -589,7 +589,7 @@ static int init_used(struct vhost_virtqu
34018 return get_user(vq->last_used_idx, &used->idx);
34019 }
34020
34021 -static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
34022 +static long vhost_set_vring(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
34023 {
34024 struct file *eventfp, *filep = NULL,
34025 *pollstart = NULL, *pollstop = NULL;
34026 diff -urNp linux-3.0.4/drivers/video/fbcmap.c linux-3.0.4/drivers/video/fbcmap.c
34027 --- linux-3.0.4/drivers/video/fbcmap.c 2011-07-21 22:17:23.000000000 -0400
34028 +++ linux-3.0.4/drivers/video/fbcmap.c 2011-08-23 21:47:56.000000000 -0400
34029 @@ -285,8 +285,7 @@ int fb_set_user_cmap(struct fb_cmap_user
34030 rc = -ENODEV;
34031 goto out;
34032 }
34033 - if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
34034 - !info->fbops->fb_setcmap)) {
34035 + if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
34036 rc = -EINVAL;
34037 goto out1;
34038 }
34039 diff -urNp linux-3.0.4/drivers/video/fbmem.c linux-3.0.4/drivers/video/fbmem.c
34040 --- linux-3.0.4/drivers/video/fbmem.c 2011-07-21 22:17:23.000000000 -0400
34041 +++ linux-3.0.4/drivers/video/fbmem.c 2011-08-23 21:48:14.000000000 -0400
34042 @@ -428,7 +428,7 @@ static void fb_do_show_logo(struct fb_in
34043 image->dx += image->width + 8;
34044 }
34045 } else if (rotate == FB_ROTATE_UD) {
34046 - for (x = 0; x < num && image->dx >= 0; x++) {
34047 + for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
34048 info->fbops->fb_imageblit(info, image);
34049 image->dx -= image->width + 8;
34050 }
34051 @@ -440,7 +440,7 @@ static void fb_do_show_logo(struct fb_in
34052 image->dy += image->height + 8;
34053 }
34054 } else if (rotate == FB_ROTATE_CCW) {
34055 - for (x = 0; x < num && image->dy >= 0; x++) {
34056 + for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
34057 info->fbops->fb_imageblit(info, image);
34058 image->dy -= image->height + 8;
34059 }
34060 @@ -939,6 +939,8 @@ fb_set_var(struct fb_info *info, struct
34061 int flags = info->flags;
34062 int ret = 0;
34063
34064 + pax_track_stack();
34065 +
34066 if (var->activate & FB_ACTIVATE_INV_MODE) {
34067 struct fb_videomode mode1, mode2;
34068
34069 @@ -1064,6 +1066,8 @@ static long do_fb_ioctl(struct fb_info *
34070 void __user *argp = (void __user *)arg;
34071 long ret = 0;
34072
34073 + pax_track_stack();
34074 +
34075 switch (cmd) {
34076 case FBIOGET_VSCREENINFO:
34077 if (!lock_fb_info(info))
34078 @@ -1143,7 +1147,7 @@ static long do_fb_ioctl(struct fb_info *
34079 return -EFAULT;
34080 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
34081 return -EINVAL;
34082 - if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
34083 + if (con2fb.framebuffer >= FB_MAX)
34084 return -EINVAL;
34085 if (!registered_fb[con2fb.framebuffer])
34086 request_module("fb%d", con2fb.framebuffer);
34087 diff -urNp linux-3.0.4/drivers/video/i810/i810_accel.c linux-3.0.4/drivers/video/i810/i810_accel.c
34088 --- linux-3.0.4/drivers/video/i810/i810_accel.c 2011-07-21 22:17:23.000000000 -0400
34089 +++ linux-3.0.4/drivers/video/i810/i810_accel.c 2011-08-23 21:47:56.000000000 -0400
34090 @@ -73,6 +73,7 @@ static inline int wait_for_space(struct
34091 }
34092 }
34093 printk("ringbuffer lockup!!!\n");
34094 + printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
34095 i810_report_error(mmio);
34096 par->dev_flags |= LOCKUP;
34097 info->pixmap.scan_align = 1;
34098 diff -urNp linux-3.0.4/drivers/video/logo/logo_linux_clut224.ppm linux-3.0.4/drivers/video/logo/logo_linux_clut224.ppm
34099 --- linux-3.0.4/drivers/video/logo/logo_linux_clut224.ppm 2011-07-21 22:17:23.000000000 -0400
34100 +++ linux-3.0.4/drivers/video/logo/logo_linux_clut224.ppm 2011-08-29 23:49:40.000000000 -0400
34101 @@ -1,1604 +1,1123 @@
34102 P3
34103 -# Standard 224-color Linux logo
34104 80 80
34105 255
34106 - 0 0 0 0 0 0 0 0 0 0 0 0
34107 - 0 0 0 0 0 0 0 0 0 0 0 0
34108 - 0 0 0 0 0 0 0 0 0 0 0 0
34109 - 0 0 0 0 0 0 0 0 0 0 0 0
34110 - 0 0 0 0 0 0 0 0 0 0 0 0
34111 - 0 0 0 0 0 0 0 0 0 0 0 0
34112 - 0 0 0 0 0 0 0 0 0 0 0 0
34113 - 0 0 0 0 0 0 0 0 0 0 0 0
34114 - 0 0 0 0 0 0 0 0 0 0 0 0
34115 - 6 6 6 6 6 6 10 10 10 10 10 10
34116 - 10 10 10 6 6 6 6 6 6 6 6 6
34117 - 0 0 0 0 0 0 0 0 0 0 0 0
34118 - 0 0 0 0 0 0 0 0 0 0 0 0
34119 - 0 0 0 0 0 0 0 0 0 0 0 0
34120 - 0 0 0 0 0 0 0 0 0 0 0 0
34121 - 0 0 0 0 0 0 0 0 0 0 0 0
34122 - 0 0 0 0 0 0 0 0 0 0 0 0
34123 - 0 0 0 0 0 0 0 0 0 0 0 0
34124 - 0 0 0 0 0 0 0 0 0 0 0 0
34125 - 0 0 0 0 0 0 0 0 0 0 0 0
34126 - 0 0 0 0 0 0 0 0 0 0 0 0
34127 - 0 0 0 0 0 0 0 0 0 0 0 0
34128 - 0 0 0 0 0 0 0 0 0 0 0 0
34129 - 0 0 0 0 0 0 0 0 0 0 0 0
34130 - 0 0 0 0 0 0 0 0 0 0 0 0
34131 - 0 0 0 0 0 0 0 0 0 0 0 0
34132 - 0 0 0 0 0 0 0 0 0 0 0 0
34133 - 0 0 0 0 0 0 0 0 0 0 0 0
34134 - 0 0 0 6 6 6 10 10 10 14 14 14
34135 - 22 22 22 26 26 26 30 30 30 34 34 34
34136 - 30 30 30 30 30 30 26 26 26 18 18 18
34137 - 14 14 14 10 10 10 6 6 6 0 0 0
34138 - 0 0 0 0 0 0 0 0 0 0 0 0
34139 - 0 0 0 0 0 0 0 0 0 0 0 0
34140 - 0 0 0 0 0 0 0 0 0 0 0 0
34141 - 0 0 0 0 0 0 0 0 0 0 0 0
34142 - 0 0 0 0 0 0 0 0 0 0 0 0
34143 - 0 0 0 0 0 0 0 0 0 0 0 0
34144 - 0 0 0 0 0 0 0 0 0 0 0 0
34145 - 0 0 0 0 0 0 0 0 0 0 0 0
34146 - 0 0 0 0 0 0 0 0 0 0 0 0
34147 - 0 0 0 0 0 1 0 0 1 0 0 0
34148 - 0 0 0 0 0 0 0 0 0 0 0 0
34149 - 0 0 0 0 0 0 0 0 0 0 0 0
34150 - 0 0 0 0 0 0 0 0 0 0 0 0
34151 - 0 0 0 0 0 0 0 0 0 0 0 0
34152 - 0 0 0 0 0 0 0 0 0 0 0 0
34153 - 0 0 0 0 0 0 0 0 0 0 0 0
34154 - 6 6 6 14 14 14 26 26 26 42 42 42
34155 - 54 54 54 66 66 66 78 78 78 78 78 78
34156 - 78 78 78 74 74 74 66 66 66 54 54 54
34157 - 42 42 42 26 26 26 18 18 18 10 10 10
34158 - 6 6 6 0 0 0 0 0 0 0 0 0
34159 - 0 0 0 0 0 0 0 0 0 0 0 0
34160 - 0 0 0 0 0 0 0 0 0 0 0 0
34161 - 0 0 0 0 0 0 0 0 0 0 0 0
34162 - 0 0 0 0 0 0 0 0 0 0 0 0
34163 - 0 0 0 0 0 0 0 0 0 0 0 0
34164 - 0 0 0 0 0 0 0 0 0 0 0 0
34165 - 0 0 0 0 0 0 0 0 0 0 0 0
34166 - 0 0 0 0 0 0 0 0 0 0 0 0
34167 - 0 0 1 0 0 0 0 0 0 0 0 0
34168 - 0 0 0 0 0 0 0 0 0 0 0 0
34169 - 0 0 0 0 0 0 0 0 0 0 0 0
34170 - 0 0 0 0 0 0 0 0 0 0 0 0
34171 - 0 0 0 0 0 0 0 0 0 0 0 0
34172 - 0 0 0 0 0 0 0 0 0 0 0 0
34173 - 0 0 0 0 0 0 0 0 0 10 10 10
34174 - 22 22 22 42 42 42 66 66 66 86 86 86
34175 - 66 66 66 38 38 38 38 38 38 22 22 22
34176 - 26 26 26 34 34 34 54 54 54 66 66 66
34177 - 86 86 86 70 70 70 46 46 46 26 26 26
34178 - 14 14 14 6 6 6 0 0 0 0 0 0
34179 - 0 0 0 0 0 0 0 0 0 0 0 0
34180 - 0 0 0 0 0 0 0 0 0 0 0 0
34181 - 0 0 0 0 0 0 0 0 0 0 0 0
34182 - 0 0 0 0 0 0 0 0 0 0 0 0
34183 - 0 0 0 0 0 0 0 0 0 0 0 0
34184 - 0 0 0 0 0 0 0 0 0 0 0 0
34185 - 0 0 0 0 0 0 0 0 0 0 0 0
34186 - 0 0 0 0 0 0 0 0 0 0 0 0
34187 - 0 0 1 0 0 1 0 0 1 0 0 0
34188 - 0 0 0 0 0 0 0 0 0 0 0 0
34189 - 0 0 0 0 0 0 0 0 0 0 0 0
34190 - 0 0 0 0 0 0 0 0 0 0 0 0
34191 - 0 0 0 0 0 0 0 0 0 0 0 0
34192 - 0 0 0 0 0 0 0 0 0 0 0 0
34193 - 0 0 0 0 0 0 10 10 10 26 26 26
34194 - 50 50 50 82 82 82 58 58 58 6 6 6
34195 - 2 2 6 2 2 6 2 2 6 2 2 6
34196 - 2 2 6 2 2 6 2 2 6 2 2 6
34197 - 6 6 6 54 54 54 86 86 86 66 66 66
34198 - 38 38 38 18 18 18 6 6 6 0 0 0
34199 - 0 0 0 0 0 0 0 0 0 0 0 0
34200 - 0 0 0 0 0 0 0 0 0 0 0 0
34201 - 0 0 0 0 0 0 0 0 0 0 0 0
34202 - 0 0 0 0 0 0 0 0 0 0 0 0
34203 - 0 0 0 0 0 0 0 0 0 0 0 0
34204 - 0 0 0 0 0 0 0 0 0 0 0 0
34205 - 0 0 0 0 0 0 0 0 0 0 0 0
34206 - 0 0 0 0 0 0 0 0 0 0 0 0
34207 - 0 0 0 0 0 0 0 0 0 0 0 0
34208 - 0 0 0 0 0 0 0 0 0 0 0 0
34209 - 0 0 0 0 0 0 0 0 0 0 0 0
34210 - 0 0 0 0 0 0 0 0 0 0 0 0
34211 - 0 0 0 0 0 0 0 0 0 0 0 0
34212 - 0 0 0 0 0 0 0 0 0 0 0 0
34213 - 0 0 0 6 6 6 22 22 22 50 50 50
34214 - 78 78 78 34 34 34 2 2 6 2 2 6
34215 - 2 2 6 2 2 6 2 2 6 2 2 6
34216 - 2 2 6 2 2 6 2 2 6 2 2 6
34217 - 2 2 6 2 2 6 6 6 6 70 70 70
34218 - 78 78 78 46 46 46 22 22 22 6 6 6
34219 - 0 0 0 0 0 0 0 0 0 0 0 0
34220 - 0 0 0 0 0 0 0 0 0 0 0 0
34221 - 0 0 0 0 0 0 0 0 0 0 0 0
34222 - 0 0 0 0 0 0 0 0 0 0 0 0
34223 - 0 0 0 0 0 0 0 0 0 0 0 0
34224 - 0 0 0 0 0 0 0 0 0 0 0 0
34225 - 0 0 0 0 0 0 0 0 0 0 0 0
34226 - 0 0 0 0 0 0 0 0 0 0 0 0
34227 - 0 0 1 0 0 1 0 0 1 0 0 0
34228 - 0 0 0 0 0 0 0 0 0 0 0 0
34229 - 0 0 0 0 0 0 0 0 0 0 0 0
34230 - 0 0 0 0 0 0 0 0 0 0 0 0
34231 - 0 0 0 0 0 0 0 0 0 0 0 0
34232 - 0 0 0 0 0 0 0 0 0 0 0 0
34233 - 6 6 6 18 18 18 42 42 42 82 82 82
34234 - 26 26 26 2 2 6 2 2 6 2 2 6
34235 - 2 2 6 2 2 6 2 2 6 2 2 6
34236 - 2 2 6 2 2 6 2 2 6 14 14 14
34237 - 46 46 46 34 34 34 6 6 6 2 2 6
34238 - 42 42 42 78 78 78 42 42 42 18 18 18
34239 - 6 6 6 0 0 0 0 0 0 0 0 0
34240 - 0 0 0 0 0 0 0 0 0 0 0 0
34241 - 0 0 0 0 0 0 0 0 0 0 0 0
34242 - 0 0 0 0 0 0 0 0 0 0 0 0
34243 - 0 0 0 0 0 0 0 0 0 0 0 0
34244 - 0 0 0 0 0 0 0 0 0 0 0 0
34245 - 0 0 0 0 0 0 0 0 0 0 0 0
34246 - 0 0 0 0 0 0 0 0 0 0 0 0
34247 - 0 0 1 0 0 0 0 0 1 0 0 0
34248 - 0 0 0 0 0 0 0 0 0 0 0 0
34249 - 0 0 0 0 0 0 0 0 0 0 0 0
34250 - 0 0 0 0 0 0 0 0 0 0 0 0
34251 - 0 0 0 0 0 0 0 0 0 0 0 0
34252 - 0 0 0 0 0 0 0 0 0 0 0 0
34253 - 10 10 10 30 30 30 66 66 66 58 58 58
34254 - 2 2 6 2 2 6 2 2 6 2 2 6
34255 - 2 2 6 2 2 6 2 2 6 2 2 6
34256 - 2 2 6 2 2 6 2 2 6 26 26 26
34257 - 86 86 86 101 101 101 46 46 46 10 10 10
34258 - 2 2 6 58 58 58 70 70 70 34 34 34
34259 - 10 10 10 0 0 0 0 0 0 0 0 0
34260 - 0 0 0 0 0 0 0 0 0 0 0 0
34261 - 0 0 0 0 0 0 0 0 0 0 0 0
34262 - 0 0 0 0 0 0 0 0 0 0 0 0
34263 - 0 0 0 0 0 0 0 0 0 0 0 0
34264 - 0 0 0 0 0 0 0 0 0 0 0 0
34265 - 0 0 0 0 0 0 0 0 0 0 0 0
34266 - 0 0 0 0 0 0 0 0 0 0 0 0
34267 - 0 0 1 0 0 1 0 0 1 0 0 0
34268 - 0 0 0 0 0 0 0 0 0 0 0 0
34269 - 0 0 0 0 0 0 0 0 0 0 0 0
34270 - 0 0 0 0 0 0 0 0 0 0 0 0
34271 - 0 0 0 0 0 0 0 0 0 0 0 0
34272 - 0 0 0 0 0 0 0 0 0 0 0 0
34273 - 14 14 14 42 42 42 86 86 86 10 10 10
34274 - 2 2 6 2 2 6 2 2 6 2 2 6
34275 - 2 2 6 2 2 6 2 2 6 2 2 6
34276 - 2 2 6 2 2 6 2 2 6 30 30 30
34277 - 94 94 94 94 94 94 58 58 58 26 26 26
34278 - 2 2 6 6 6 6 78 78 78 54 54 54
34279 - 22 22 22 6 6 6 0 0 0 0 0 0
34280 - 0 0 0 0 0 0 0 0 0 0 0 0
34281 - 0 0 0 0 0 0 0 0 0 0 0 0
34282 - 0 0 0 0 0 0 0 0 0 0 0 0
34283 - 0 0 0 0 0 0 0 0 0 0 0 0
34284 - 0 0 0 0 0 0 0 0 0 0 0 0
34285 - 0 0 0 0 0 0 0 0 0 0 0 0
34286 - 0 0 0 0 0 0 0 0 0 0 0 0
34287 - 0 0 0 0 0 0 0 0 0 0 0 0
34288 - 0 0 0 0 0 0 0 0 0 0 0 0
34289 - 0 0 0 0 0 0 0 0 0 0 0 0
34290 - 0 0 0 0 0 0 0 0 0 0 0 0
34291 - 0 0 0 0 0 0 0 0 0 0 0 0
34292 - 0 0 0 0 0 0 0 0 0 6 6 6
34293 - 22 22 22 62 62 62 62 62 62 2 2 6
34294 - 2 2 6 2 2 6 2 2 6 2 2 6
34295 - 2 2 6 2 2 6 2 2 6 2 2 6
34296 - 2 2 6 2 2 6 2 2 6 26 26 26
34297 - 54 54 54 38 38 38 18 18 18 10 10 10
34298 - 2 2 6 2 2 6 34 34 34 82 82 82
34299 - 38 38 38 14 14 14 0 0 0 0 0 0
34300 - 0 0 0 0 0 0 0 0 0 0 0 0
34301 - 0 0 0 0 0 0 0 0 0 0 0 0
34302 - 0 0 0 0 0 0 0 0 0 0 0 0
34303 - 0 0 0 0 0 0 0 0 0 0 0 0
34304 - 0 0 0 0 0 0 0 0 0 0 0 0
34305 - 0 0 0 0 0 0 0 0 0 0 0 0
34306 - 0 0 0 0 0 0 0 0 0 0 0 0
34307 - 0 0 0 0 0 1 0 0 1 0 0 0
34308 - 0 0 0 0 0 0 0 0 0 0 0 0
34309 - 0 0 0 0 0 0 0 0 0 0 0 0
34310 - 0 0 0 0 0 0 0 0 0 0 0 0
34311 - 0 0 0 0 0 0 0 0 0 0 0 0
34312 - 0 0 0 0 0 0 0 0 0 6 6 6
34313 - 30 30 30 78 78 78 30 30 30 2 2 6
34314 - 2 2 6 2 2 6 2 2 6 2 2 6
34315 - 2 2 6 2 2 6 2 2 6 2 2 6
34316 - 2 2 6 2 2 6 2 2 6 10 10 10
34317 - 10 10 10 2 2 6 2 2 6 2 2 6
34318 - 2 2 6 2 2 6 2 2 6 78 78 78
34319 - 50 50 50 18 18 18 6 6 6 0 0 0
34320 - 0 0 0 0 0 0 0 0 0 0 0 0
34321 - 0 0 0 0 0 0 0 0 0 0 0 0
34322 - 0 0 0 0 0 0 0 0 0 0 0 0
34323 - 0 0 0 0 0 0 0 0 0 0 0 0
34324 - 0 0 0 0 0 0 0 0 0 0 0 0
34325 - 0 0 0 0 0 0 0 0 0 0 0 0
34326 - 0 0 0 0 0 0 0 0 0 0 0 0
34327 - 0 0 1 0 0 0 0 0 0 0 0 0
34328 - 0 0 0 0 0 0 0 0 0 0 0 0
34329 - 0 0 0 0 0 0 0 0 0 0 0 0
34330 - 0 0 0 0 0 0 0 0 0 0 0 0
34331 - 0 0 0 0 0 0 0 0 0 0 0 0
34332 - 0 0 0 0 0 0 0 0 0 10 10 10
34333 - 38 38 38 86 86 86 14 14 14 2 2 6
34334 - 2 2 6 2 2 6 2 2 6 2 2 6
34335 - 2 2 6 2 2 6 2 2 6 2 2 6
34336 - 2 2 6 2 2 6 2 2 6 2 2 6
34337 - 2 2 6 2 2 6 2 2 6 2 2 6
34338 - 2 2 6 2 2 6 2 2 6 54 54 54
34339 - 66 66 66 26 26 26 6 6 6 0 0 0
34340 - 0 0 0 0 0 0 0 0 0 0 0 0
34341 - 0 0 0 0 0 0 0 0 0 0 0 0
34342 - 0 0 0 0 0 0 0 0 0 0 0 0
34343 - 0 0 0 0 0 0 0 0 0 0 0 0
34344 - 0 0 0 0 0 0 0 0 0 0 0 0
34345 - 0 0 0 0 0 0 0 0 0 0 0 0
34346 - 0 0 0 0 0 0 0 0 0 0 0 0
34347 - 0 0 0 0 0 1 0 0 1 0 0 0
34348 - 0 0 0 0 0 0 0 0 0 0 0 0
34349 - 0 0 0 0 0 0 0 0 0 0 0 0
34350 - 0 0 0 0 0 0 0 0 0 0 0 0
34351 - 0 0 0 0 0 0 0 0 0 0 0 0
34352 - 0 0 0 0 0 0 0 0 0 14 14 14
34353 - 42 42 42 82 82 82 2 2 6 2 2 6
34354 - 2 2 6 6 6 6 10 10 10 2 2 6
34355 - 2 2 6 2 2 6 2 2 6 2 2 6
34356 - 2 2 6 2 2 6 2 2 6 6 6 6
34357 - 14 14 14 10 10 10 2 2 6 2 2 6
34358 - 2 2 6 2 2 6 2 2 6 18 18 18
34359 - 82 82 82 34 34 34 10 10 10 0 0 0
34360 - 0 0 0 0 0 0 0 0 0 0 0 0
34361 - 0 0 0 0 0 0 0 0 0 0 0 0
34362 - 0 0 0 0 0 0 0 0 0 0 0 0
34363 - 0 0 0 0 0 0 0 0 0 0 0 0
34364 - 0 0 0 0 0 0 0 0 0 0 0 0
34365 - 0 0 0 0 0 0 0 0 0 0 0 0
34366 - 0 0 0 0 0 0 0 0 0 0 0 0
34367 - 0 0 1 0 0 0 0 0 0 0 0 0
34368 - 0 0 0 0 0 0 0 0 0 0 0 0
34369 - 0 0 0 0 0 0 0 0 0 0 0 0
34370 - 0 0 0 0 0 0 0 0 0 0 0 0
34371 - 0 0 0 0 0 0 0 0 0 0 0 0
34372 - 0 0 0 0 0 0 0 0 0 14 14 14
34373 - 46 46 46 86 86 86 2 2 6 2 2 6
34374 - 6 6 6 6 6 6 22 22 22 34 34 34
34375 - 6 6 6 2 2 6 2 2 6 2 2 6
34376 - 2 2 6 2 2 6 18 18 18 34 34 34
34377 - 10 10 10 50 50 50 22 22 22 2 2 6
34378 - 2 2 6 2 2 6 2 2 6 10 10 10
34379 - 86 86 86 42 42 42 14 14 14 0 0 0
34380 - 0 0 0 0 0 0 0 0 0 0 0 0
34381 - 0 0 0 0 0 0 0 0 0 0 0 0
34382 - 0 0 0 0 0 0 0 0 0 0 0 0
34383 - 0 0 0 0 0 0 0 0 0 0 0 0
34384 - 0 0 0 0 0 0 0 0 0 0 0 0
34385 - 0 0 0 0 0 0 0 0 0 0 0 0
34386 - 0 0 0 0 0 0 0 0 0 0 0 0
34387 - 0 0 1 0 0 1 0 0 1 0 0 0
34388 - 0 0 0 0 0 0 0 0 0 0 0 0
34389 - 0 0 0 0 0 0 0 0 0 0 0 0
34390 - 0 0 0 0 0 0 0 0 0 0 0 0
34391 - 0 0 0 0 0 0 0 0 0 0 0 0
34392 - 0 0 0 0 0 0 0 0 0 14 14 14
34393 - 46 46 46 86 86 86 2 2 6 2 2 6
34394 - 38 38 38 116 116 116 94 94 94 22 22 22
34395 - 22 22 22 2 2 6 2 2 6 2 2 6
34396 - 14 14 14 86 86 86 138 138 138 162 162 162
34397 -154 154 154 38 38 38 26 26 26 6 6 6
34398 - 2 2 6 2 2 6 2 2 6 2 2 6
34399 - 86 86 86 46 46 46 14 14 14 0 0 0
34400 - 0 0 0 0 0 0 0 0 0 0 0 0
34401 - 0 0 0 0 0 0 0 0 0 0 0 0
34402 - 0 0 0 0 0 0 0 0 0 0 0 0
34403 - 0 0 0 0 0 0 0 0 0 0 0 0
34404 - 0 0 0 0 0 0 0 0 0 0 0 0
34405 - 0 0 0 0 0 0 0 0 0 0 0 0
34406 - 0 0 0 0 0 0 0 0 0 0 0 0
34407 - 0 0 0 0 0 0 0 0 0 0 0 0
34408 - 0 0 0 0 0 0 0 0 0 0 0 0
34409 - 0 0 0 0 0 0 0 0 0 0 0 0
34410 - 0 0 0 0 0 0 0 0 0 0 0 0
34411 - 0 0 0 0 0 0 0 0 0 0 0 0
34412 - 0 0 0 0 0 0 0 0 0 14 14 14
34413 - 46 46 46 86 86 86 2 2 6 14 14 14
34414 -134 134 134 198 198 198 195 195 195 116 116 116
34415 - 10 10 10 2 2 6 2 2 6 6 6 6
34416 -101 98 89 187 187 187 210 210 210 218 218 218
34417 -214 214 214 134 134 134 14 14 14 6 6 6
34418 - 2 2 6 2 2 6 2 2 6 2 2 6
34419 - 86 86 86 50 50 50 18 18 18 6 6 6
34420 - 0 0 0 0 0 0 0 0 0 0 0 0
34421 - 0 0 0 0 0 0 0 0 0 0 0 0
34422 - 0 0 0 0 0 0 0 0 0 0 0 0
34423 - 0 0 0 0 0 0 0 0 0 0 0 0
34424 - 0 0 0 0 0 0 0 0 0 0 0 0
34425 - 0 0 0 0 0 0 0 0 0 0 0 0
34426 - 0 0 0 0 0 0 0 0 1 0 0 0
34427 - 0 0 1 0 0 1 0 0 1 0 0 0
34428 - 0 0 0 0 0 0 0 0 0 0 0 0
34429 - 0 0 0 0 0 0 0 0 0 0 0 0
34430 - 0 0 0 0 0 0 0 0 0 0 0 0
34431 - 0 0 0 0 0 0 0 0 0 0 0 0
34432 - 0 0 0 0 0 0 0 0 0 14 14 14
34433 - 46 46 46 86 86 86 2 2 6 54 54 54
34434 -218 218 218 195 195 195 226 226 226 246 246 246
34435 - 58 58 58 2 2 6 2 2 6 30 30 30
34436 -210 210 210 253 253 253 174 174 174 123 123 123
34437 -221 221 221 234 234 234 74 74 74 2 2 6
34438 - 2 2 6 2 2 6 2 2 6 2 2 6
34439 - 70 70 70 58 58 58 22 22 22 6 6 6
34440 - 0 0 0 0 0 0 0 0 0 0 0 0
34441 - 0 0 0 0 0 0 0 0 0 0 0 0
34442 - 0 0 0 0 0 0 0 0 0 0 0 0
34443 - 0 0 0 0 0 0 0 0 0 0 0 0
34444 - 0 0 0 0 0 0 0 0 0 0 0 0
34445 - 0 0 0 0 0 0 0 0 0 0 0 0
34446 - 0 0 0 0 0 0 0 0 0 0 0 0
34447 - 0 0 0 0 0 0 0 0 0 0 0 0
34448 - 0 0 0 0 0 0 0 0 0 0 0 0
34449 - 0 0 0 0 0 0 0 0 0 0 0 0
34450 - 0 0 0 0 0 0 0 0 0 0 0 0
34451 - 0 0 0 0 0 0 0 0 0 0 0 0
34452 - 0 0 0 0 0 0 0 0 0 14 14 14
34453 - 46 46 46 82 82 82 2 2 6 106 106 106
34454 -170 170 170 26 26 26 86 86 86 226 226 226
34455 -123 123 123 10 10 10 14 14 14 46 46 46
34456 -231 231 231 190 190 190 6 6 6 70 70 70
34457 - 90 90 90 238 238 238 158 158 158 2 2 6
34458 - 2 2 6 2 2 6 2 2 6 2 2 6
34459 - 70 70 70 58 58 58 22 22 22 6 6 6
34460 - 0 0 0 0 0 0 0 0 0 0 0 0
34461 - 0 0 0 0 0 0 0 0 0 0 0 0
34462 - 0 0 0 0 0 0 0 0 0 0 0 0
34463 - 0 0 0 0 0 0 0 0 0 0 0 0
34464 - 0 0 0 0 0 0 0 0 0 0 0 0
34465 - 0 0 0 0 0 0 0 0 0 0 0 0
34466 - 0 0 0 0 0 0 0 0 1 0 0 0
34467 - 0 0 1 0 0 1 0 0 1 0 0 0
34468 - 0 0 0 0 0 0 0 0 0 0 0 0
34469 - 0 0 0 0 0 0 0 0 0 0 0 0
34470 - 0 0 0 0 0 0 0 0 0 0 0 0
34471 - 0 0 0 0 0 0 0 0 0 0 0 0
34472 - 0 0 0 0 0 0 0 0 0 14 14 14
34473 - 42 42 42 86 86 86 6 6 6 116 116 116
34474 -106 106 106 6 6 6 70 70 70 149 149 149
34475 -128 128 128 18 18 18 38 38 38 54 54 54
34476 -221 221 221 106 106 106 2 2 6 14 14 14
34477 - 46 46 46 190 190 190 198 198 198 2 2 6
34478 - 2 2 6 2 2 6 2 2 6 2 2 6
34479 - 74 74 74 62 62 62 22 22 22 6 6 6
34480 - 0 0 0 0 0 0 0 0 0 0 0 0
34481 - 0 0 0 0 0 0 0 0 0 0 0 0
34482 - 0 0 0 0 0 0 0 0 0 0 0 0
34483 - 0 0 0 0 0 0 0 0 0 0 0 0
34484 - 0 0 0 0 0 0 0 0 0 0 0 0
34485 - 0 0 0 0 0 0 0 0 0 0 0 0
34486 - 0 0 0 0 0 0 0 0 1 0 0 0
34487 - 0 0 1 0 0 0 0 0 1 0 0 0
34488 - 0 0 0 0 0 0 0 0 0 0 0 0
34489 - 0 0 0 0 0 0 0 0 0 0 0 0
34490 - 0 0 0 0 0 0 0 0 0 0 0 0
34491 - 0 0 0 0 0 0 0 0 0 0 0 0
34492 - 0 0 0 0 0 0 0 0 0 14 14 14
34493 - 42 42 42 94 94 94 14 14 14 101 101 101
34494 -128 128 128 2 2 6 18 18 18 116 116 116
34495 -118 98 46 121 92 8 121 92 8 98 78 10
34496 -162 162 162 106 106 106 2 2 6 2 2 6
34497 - 2 2 6 195 195 195 195 195 195 6 6 6
34498 - 2 2 6 2 2 6 2 2 6 2 2 6
34499 - 74 74 74 62 62 62 22 22 22 6 6 6
34500 - 0 0 0 0 0 0 0 0 0 0 0 0
34501 - 0 0 0 0 0 0 0 0 0 0 0 0
34502 - 0 0 0 0 0 0 0 0 0 0 0 0
34503 - 0 0 0 0 0 0 0 0 0 0 0 0
34504 - 0 0 0 0 0 0 0 0 0 0 0 0
34505 - 0 0 0 0 0 0 0 0 0 0 0 0
34506 - 0 0 0 0 0 0 0 0 1 0 0 1
34507 - 0 0 1 0 0 0 0 0 1 0 0 0
34508 - 0 0 0 0 0 0 0 0 0 0 0 0
34509 - 0 0 0 0 0 0 0 0 0 0 0 0
34510 - 0 0 0 0 0 0 0 0 0 0 0 0
34511 - 0 0 0 0 0 0 0 0 0 0 0 0
34512 - 0 0 0 0 0 0 0 0 0 10 10 10
34513 - 38 38 38 90 90 90 14 14 14 58 58 58
34514 -210 210 210 26 26 26 54 38 6 154 114 10
34515 -226 170 11 236 186 11 225 175 15 184 144 12
34516 -215 174 15 175 146 61 37 26 9 2 2 6
34517 - 70 70 70 246 246 246 138 138 138 2 2 6
34518 - 2 2 6 2 2 6 2 2 6 2 2 6
34519 - 70 70 70 66 66 66 26 26 26 6 6 6
34520 - 0 0 0 0 0 0 0 0 0 0 0 0
34521 - 0 0 0 0 0 0 0 0 0 0 0 0
34522 - 0 0 0 0 0 0 0 0 0 0 0 0
34523 - 0 0 0 0 0 0 0 0 0 0 0 0
34524 - 0 0 0 0 0 0 0 0 0 0 0 0
34525 - 0 0 0 0 0 0 0 0 0 0 0 0
34526 - 0 0 0 0 0 0 0 0 0 0 0 0
34527 - 0 0 0 0 0 0 0 0 0 0 0 0
34528 - 0 0 0 0 0 0 0 0 0 0 0 0
34529 - 0 0 0 0 0 0 0 0 0 0 0 0
34530 - 0 0 0 0 0 0 0 0 0 0 0 0
34531 - 0 0 0 0 0 0 0 0 0 0 0 0
34532 - 0 0 0 0 0 0 0 0 0 10 10 10
34533 - 38 38 38 86 86 86 14 14 14 10 10 10
34534 -195 195 195 188 164 115 192 133 9 225 175 15
34535 -239 182 13 234 190 10 232 195 16 232 200 30
34536 -245 207 45 241 208 19 232 195 16 184 144 12
34537 -218 194 134 211 206 186 42 42 42 2 2 6
34538 - 2 2 6 2 2 6 2 2 6 2 2 6
34539 - 50 50 50 74 74 74 30 30 30 6 6 6
34540 - 0 0 0 0 0 0 0 0 0 0 0 0
34541 - 0 0 0 0 0 0 0 0 0 0 0 0
34542 - 0 0 0 0 0 0 0 0 0 0 0 0
34543 - 0 0 0 0 0 0 0 0 0 0 0 0
34544 - 0 0 0 0 0 0 0 0 0 0 0 0
34545 - 0 0 0 0 0 0 0 0 0 0 0 0
34546 - 0 0 0 0 0 0 0 0 0 0 0 0
34547 - 0 0 0 0 0 0 0 0 0 0 0 0
34548 - 0 0 0 0 0 0 0 0 0 0 0 0
34549 - 0 0 0 0 0 0 0 0 0 0 0 0
34550 - 0 0 0 0 0 0 0 0 0 0 0 0
34551 - 0 0 0 0 0 0 0 0 0 0 0 0
34552 - 0 0 0 0 0 0 0 0 0 10 10 10
34553 - 34 34 34 86 86 86 14 14 14 2 2 6
34554 -121 87 25 192 133 9 219 162 10 239 182 13
34555 -236 186 11 232 195 16 241 208 19 244 214 54
34556 -246 218 60 246 218 38 246 215 20 241 208 19
34557 -241 208 19 226 184 13 121 87 25 2 2 6
34558 - 2 2 6 2 2 6 2 2 6 2 2 6
34559 - 50 50 50 82 82 82 34 34 34 10 10 10
34560 - 0 0 0 0 0 0 0 0 0 0 0 0
34561 - 0 0 0 0 0 0 0 0 0 0 0 0
34562 - 0 0 0 0 0 0 0 0 0 0 0 0
34563 - 0 0 0 0 0 0 0 0 0 0 0 0
34564 - 0 0 0 0 0 0 0 0 0 0 0 0
34565 - 0 0 0 0 0 0 0 0 0 0 0 0
34566 - 0 0 0 0 0 0 0 0 0 0 0 0
34567 - 0 0 0 0 0 0 0 0 0 0 0 0
34568 - 0 0 0 0 0 0 0 0 0 0 0 0
34569 - 0 0 0 0 0 0 0 0 0 0 0 0
34570 - 0 0 0 0 0 0 0 0 0 0 0 0
34571 - 0 0 0 0 0 0 0 0 0 0 0 0
34572 - 0 0 0 0 0 0 0 0 0 10 10 10
34573 - 34 34 34 82 82 82 30 30 30 61 42 6
34574 -180 123 7 206 145 10 230 174 11 239 182 13
34575 -234 190 10 238 202 15 241 208 19 246 218 74
34576 -246 218 38 246 215 20 246 215 20 246 215 20
34577 -226 184 13 215 174 15 184 144 12 6 6 6
34578 - 2 2 6 2 2 6 2 2 6 2 2 6
34579 - 26 26 26 94 94 94 42 42 42 14 14 14
34580 - 0 0 0 0 0 0 0 0 0 0 0 0
34581 - 0 0 0 0 0 0 0 0 0 0 0 0
34582 - 0 0 0 0 0 0 0 0 0 0 0 0
34583 - 0 0 0 0 0 0 0 0 0 0 0 0
34584 - 0 0 0 0 0 0 0 0 0 0 0 0
34585 - 0 0 0 0 0 0 0 0 0 0 0 0
34586 - 0 0 0 0 0 0 0 0 0 0 0 0
34587 - 0 0 0 0 0 0 0 0 0 0 0 0
34588 - 0 0 0 0 0 0 0 0 0 0 0 0
34589 - 0 0 0 0 0 0 0 0 0 0 0 0
34590 - 0 0 0 0 0 0 0 0 0 0 0 0
34591 - 0 0 0 0 0 0 0 0 0 0 0 0
34592 - 0 0 0 0 0 0 0 0 0 10 10 10
34593 - 30 30 30 78 78 78 50 50 50 104 69 6
34594 -192 133 9 216 158 10 236 178 12 236 186 11
34595 -232 195 16 241 208 19 244 214 54 245 215 43
34596 -246 215 20 246 215 20 241 208 19 198 155 10
34597 -200 144 11 216 158 10 156 118 10 2 2 6
34598 - 2 2 6 2 2 6 2 2 6 2 2 6
34599 - 6 6 6 90 90 90 54 54 54 18 18 18
34600 - 6 6 6 0 0 0 0 0 0 0 0 0
34601 - 0 0 0 0 0 0 0 0 0 0 0 0
34602 - 0 0 0 0 0 0 0 0 0 0 0 0
34603 - 0 0 0 0 0 0 0 0 0 0 0 0
34604 - 0 0 0 0 0 0 0 0 0 0 0 0
34605 - 0 0 0 0 0 0 0 0 0 0 0 0
34606 - 0 0 0 0 0 0 0 0 0 0 0 0
34607 - 0 0 0 0 0 0 0 0 0 0 0 0
34608 - 0 0 0 0 0 0 0 0 0 0 0 0
34609 - 0 0 0 0 0 0 0 0 0 0 0 0
34610 - 0 0 0 0 0 0 0 0 0 0 0 0
34611 - 0 0 0 0 0 0 0 0 0 0 0 0
34612 - 0 0 0 0 0 0 0 0 0 10 10 10
34613 - 30 30 30 78 78 78 46 46 46 22 22 22
34614 -137 92 6 210 162 10 239 182 13 238 190 10
34615 -238 202 15 241 208 19 246 215 20 246 215 20
34616 -241 208 19 203 166 17 185 133 11 210 150 10
34617 -216 158 10 210 150 10 102 78 10 2 2 6
34618 - 6 6 6 54 54 54 14 14 14 2 2 6
34619 - 2 2 6 62 62 62 74 74 74 30 30 30
34620 - 10 10 10 0 0 0 0 0 0 0 0 0
34621 - 0 0 0 0 0 0 0 0 0 0 0 0
34622 - 0 0 0 0 0 0 0 0 0 0 0 0
34623 - 0 0 0 0 0 0 0 0 0 0 0 0
34624 - 0 0 0 0 0 0 0 0 0 0 0 0
34625 - 0 0 0 0 0 0 0 0 0 0 0 0
34626 - 0 0 0 0 0 0 0 0 0 0 0 0
34627 - 0 0 0 0 0 0 0 0 0 0 0 0
34628 - 0 0 0 0 0 0 0 0 0 0 0 0
34629 - 0 0 0 0 0 0 0 0 0 0 0 0
34630 - 0 0 0 0 0 0 0 0 0 0 0 0
34631 - 0 0 0 0 0 0 0 0 0 0 0 0
34632 - 0 0 0 0 0 0 0 0 0 10 10 10
34633 - 34 34 34 78 78 78 50 50 50 6 6 6
34634 - 94 70 30 139 102 15 190 146 13 226 184 13
34635 -232 200 30 232 195 16 215 174 15 190 146 13
34636 -168 122 10 192 133 9 210 150 10 213 154 11
34637 -202 150 34 182 157 106 101 98 89 2 2 6
34638 - 2 2 6 78 78 78 116 116 116 58 58 58
34639 - 2 2 6 22 22 22 90 90 90 46 46 46
34640 - 18 18 18 6 6 6 0 0 0 0 0 0
34641 - 0 0 0 0 0 0 0 0 0 0 0 0
34642 - 0 0 0 0 0 0 0 0 0 0 0 0
34643 - 0 0 0 0 0 0 0 0 0 0 0 0
34644 - 0 0 0 0 0 0 0 0 0 0 0 0
34645 - 0 0 0 0 0 0 0 0 0 0 0 0
34646 - 0 0 0 0 0 0 0 0 0 0 0 0
34647 - 0 0 0 0 0 0 0 0 0 0 0 0
34648 - 0 0 0 0 0 0 0 0 0 0 0 0
34649 - 0 0 0 0 0 0 0 0 0 0 0 0
34650 - 0 0 0 0 0 0 0 0 0 0 0 0
34651 - 0 0 0 0 0 0 0 0 0 0 0 0
34652 - 0 0 0 0 0 0 0 0 0 10 10 10
34653 - 38 38 38 86 86 86 50 50 50 6 6 6
34654 -128 128 128 174 154 114 156 107 11 168 122 10
34655 -198 155 10 184 144 12 197 138 11 200 144 11
34656 -206 145 10 206 145 10 197 138 11 188 164 115
34657 -195 195 195 198 198 198 174 174 174 14 14 14
34658 - 2 2 6 22 22 22 116 116 116 116 116 116
34659 - 22 22 22 2 2 6 74 74 74 70 70 70
34660 - 30 30 30 10 10 10 0 0 0 0 0 0
34661 - 0 0 0 0 0 0 0 0 0 0 0 0
34662 - 0 0 0 0 0 0 0 0 0 0 0 0
34663 - 0 0 0 0 0 0 0 0 0 0 0 0
34664 - 0 0 0 0 0 0 0 0 0 0 0 0
34665 - 0 0 0 0 0 0 0 0 0 0 0 0
34666 - 0 0 0 0 0 0 0 0 0 0 0 0
34667 - 0 0 0 0 0 0 0 0 0 0 0 0
34668 - 0 0 0 0 0 0 0 0 0 0 0 0
34669 - 0 0 0 0 0 0 0 0 0 0 0 0
34670 - 0 0 0 0 0 0 0 0 0 0 0 0
34671 - 0 0 0 0 0 0 0 0 0 0 0 0
34672 - 0 0 0 0 0 0 6 6 6 18 18 18
34673 - 50 50 50 101 101 101 26 26 26 10 10 10
34674 -138 138 138 190 190 190 174 154 114 156 107 11
34675 -197 138 11 200 144 11 197 138 11 192 133 9
34676 -180 123 7 190 142 34 190 178 144 187 187 187
34677 -202 202 202 221 221 221 214 214 214 66 66 66
34678 - 2 2 6 2 2 6 50 50 50 62 62 62
34679 - 6 6 6 2 2 6 10 10 10 90 90 90
34680 - 50 50 50 18 18 18 6 6 6 0 0 0
34681 - 0 0 0 0 0 0 0 0 0 0 0 0
34682 - 0 0 0 0 0 0 0 0 0 0 0 0
34683 - 0 0 0 0 0 0 0 0 0 0 0 0
34684 - 0 0 0 0 0 0 0 0 0 0 0 0
34685 - 0 0 0 0 0 0 0 0 0 0 0 0
34686 - 0 0 0 0 0 0 0 0 0 0 0 0
34687 - 0 0 0 0 0 0 0 0 0 0 0 0
34688 - 0 0 0 0 0 0 0 0 0 0 0 0
34689 - 0 0 0 0 0 0 0 0 0 0 0 0
34690 - 0 0 0 0 0 0 0 0 0 0 0 0
34691 - 0 0 0 0 0 0 0 0 0 0 0 0
34692 - 0 0 0 0 0 0 10 10 10 34 34 34
34693 - 74 74 74 74 74 74 2 2 6 6 6 6
34694 -144 144 144 198 198 198 190 190 190 178 166 146
34695 -154 121 60 156 107 11 156 107 11 168 124 44
34696 -174 154 114 187 187 187 190 190 190 210 210 210
34697 -246 246 246 253 253 253 253 253 253 182 182 182
34698 - 6 6 6 2 2 6 2 2 6 2 2 6
34699 - 2 2 6 2 2 6 2 2 6 62 62 62
34700 - 74 74 74 34 34 34 14 14 14 0 0 0
34701 - 0 0 0 0 0 0 0 0 0 0 0 0
34702 - 0 0 0 0 0 0 0 0 0 0 0 0
34703 - 0 0 0 0 0 0 0 0 0 0 0 0
34704 - 0 0 0 0 0 0 0 0 0 0 0 0
34705 - 0 0 0 0 0 0 0 0 0 0 0 0
34706 - 0 0 0 0 0 0 0 0 0 0 0 0
34707 - 0 0 0 0 0 0 0 0 0 0 0 0
34708 - 0 0 0 0 0 0 0 0 0 0 0 0
34709 - 0 0 0 0 0 0 0 0 0 0 0 0
34710 - 0 0 0 0 0 0 0 0 0 0 0 0
34711 - 0 0 0 0 0 0 0 0 0 0 0 0
34712 - 0 0 0 10 10 10 22 22 22 54 54 54
34713 - 94 94 94 18 18 18 2 2 6 46 46 46
34714 -234 234 234 221 221 221 190 190 190 190 190 190
34715 -190 190 190 187 187 187 187 187 187 190 190 190
34716 -190 190 190 195 195 195 214 214 214 242 242 242
34717 -253 253 253 253 253 253 253 253 253 253 253 253
34718 - 82 82 82 2 2 6 2 2 6 2 2 6
34719 - 2 2 6 2 2 6 2 2 6 14 14 14
34720 - 86 86 86 54 54 54 22 22 22 6 6 6
34721 - 0 0 0 0 0 0 0 0 0 0 0 0
34722 - 0 0 0 0 0 0 0 0 0 0 0 0
34723 - 0 0 0 0 0 0 0 0 0 0 0 0
34724 - 0 0 0 0 0 0 0 0 0 0 0 0
34725 - 0 0 0 0 0 0 0 0 0 0 0 0
34726 - 0 0 0 0 0 0 0 0 0 0 0 0
34727 - 0 0 0 0 0 0 0 0 0 0 0 0
34728 - 0 0 0 0 0 0 0 0 0 0 0 0
34729 - 0 0 0 0 0 0 0 0 0 0 0 0
34730 - 0 0 0 0 0 0 0 0 0 0 0 0
34731 - 0 0 0 0 0 0 0 0 0 0 0 0
34732 - 6 6 6 18 18 18 46 46 46 90 90 90
34733 - 46 46 46 18 18 18 6 6 6 182 182 182
34734 -253 253 253 246 246 246 206 206 206 190 190 190
34735 -190 190 190 190 190 190 190 190 190 190 190 190
34736 -206 206 206 231 231 231 250 250 250 253 253 253
34737 -253 253 253 253 253 253 253 253 253 253 253 253
34738 -202 202 202 14 14 14 2 2 6 2 2 6
34739 - 2 2 6 2 2 6 2 2 6 2 2 6
34740 - 42 42 42 86 86 86 42 42 42 18 18 18
34741 - 6 6 6 0 0 0 0 0 0 0 0 0
34742 - 0 0 0 0 0 0 0 0 0 0 0 0
34743 - 0 0 0 0 0 0 0 0 0 0 0 0
34744 - 0 0 0 0 0 0 0 0 0 0 0 0
34745 - 0 0 0 0 0 0 0 0 0 0 0 0
34746 - 0 0 0 0 0 0 0 0 0 0 0 0
34747 - 0 0 0 0 0 0 0 0 0 0 0 0
34748 - 0 0 0 0 0 0 0 0 0 0 0 0
34749 - 0 0 0 0 0 0 0 0 0 0 0 0
34750 - 0 0 0 0 0 0 0 0 0 0 0 0
34751 - 0 0 0 0 0 0 0 0 0 6 6 6
34752 - 14 14 14 38 38 38 74 74 74 66 66 66
34753 - 2 2 6 6 6 6 90 90 90 250 250 250
34754 -253 253 253 253 253 253 238 238 238 198 198 198
34755 -190 190 190 190 190 190 195 195 195 221 221 221
34756 -246 246 246 253 253 253 253 253 253 253 253 253
34757 -253 253 253 253 253 253 253 253 253 253 253 253
34758 -253 253 253 82 82 82 2 2 6 2 2 6
34759 - 2 2 6 2 2 6 2 2 6 2 2 6
34760 - 2 2 6 78 78 78 70 70 70 34 34 34
34761 - 14 14 14 6 6 6 0 0 0 0 0 0
34762 - 0 0 0 0 0 0 0 0 0 0 0 0
34763 - 0 0 0 0 0 0 0 0 0 0 0 0
34764 - 0 0 0 0 0 0 0 0 0 0 0 0
34765 - 0 0 0 0 0 0 0 0 0 0 0 0
34766 - 0 0 0 0 0 0 0 0 0 0 0 0
34767 - 0 0 0 0 0 0 0 0 0 0 0 0
34768 - 0 0 0 0 0 0 0 0 0 0 0 0
34769 - 0 0 0 0 0 0 0 0 0 0 0 0
34770 - 0 0 0 0 0 0 0 0 0 0 0 0
34771 - 0 0 0 0 0 0 0 0 0 14 14 14
34772 - 34 34 34 66 66 66 78 78 78 6 6 6
34773 - 2 2 6 18 18 18 218 218 218 253 253 253
34774 -253 253 253 253 253 253 253 253 253 246 246 246
34775 -226 226 226 231 231 231 246 246 246 253 253 253
34776 -253 253 253 253 253 253 253 253 253 253 253 253
34777 -253 253 253 253 253 253 253 253 253 253 253 253
34778 -253 253 253 178 178 178 2 2 6 2 2 6
34779 - 2 2 6 2 2 6 2 2 6 2 2 6
34780 - 2 2 6 18 18 18 90 90 90 62 62 62
34781 - 30 30 30 10 10 10 0 0 0 0 0 0
34782 - 0 0 0 0 0 0 0 0 0 0 0 0
34783 - 0 0 0 0 0 0 0 0 0 0 0 0
34784 - 0 0 0 0 0 0 0 0 0 0 0 0
34785 - 0 0 0 0 0 0 0 0 0 0 0 0
34786 - 0 0 0 0 0 0 0 0 0 0 0 0
34787 - 0 0 0 0 0 0 0 0 0 0 0 0
34788 - 0 0 0 0 0 0 0 0 0 0 0 0
34789 - 0 0 0 0 0 0 0 0 0 0 0 0
34790 - 0 0 0 0 0 0 0 0 0 0 0 0
34791 - 0 0 0 0 0 0 10 10 10 26 26 26
34792 - 58 58 58 90 90 90 18 18 18 2 2 6
34793 - 2 2 6 110 110 110 253 253 253 253 253 253
34794 -253 253 253 253 253 253 253 253 253 253 253 253
34795 -250 250 250 253 253 253 253 253 253 253 253 253
34796 -253 253 253 253 253 253 253 253 253 253 253 253
34797 -253 253 253 253 253 253 253 253 253 253 253 253
34798 -253 253 253 231 231 231 18 18 18 2 2 6
34799 - 2 2 6 2 2 6 2 2 6 2 2 6
34800 - 2 2 6 2 2 6 18 18 18 94 94 94
34801 - 54 54 54 26 26 26 10 10 10 0 0 0
34802 - 0 0 0 0 0 0 0 0 0 0 0 0
34803 - 0 0 0 0 0 0 0 0 0 0 0 0
34804 - 0 0 0 0 0 0 0 0 0 0 0 0
34805 - 0 0 0 0 0 0 0 0 0 0 0 0
34806 - 0 0 0 0 0 0 0 0 0 0 0 0
34807 - 0 0 0 0 0 0 0 0 0 0 0 0
34808 - 0 0 0 0 0 0 0 0 0 0 0 0
34809 - 0 0 0 0 0 0 0 0 0 0 0 0
34810 - 0 0 0 0 0 0 0 0 0 0 0 0
34811 - 0 0 0 6 6 6 22 22 22 50 50 50
34812 - 90 90 90 26 26 26 2 2 6 2 2 6
34813 - 14 14 14 195 195 195 250 250 250 253 253 253
34814 -253 253 253 253 253 253 253 253 253 253 253 253
34815 -253 253 253 253 253 253 253 253 253 253 253 253
34816 -253 253 253 253 253 253 253 253 253 253 253 253
34817 -253 253 253 253 253 253 253 253 253 253 253 253
34818 -250 250 250 242 242 242 54 54 54 2 2 6
34819 - 2 2 6 2 2 6 2 2 6 2 2 6
34820 - 2 2 6 2 2 6 2 2 6 38 38 38
34821 - 86 86 86 50 50 50 22 22 22 6 6 6
34822 - 0 0 0 0 0 0 0 0 0 0 0 0
34823 - 0 0 0 0 0 0 0 0 0 0 0 0
34824 - 0 0 0 0 0 0 0 0 0 0 0 0
34825 - 0 0 0 0 0 0 0 0 0 0 0 0
34826 - 0 0 0 0 0 0 0 0 0 0 0 0
34827 - 0 0 0 0 0 0 0 0 0 0 0 0
34828 - 0 0 0 0 0 0 0 0 0 0 0 0
34829 - 0 0 0 0 0 0 0 0 0 0 0 0
34830 - 0 0 0 0 0 0 0 0 0 0 0 0
34831 - 6 6 6 14 14 14 38 38 38 82 82 82
34832 - 34 34 34 2 2 6 2 2 6 2 2 6
34833 - 42 42 42 195 195 195 246 246 246 253 253 253
34834 -253 253 253 253 253 253 253 253 253 250 250 250
34835 -242 242 242 242 242 242 250 250 250 253 253 253
34836 -253 253 253 253 253 253 253 253 253 253 253 253
34837 -253 253 253 250 250 250 246 246 246 238 238 238
34838 -226 226 226 231 231 231 101 101 101 6 6 6
34839 - 2 2 6 2 2 6 2 2 6 2 2 6
34840 - 2 2 6 2 2 6 2 2 6 2 2 6
34841 - 38 38 38 82 82 82 42 42 42 14 14 14
34842 - 6 6 6 0 0 0 0 0 0 0 0 0
34843 - 0 0 0 0 0 0 0 0 0 0 0 0
34844 - 0 0 0 0 0 0 0 0 0 0 0 0
34845 - 0 0 0 0 0 0 0 0 0 0 0 0
34846 - 0 0 0 0 0 0 0 0 0 0 0 0
34847 - 0 0 0 0 0 0 0 0 0 0 0 0
34848 - 0 0 0 0 0 0 0 0 0 0 0 0
34849 - 0 0 0 0 0 0 0 0 0 0 0 0
34850 - 0 0 0 0 0 0 0 0 0 0 0 0
34851 - 10 10 10 26 26 26 62 62 62 66 66 66
34852 - 2 2 6 2 2 6 2 2 6 6 6 6
34853 - 70 70 70 170 170 170 206 206 206 234 234 234
34854 -246 246 246 250 250 250 250 250 250 238 238 238
34855 -226 226 226 231 231 231 238 238 238 250 250 250
34856 -250 250 250 250 250 250 246 246 246 231 231 231
34857 -214 214 214 206 206 206 202 202 202 202 202 202
34858 -198 198 198 202 202 202 182 182 182 18 18 18
34859 - 2 2 6 2 2 6 2 2 6 2 2 6
34860 - 2 2 6 2 2 6 2 2 6 2 2 6
34861 - 2 2 6 62 62 62 66 66 66 30 30 30
34862 - 10 10 10 0 0 0 0 0 0 0 0 0
34863 - 0 0 0 0 0 0 0 0 0 0 0 0
34864 - 0 0 0 0 0 0 0 0 0 0 0 0
34865 - 0 0 0 0 0 0 0 0 0 0 0 0
34866 - 0 0 0 0 0 0 0 0 0 0 0 0
34867 - 0 0 0 0 0 0 0 0 0 0 0 0
34868 - 0 0 0 0 0 0 0 0 0 0 0 0
34869 - 0 0 0 0 0 0 0 0 0 0 0 0
34870 - 0 0 0 0 0 0 0 0 0 0 0 0
34871 - 14 14 14 42 42 42 82 82 82 18 18 18
34872 - 2 2 6 2 2 6 2 2 6 10 10 10
34873 - 94 94 94 182 182 182 218 218 218 242 242 242
34874 -250 250 250 253 253 253 253 253 253 250 250 250
34875 -234 234 234 253 253 253 253 253 253 253 253 253
34876 -253 253 253 253 253 253 253 253 253 246 246 246
34877 -238 238 238 226 226 226 210 210 210 202 202 202
34878 -195 195 195 195 195 195 210 210 210 158 158 158
34879 - 6 6 6 14 14 14 50 50 50 14 14 14
34880 - 2 2 6 2 2 6 2 2 6 2 2 6
34881 - 2 2 6 6 6 6 86 86 86 46 46 46
34882 - 18 18 18 6 6 6 0 0 0 0 0 0
34883 - 0 0 0 0 0 0 0 0 0 0 0 0
34884 - 0 0 0 0 0 0 0 0 0 0 0 0
34885 - 0 0 0 0 0 0 0 0 0 0 0 0
34886 - 0 0 0 0 0 0 0 0 0 0 0 0
34887 - 0 0 0 0 0 0 0 0 0 0 0 0
34888 - 0 0 0 0 0 0 0 0 0 0 0 0
34889 - 0 0 0 0 0 0 0 0 0 0 0 0
34890 - 0 0 0 0 0 0 0 0 0 6 6 6
34891 - 22 22 22 54 54 54 70 70 70 2 2 6
34892 - 2 2 6 10 10 10 2 2 6 22 22 22
34893 -166 166 166 231 231 231 250 250 250 253 253 253
34894 -253 253 253 253 253 253 253 253 253 250 250 250
34895 -242 242 242 253 253 253 253 253 253 253 253 253
34896 -253 253 253 253 253 253 253 253 253 253 253 253
34897 -253 253 253 253 253 253 253 253 253 246 246 246
34898 -231 231 231 206 206 206 198 198 198 226 226 226
34899 - 94 94 94 2 2 6 6 6 6 38 38 38
34900 - 30 30 30 2 2 6 2 2 6 2 2 6
34901 - 2 2 6 2 2 6 62 62 62 66 66 66
34902 - 26 26 26 10 10 10 0 0 0 0 0 0
34903 - 0 0 0 0 0 0 0 0 0 0 0 0
34904 - 0 0 0 0 0 0 0 0 0 0 0 0
34905 - 0 0 0 0 0 0 0 0 0 0 0 0
34906 - 0 0 0 0 0 0 0 0 0 0 0 0
34907 - 0 0 0 0 0 0 0 0 0 0 0 0
34908 - 0 0 0 0 0 0 0 0 0 0 0 0
34909 - 0 0 0 0 0 0 0 0 0 0 0 0
34910 - 0 0 0 0 0 0 0 0 0 10 10 10
34911 - 30 30 30 74 74 74 50 50 50 2 2 6
34912 - 26 26 26 26 26 26 2 2 6 106 106 106
34913 -238 238 238 253 253 253 253 253 253 253 253 253
34914 -253 253 253 253 253 253 253 253 253 253 253 253
34915 -253 253 253 253 253 253 253 253 253 253 253 253
34916 -253 253 253 253 253 253 253 253 253 253 253 253
34917 -253 253 253 253 253 253 253 253 253 253 253 253
34918 -253 253 253 246 246 246 218 218 218 202 202 202
34919 -210 210 210 14 14 14 2 2 6 2 2 6
34920 - 30 30 30 22 22 22 2 2 6 2 2 6
34921 - 2 2 6 2 2 6 18 18 18 86 86 86
34922 - 42 42 42 14 14 14 0 0 0 0 0 0
34923 - 0 0 0 0 0 0 0 0 0 0 0 0
34924 - 0 0 0 0 0 0 0 0 0 0 0 0
34925 - 0 0 0 0 0 0 0 0 0 0 0 0
34926 - 0 0 0 0 0 0 0 0 0 0 0 0
34927 - 0 0 0 0 0 0 0 0 0 0 0 0
34928 - 0 0 0 0 0 0 0 0 0 0 0 0
34929 - 0 0 0 0 0 0 0 0 0 0 0 0
34930 - 0 0 0 0 0 0 0 0 0 14 14 14
34931 - 42 42 42 90 90 90 22 22 22 2 2 6
34932 - 42 42 42 2 2 6 18 18 18 218 218 218
34933 -253 253 253 253 253 253 253 253 253 253 253 253
34934 -253 253 253 253 253 253 253 253 253 253 253 253
34935 -253 253 253 253 253 253 253 253 253 253 253 253
34936 -253 253 253 253 253 253 253 253 253 253 253 253
34937 -253 253 253 253 253 253 253 253 253 253 253 253
34938 -253 253 253 253 253 253 250 250 250 221 221 221
34939 -218 218 218 101 101 101 2 2 6 14 14 14
34940 - 18 18 18 38 38 38 10 10 10 2 2 6
34941 - 2 2 6 2 2 6 2 2 6 78 78 78
34942 - 58 58 58 22 22 22 6 6 6 0 0 0
34943 - 0 0 0 0 0 0 0 0 0 0 0 0
34944 - 0 0 0 0 0 0 0 0 0 0 0 0
34945 - 0 0 0 0 0 0 0 0 0 0 0 0
34946 - 0 0 0 0 0 0 0 0 0 0 0 0
34947 - 0 0 0 0 0 0 0 0 0 0 0 0
34948 - 0 0 0 0 0 0 0 0 0 0 0 0
34949 - 0 0 0 0 0 0 0 0 0 0 0 0
34950 - 0 0 0 0 0 0 6 6 6 18 18 18
34951 - 54 54 54 82 82 82 2 2 6 26 26 26
34952 - 22 22 22 2 2 6 123 123 123 253 253 253
34953 -253 253 253 253 253 253 253 253 253 253 253 253
34954 -253 253 253 253 253 253 253 253 253 253 253 253
34955 -253 253 253 253 253 253 253 253 253 253 253 253
34956 -253 253 253 253 253 253 253 253 253 253 253 253
34957 -253 253 253 253 253 253 253 253 253 253 253 253
34958 -253 253 253 253 253 253 253 253 253 250 250 250
34959 -238 238 238 198 198 198 6 6 6 38 38 38
34960 - 58 58 58 26 26 26 38 38 38 2 2 6
34961 - 2 2 6 2 2 6 2 2 6 46 46 46
34962 - 78 78 78 30 30 30 10 10 10 0 0 0
34963 - 0 0 0 0 0 0 0 0 0 0 0 0
34964 - 0 0 0 0 0 0 0 0 0 0 0 0
34965 - 0 0 0 0 0 0 0 0 0 0 0 0
34966 - 0 0 0 0 0 0 0 0 0 0 0 0
34967 - 0 0 0 0 0 0 0 0 0 0 0 0
34968 - 0 0 0 0 0 0 0 0 0 0 0 0
34969 - 0 0 0 0 0 0 0 0 0 0 0 0
34970 - 0 0 0 0 0 0 10 10 10 30 30 30
34971 - 74 74 74 58 58 58 2 2 6 42 42 42
34972 - 2 2 6 22 22 22 231 231 231 253 253 253
34973 -253 253 253 253 253 253 253 253 253 253 253 253
34974 -253 253 253 253 253 253 253 253 253 250 250 250
34975 -253 253 253 253 253 253 253 253 253 253 253 253
34976 -253 253 253 253 253 253 253 253 253 253 253 253
34977 -253 253 253 253 253 253 253 253 253 253 253 253
34978 -253 253 253 253 253 253 253 253 253 253 253 253
34979 -253 253 253 246 246 246 46 46 46 38 38 38
34980 - 42 42 42 14 14 14 38 38 38 14 14 14
34981 - 2 2 6 2 2 6 2 2 6 6 6 6
34982 - 86 86 86 46 46 46 14 14 14 0 0 0
34983 - 0 0 0 0 0 0 0 0 0 0 0 0
34984 - 0 0 0 0 0 0 0 0 0 0 0 0
34985 - 0 0 0 0 0 0 0 0 0 0 0 0
34986 - 0 0 0 0 0 0 0 0 0 0 0 0
34987 - 0 0 0 0 0 0 0 0 0 0 0 0
34988 - 0 0 0 0 0 0 0 0 0 0 0 0
34989 - 0 0 0 0 0 0 0 0 0 0 0 0
34990 - 0 0 0 6 6 6 14 14 14 42 42 42
34991 - 90 90 90 18 18 18 18 18 18 26 26 26
34992 - 2 2 6 116 116 116 253 253 253 253 253 253
34993 -253 253 253 253 253 253 253 253 253 253 253 253
34994 -253 253 253 253 253 253 250 250 250 238 238 238
34995 -253 253 253 253 253 253 253 253 253 253 253 253
34996 -253 253 253 253 253 253 253 253 253 253 253 253
34997 -253 253 253 253 253 253 253 253 253 253 253 253
34998 -253 253 253 253 253 253 253 253 253 253 253 253
34999 -253 253 253 253 253 253 94 94 94 6 6 6
35000 - 2 2 6 2 2 6 10 10 10 34 34 34
35001 - 2 2 6 2 2 6 2 2 6 2 2 6
35002 - 74 74 74 58 58 58 22 22 22 6 6 6
35003 - 0 0 0 0 0 0 0 0 0 0 0 0
35004 - 0 0 0 0 0 0 0 0 0 0 0 0
35005 - 0 0 0 0 0 0 0 0 0 0 0 0
35006 - 0 0 0 0 0 0 0 0 0 0 0 0
35007 - 0 0 0 0 0 0 0 0 0 0 0 0
35008 - 0 0 0 0 0 0 0 0 0 0 0 0
35009 - 0 0 0 0 0 0 0 0 0 0 0 0
35010 - 0 0 0 10 10 10 26 26 26 66 66 66
35011 - 82 82 82 2 2 6 38 38 38 6 6 6
35012 - 14 14 14 210 210 210 253 253 253 253 253 253
35013 -253 253 253 253 253 253 253 253 253 253 253 253
35014 -253 253 253 253 253 253 246 246 246 242 242 242
35015 -253 253 253 253 253 253 253 253 253 253 253 253
35016 -253 253 253 253 253 253 253 253 253 253 253 253
35017 -253 253 253 253 253 253 253 253 253 253 253 253
35018 -253 253 253 253 253 253 253 253 253 253 253 253
35019 -253 253 253 253 253 253 144 144 144 2 2 6
35020 - 2 2 6 2 2 6 2 2 6 46 46 46
35021 - 2 2 6 2 2 6 2 2 6 2 2 6
35022 - 42 42 42 74 74 74 30 30 30 10 10 10
35023 - 0 0 0 0 0 0 0 0 0 0 0 0
35024 - 0 0 0 0 0 0 0 0 0 0 0 0
35025 - 0 0 0 0 0 0 0 0 0 0 0 0
35026 - 0 0 0 0 0 0 0 0 0 0 0 0
35027 - 0 0 0 0 0 0 0 0 0 0 0 0
35028 - 0 0 0 0 0 0 0 0 0 0 0 0
35029 - 0 0 0 0 0 0 0 0 0 0 0 0
35030 - 6 6 6 14 14 14 42 42 42 90 90 90
35031 - 26 26 26 6 6 6 42 42 42 2 2 6
35032 - 74 74 74 250 250 250 253 253 253 253 253 253
35033 -253 253 253 253 253 253 253 253 253 253 253 253
35034 -253 253 253 253 253 253 242 242 242 242 242 242
35035 -253 253 253 253 253 253 253 253 253 253 253 253
35036 -253 253 253 253 253 253 253 253 253 253 253 253
35037 -253 253 253 253 253 253 253 253 253 253 253 253
35038 -253 253 253 253 253 253 253 253 253 253 253 253
35039 -253 253 253 253 253 253 182 182 182 2 2 6
35040 - 2 2 6 2 2 6 2 2 6 46 46 46
35041 - 2 2 6 2 2 6 2 2 6 2 2 6
35042 - 10 10 10 86 86 86 38 38 38 10 10 10
35043 - 0 0 0 0 0 0 0 0 0 0 0 0
35044 - 0 0 0 0 0 0 0 0 0 0 0 0
35045 - 0 0 0 0 0 0 0 0 0 0 0 0
35046 - 0 0 0 0 0 0 0 0 0 0 0 0
35047 - 0 0 0 0 0 0 0 0 0 0 0 0
35048 - 0 0 0 0 0 0 0 0 0 0 0 0
35049 - 0 0 0 0 0 0 0 0 0 0 0 0
35050 - 10 10 10 26 26 26 66 66 66 82 82 82
35051 - 2 2 6 22 22 22 18 18 18 2 2 6
35052 -149 149 149 253 253 253 253 253 253 253 253 253
35053 -253 253 253 253 253 253 253 253 253 253 253 253
35054 -253 253 253 253 253 253 234 234 234 242 242 242
35055 -253 253 253 253 253 253 253 253 253 253 253 253
35056 -253 253 253 253 253 253 253 253 253 253 253 253
35057 -253 253 253 253 253 253 253 253 253 253 253 253
35058 -253 253 253 253 253 253 253 253 253 253 253 253
35059 -253 253 253 253 253 253 206 206 206 2 2 6
35060 - 2 2 6 2 2 6 2 2 6 38 38 38
35061 - 2 2 6 2 2 6 2 2 6 2 2 6
35062 - 6 6 6 86 86 86 46 46 46 14 14 14
35063 - 0 0 0 0 0 0 0 0 0 0 0 0
35064 - 0 0 0 0 0 0 0 0 0 0 0 0
35065 - 0 0 0 0 0 0 0 0 0 0 0 0
35066 - 0 0 0 0 0 0 0 0 0 0 0 0
35067 - 0 0 0 0 0 0 0 0 0 0 0 0
35068 - 0 0 0 0 0 0 0 0 0 0 0 0
35069 - 0 0 0 0 0 0 0 0 0 6 6 6
35070 - 18 18 18 46 46 46 86 86 86 18 18 18
35071 - 2 2 6 34 34 34 10 10 10 6 6 6
35072 -210 210 210 253 253 253 253 253 253 253 253 253
35073 -253 253 253 253 253 253 253 253 253 253 253 253
35074 -253 253 253 253 253 253 234 234 234 242 242 242
35075 -253 253 253 253 253 253 253 253 253 253 253 253
35076 -253 253 253 253 253 253 253 253 253 253 253 253
35077 -253 253 253 253 253 253 253 253 253 253 253 253
35078 -253 253 253 253 253 253 253 253 253 253 253 253
35079 -253 253 253 253 253 253 221 221 221 6 6 6
35080 - 2 2 6 2 2 6 6 6 6 30 30 30
35081 - 2 2 6 2 2 6 2 2 6 2 2 6
35082 - 2 2 6 82 82 82 54 54 54 18 18 18
35083 - 6 6 6 0 0 0 0 0 0 0 0 0
35084 - 0 0 0 0 0 0 0 0 0 0 0 0
35085 - 0 0 0 0 0 0 0 0 0 0 0 0
35086 - 0 0 0 0 0 0 0 0 0 0 0 0
35087 - 0 0 0 0 0 0 0 0 0 0 0 0
35088 - 0 0 0 0 0 0 0 0 0 0 0 0
35089 - 0 0 0 0 0 0 0 0 0 10 10 10
35090 - 26 26 26 66 66 66 62 62 62 2 2 6
35091 - 2 2 6 38 38 38 10 10 10 26 26 26
35092 -238 238 238 253 253 253 253 253 253 253 253 253
35093 -253 253 253 253 253 253 253 253 253 253 253 253
35094 -253 253 253 253 253 253 231 231 231 238 238 238
35095 -253 253 253 253 253 253 253 253 253 253 253 253
35096 -253 253 253 253 253 253 253 253 253 253 253 253
35097 -253 253 253 253 253 253 253 253 253 253 253 253
35098 -253 253 253 253 253 253 253 253 253 253 253 253
35099 -253 253 253 253 253 253 231 231 231 6 6 6
35100 - 2 2 6 2 2 6 10 10 10 30 30 30
35101 - 2 2 6 2 2 6 2 2 6 2 2 6
35102 - 2 2 6 66 66 66 58 58 58 22 22 22
35103 - 6 6 6 0 0 0 0 0 0 0 0 0
35104 - 0 0 0 0 0 0 0 0 0 0 0 0
35105 - 0 0 0 0 0 0 0 0 0 0 0 0
35106 - 0 0 0 0 0 0 0 0 0 0 0 0
35107 - 0 0 0 0 0 0 0 0 0 0 0 0
35108 - 0 0 0 0 0 0 0 0 0 0 0 0
35109 - 0 0 0 0 0 0 0 0 0 10 10 10
35110 - 38 38 38 78 78 78 6 6 6 2 2 6
35111 - 2 2 6 46 46 46 14 14 14 42 42 42
35112 -246 246 246 253 253 253 253 253 253 253 253 253
35113 -253 253 253 253 253 253 253 253 253 253 253 253
35114 -253 253 253 253 253 253 231 231 231 242 242 242
35115 -253 253 253 253 253 253 253 253 253 253 253 253
35116 -253 253 253 253 253 253 253 253 253 253 253 253
35117 -253 253 253 253 253 253 253 253 253 253 253 253
35118 -253 253 253 253 253 253 253 253 253 253 253 253
35119 -253 253 253 253 253 253 234 234 234 10 10 10
35120 - 2 2 6 2 2 6 22 22 22 14 14 14
35121 - 2 2 6 2 2 6 2 2 6 2 2 6
35122 - 2 2 6 66 66 66 62 62 62 22 22 22
35123 - 6 6 6 0 0 0 0 0 0 0 0 0
35124 - 0 0 0 0 0 0 0 0 0 0 0 0
35125 - 0 0 0 0 0 0 0 0 0 0 0 0
35126 - 0 0 0 0 0 0 0 0 0 0 0 0
35127 - 0 0 0 0 0 0 0 0 0 0 0 0
35128 - 0 0 0 0 0 0 0 0 0 0 0 0
35129 - 0 0 0 0 0 0 6 6 6 18 18 18
35130 - 50 50 50 74 74 74 2 2 6 2 2 6
35131 - 14 14 14 70 70 70 34 34 34 62 62 62
35132 -250 250 250 253 253 253 253 253 253 253 253 253
35133 -253 253 253 253 253 253 253 253 253 253 253 253
35134 -253 253 253 253 253 253 231 231 231 246 246 246
35135 -253 253 253 253 253 253 253 253 253 253 253 253
35136 -253 253 253 253 253 253 253 253 253 253 253 253
35137 -253 253 253 253 253 253 253 253 253 253 253 253
35138 -253 253 253 253 253 253 253 253 253 253 253 253
35139 -253 253 253 253 253 253 234 234 234 14 14 14
35140 - 2 2 6 2 2 6 30 30 30 2 2 6
35141 - 2 2 6 2 2 6 2 2 6 2 2 6
35142 - 2 2 6 66 66 66 62 62 62 22 22 22
35143 - 6 6 6 0 0 0 0 0 0 0 0 0
35144 - 0 0 0 0 0 0 0 0 0 0 0 0
35145 - 0 0 0 0 0 0 0 0 0 0 0 0
35146 - 0 0 0 0 0 0 0 0 0 0 0 0
35147 - 0 0 0 0 0 0 0 0 0 0 0 0
35148 - 0 0 0 0 0 0 0 0 0 0 0 0
35149 - 0 0 0 0 0 0 6 6 6 18 18 18
35150 - 54 54 54 62 62 62 2 2 6 2 2 6
35151 - 2 2 6 30 30 30 46 46 46 70 70 70
35152 -250 250 250 253 253 253 253 253 253 253 253 253
35153 -253 253 253 253 253 253 253 253 253 253 253 253
35154 -253 253 253 253 253 253 231 231 231 246 246 246
35155 -253 253 253 253 253 253 253 253 253 253 253 253
35156 -253 253 253 253 253 253 253 253 253 253 253 253
35157 -253 253 253 253 253 253 253 253 253 253 253 253
35158 -253 253 253 253 253 253 253 253 253 253 253 253
35159 -253 253 253 253 253 253 226 226 226 10 10 10
35160 - 2 2 6 6 6 6 30 30 30 2 2 6
35161 - 2 2 6 2 2 6 2 2 6 2 2 6
35162 - 2 2 6 66 66 66 58 58 58 22 22 22
35163 - 6 6 6 0 0 0 0 0 0 0 0 0
35164 - 0 0 0 0 0 0 0 0 0 0 0 0
35165 - 0 0 0 0 0 0 0 0 0 0 0 0
35166 - 0 0 0 0 0 0 0 0 0 0 0 0
35167 - 0 0 0 0 0 0 0 0 0 0 0 0
35168 - 0 0 0 0 0 0 0 0 0 0 0 0
35169 - 0 0 0 0 0 0 6 6 6 22 22 22
35170 - 58 58 58 62 62 62 2 2 6 2 2 6
35171 - 2 2 6 2 2 6 30 30 30 78 78 78
35172 -250 250 250 253 253 253 253 253 253 253 253 253
35173 -253 253 253 253 253 253 253 253 253 253 253 253
35174 -253 253 253 253 253 253 231 231 231 246 246 246
35175 -253 253 253 253 253 253 253 253 253 253 253 253
35176 -253 253 253 253 253 253 253 253 253 253 253 253
35177 -253 253 253 253 253 253 253 253 253 253 253 253
35178 -253 253 253 253 253 253 253 253 253 253 253 253
35179 -253 253 253 253 253 253 206 206 206 2 2 6
35180 - 22 22 22 34 34 34 18 14 6 22 22 22
35181 - 26 26 26 18 18 18 6 6 6 2 2 6
35182 - 2 2 6 82 82 82 54 54 54 18 18 18
35183 - 6 6 6 0 0 0 0 0 0 0 0 0
35184 - 0 0 0 0 0 0 0 0 0 0 0 0
35185 - 0 0 0 0 0 0 0 0 0 0 0 0
35186 - 0 0 0 0 0 0 0 0 0 0 0 0
35187 - 0 0 0 0 0 0 0 0 0 0 0 0
35188 - 0 0 0 0 0 0 0 0 0 0 0 0
35189 - 0 0 0 0 0 0 6 6 6 26 26 26
35190 - 62 62 62 106 106 106 74 54 14 185 133 11
35191 -210 162 10 121 92 8 6 6 6 62 62 62
35192 -238 238 238 253 253 253 253 253 253 253 253 253
35193 -253 253 253 253 253 253 253 253 253 253 253 253
35194 -253 253 253 253 253 253 231 231 231 246 246 246
35195 -253 253 253 253 253 253 253 253 253 253 253 253
35196 -253 253 253 253 253 253 253 253 253 253 253 253
35197 -253 253 253 253 253 253 253 253 253 253 253 253
35198 -253 253 253 253 253 253 253 253 253 253 253 253
35199 -253 253 253 253 253 253 158 158 158 18 18 18
35200 - 14 14 14 2 2 6 2 2 6 2 2 6
35201 - 6 6 6 18 18 18 66 66 66 38 38 38
35202 - 6 6 6 94 94 94 50 50 50 18 18 18
35203 - 6 6 6 0 0 0 0 0 0 0 0 0
35204 - 0 0 0 0 0 0 0 0 0 0 0 0
35205 - 0 0 0 0 0 0 0 0 0 0 0 0
35206 - 0 0 0 0 0 0 0 0 0 0 0 0
35207 - 0 0 0 0 0 0 0 0 0 0 0 0
35208 - 0 0 0 0 0 0 0 0 0 6 6 6
35209 - 10 10 10 10 10 10 18 18 18 38 38 38
35210 - 78 78 78 142 134 106 216 158 10 242 186 14
35211 -246 190 14 246 190 14 156 118 10 10 10 10
35212 - 90 90 90 238 238 238 253 253 253 253 253 253
35213 -253 253 253 253 253 253 253 253 253 253 253 253
35214 -253 253 253 253 253 253 231 231 231 250 250 250
35215 -253 253 253 253 253 253 253 253 253 253 253 253
35216 -253 253 253 253 253 253 253 253 253 253 253 253
35217 -253 253 253 253 253 253 253 253 253 253 253 253
35218 -253 253 253 253 253 253 253 253 253 246 230 190
35219 -238 204 91 238 204 91 181 142 44 37 26 9
35220 - 2 2 6 2 2 6 2 2 6 2 2 6
35221 - 2 2 6 2 2 6 38 38 38 46 46 46
35222 - 26 26 26 106 106 106 54 54 54 18 18 18
35223 - 6 6 6 0 0 0 0 0 0 0 0 0
35224 - 0 0 0 0 0 0 0 0 0 0 0 0
35225 - 0 0 0 0 0 0 0 0 0 0 0 0
35226 - 0 0 0 0 0 0 0 0 0 0 0 0
35227 - 0 0 0 0 0 0 0 0 0 0 0 0
35228 - 0 0 0 6 6 6 14 14 14 22 22 22
35229 - 30 30 30 38 38 38 50 50 50 70 70 70
35230 -106 106 106 190 142 34 226 170 11 242 186 14
35231 -246 190 14 246 190 14 246 190 14 154 114 10
35232 - 6 6 6 74 74 74 226 226 226 253 253 253
35233 -253 253 253 253 253 253 253 253 253 253 253 253
35234 -253 253 253 253 253 253 231 231 231 250 250 250
35235 -253 253 253 253 253 253 253 253 253 253 253 253
35236 -253 253 253 253 253 253 253 253 253 253 253 253
35237 -253 253 253 253 253 253 253 253 253 253 253 253
35238 -253 253 253 253 253 253 253 253 253 228 184 62
35239 -241 196 14 241 208 19 232 195 16 38 30 10
35240 - 2 2 6 2 2 6 2 2 6 2 2 6
35241 - 2 2 6 6 6 6 30 30 30 26 26 26
35242 -203 166 17 154 142 90 66 66 66 26 26 26
35243 - 6 6 6 0 0 0 0 0 0 0 0 0
35244 - 0 0 0 0 0 0 0 0 0 0 0 0
35245 - 0 0 0 0 0 0 0 0 0 0 0 0
35246 - 0 0 0 0 0 0 0 0 0 0 0 0
35247 - 0 0 0 0 0 0 0 0 0 0 0 0
35248 - 6 6 6 18 18 18 38 38 38 58 58 58
35249 - 78 78 78 86 86 86 101 101 101 123 123 123
35250 -175 146 61 210 150 10 234 174 13 246 186 14
35251 -246 190 14 246 190 14 246 190 14 238 190 10
35252 -102 78 10 2 2 6 46 46 46 198 198 198
35253 -253 253 253 253 253 253 253 253 253 253 253 253
35254 -253 253 253 253 253 253 234 234 234 242 242 242
35255 -253 253 253 253 253 253 253 253 253 253 253 253
35256 -253 253 253 253 253 253 253 253 253 253 253 253
35257 -253 253 253 253 253 253 253 253 253 253 253 253
35258 -253 253 253 253 253 253 253 253 253 224 178 62
35259 -242 186 14 241 196 14 210 166 10 22 18 6
35260 - 2 2 6 2 2 6 2 2 6 2 2 6
35261 - 2 2 6 2 2 6 6 6 6 121 92 8
35262 -238 202 15 232 195 16 82 82 82 34 34 34
35263 - 10 10 10 0 0 0 0 0 0 0 0 0
35264 - 0 0 0 0 0 0 0 0 0 0 0 0
35265 - 0 0 0 0 0 0 0 0 0 0 0 0
35266 - 0 0 0 0 0 0 0 0 0 0 0 0
35267 - 0 0 0 0 0 0 0 0 0 0 0 0
35268 - 14 14 14 38 38 38 70 70 70 154 122 46
35269 -190 142 34 200 144 11 197 138 11 197 138 11
35270 -213 154 11 226 170 11 242 186 14 246 190 14
35271 -246 190 14 246 190 14 246 190 14 246 190 14
35272 -225 175 15 46 32 6 2 2 6 22 22 22
35273 -158 158 158 250 250 250 253 253 253 253 253 253
35274 -253 253 253 253 253 253 253 253 253 253 253 253
35275 -253 253 253 253 253 253 253 253 253 253 253 253
35276 -253 253 253 253 253 253 253 253 253 253 253 253
35277 -253 253 253 253 253 253 253 253 253 253 253 253
35278 -253 253 253 250 250 250 242 242 242 224 178 62
35279 -239 182 13 236 186 11 213 154 11 46 32 6
35280 - 2 2 6 2 2 6 2 2 6 2 2 6
35281 - 2 2 6 2 2 6 61 42 6 225 175 15
35282 -238 190 10 236 186 11 112 100 78 42 42 42
35283 - 14 14 14 0 0 0 0 0 0 0 0 0
35284 - 0 0 0 0 0 0 0 0 0 0 0 0
35285 - 0 0 0 0 0 0 0 0 0 0 0 0
35286 - 0 0 0 0 0 0 0 0 0 0 0 0
35287 - 0 0 0 0 0 0 0 0 0 6 6 6
35288 - 22 22 22 54 54 54 154 122 46 213 154 11
35289 -226 170 11 230 174 11 226 170 11 226 170 11
35290 -236 178 12 242 186 14 246 190 14 246 190 14
35291 -246 190 14 246 190 14 246 190 14 246 190 14
35292 -241 196 14 184 144 12 10 10 10 2 2 6
35293 - 6 6 6 116 116 116 242 242 242 253 253 253
35294 -253 253 253 253 253 253 253 253 253 253 253 253
35295 -253 253 253 253 253 253 253 253 253 253 253 253
35296 -253 253 253 253 253 253 253 253 253 253 253 253
35297 -253 253 253 253 253 253 253 253 253 253 253 253
35298 -253 253 253 231 231 231 198 198 198 214 170 54
35299 -236 178 12 236 178 12 210 150 10 137 92 6
35300 - 18 14 6 2 2 6 2 2 6 2 2 6
35301 - 6 6 6 70 47 6 200 144 11 236 178 12
35302 -239 182 13 239 182 13 124 112 88 58 58 58
35303 - 22 22 22 6 6 6 0 0 0 0 0 0
35304 - 0 0 0 0 0 0 0 0 0 0 0 0
35305 - 0 0 0 0 0 0 0 0 0 0 0 0
35306 - 0 0 0 0 0 0 0 0 0 0 0 0
35307 - 0 0 0 0 0 0 0 0 0 10 10 10
35308 - 30 30 30 70 70 70 180 133 36 226 170 11
35309 -239 182 13 242 186 14 242 186 14 246 186 14
35310 -246 190 14 246 190 14 246 190 14 246 190 14
35311 -246 190 14 246 190 14 246 190 14 246 190 14
35312 -246 190 14 232 195 16 98 70 6 2 2 6
35313 - 2 2 6 2 2 6 66 66 66 221 221 221
35314 -253 253 253 253 253 253 253 253 253 253 253 253
35315 -253 253 253 253 253 253 253 253 253 253 253 253
35316 -253 253 253 253 253 253 253 253 253 253 253 253
35317 -253 253 253 253 253 253 253 253 253 253 253 253
35318 -253 253 253 206 206 206 198 198 198 214 166 58
35319 -230 174 11 230 174 11 216 158 10 192 133 9
35320 -163 110 8 116 81 8 102 78 10 116 81 8
35321 -167 114 7 197 138 11 226 170 11 239 182 13
35322 -242 186 14 242 186 14 162 146 94 78 78 78
35323 - 34 34 34 14 14 14 6 6 6 0 0 0
35324 - 0 0 0 0 0 0 0 0 0 0 0 0
35325 - 0 0 0 0 0 0 0 0 0 0 0 0
35326 - 0 0 0 0 0 0 0 0 0 0 0 0
35327 - 0 0 0 0 0 0 0 0 0 6 6 6
35328 - 30 30 30 78 78 78 190 142 34 226 170 11
35329 -239 182 13 246 190 14 246 190 14 246 190 14
35330 -246 190 14 246 190 14 246 190 14 246 190 14
35331 -246 190 14 246 190 14 246 190 14 246 190 14
35332 -246 190 14 241 196 14 203 166 17 22 18 6
35333 - 2 2 6 2 2 6 2 2 6 38 38 38
35334 -218 218 218 253 253 253 253 253 253 253 253 253
35335 -253 253 253 253 253 253 253 253 253 253 253 253
35336 -253 253 253 253 253 253 253 253 253 253 253 253
35337 -253 253 253 253 253 253 253 253 253 253 253 253
35338 -250 250 250 206 206 206 198 198 198 202 162 69
35339 -226 170 11 236 178 12 224 166 10 210 150 10
35340 -200 144 11 197 138 11 192 133 9 197 138 11
35341 -210 150 10 226 170 11 242 186 14 246 190 14
35342 -246 190 14 246 186 14 225 175 15 124 112 88
35343 - 62 62 62 30 30 30 14 14 14 6 6 6
35344 - 0 0 0 0 0 0 0 0 0 0 0 0
35345 - 0 0 0 0 0 0 0 0 0 0 0 0
35346 - 0 0 0 0 0 0 0 0 0 0 0 0
35347 - 0 0 0 0 0 0 0 0 0 10 10 10
35348 - 30 30 30 78 78 78 174 135 50 224 166 10
35349 -239 182 13 246 190 14 246 190 14 246 190 14
35350 -246 190 14 246 190 14 246 190 14 246 190 14
35351 -246 190 14 246 190 14 246 190 14 246 190 14
35352 -246 190 14 246 190 14 241 196 14 139 102 15
35353 - 2 2 6 2 2 6 2 2 6 2 2 6
35354 - 78 78 78 250 250 250 253 253 253 253 253 253
35355 -253 253 253 253 253 253 253 253 253 253 253 253
35356 -253 253 253 253 253 253 253 253 253 253 253 253
35357 -253 253 253 253 253 253 253 253 253 253 253 253
35358 -250 250 250 214 214 214 198 198 198 190 150 46
35359 -219 162 10 236 178 12 234 174 13 224 166 10
35360 -216 158 10 213 154 11 213 154 11 216 158 10
35361 -226 170 11 239 182 13 246 190 14 246 190 14
35362 -246 190 14 246 190 14 242 186 14 206 162 42
35363 -101 101 101 58 58 58 30 30 30 14 14 14
35364 - 6 6 6 0 0 0 0 0 0 0 0 0
35365 - 0 0 0 0 0 0 0 0 0 0 0 0
35366 - 0 0 0 0 0 0 0 0 0 0 0 0
35367 - 0 0 0 0 0 0 0 0 0 10 10 10
35368 - 30 30 30 74 74 74 174 135 50 216 158 10
35369 -236 178 12 246 190 14 246 190 14 246 190 14
35370 -246 190 14 246 190 14 246 190 14 246 190 14
35371 -246 190 14 246 190 14 246 190 14 246 190 14
35372 -246 190 14 246 190 14 241 196 14 226 184 13
35373 - 61 42 6 2 2 6 2 2 6 2 2 6
35374 - 22 22 22 238 238 238 253 253 253 253 253 253
35375 -253 253 253 253 253 253 253 253 253 253 253 253
35376 -253 253 253 253 253 253 253 253 253 253 253 253
35377 -253 253 253 253 253 253 253 253 253 253 253 253
35378 -253 253 253 226 226 226 187 187 187 180 133 36
35379 -216 158 10 236 178 12 239 182 13 236 178 12
35380 -230 174 11 226 170 11 226 170 11 230 174 11
35381 -236 178 12 242 186 14 246 190 14 246 190 14
35382 -246 190 14 246 190 14 246 186 14 239 182 13
35383 -206 162 42 106 106 106 66 66 66 34 34 34
35384 - 14 14 14 6 6 6 0 0 0 0 0 0
35385 - 0 0 0 0 0 0 0 0 0 0 0 0
35386 - 0 0 0 0 0 0 0 0 0 0 0 0
35387 - 0 0 0 0 0 0 0 0 0 6 6 6
35388 - 26 26 26 70 70 70 163 133 67 213 154 11
35389 -236 178 12 246 190 14 246 190 14 246 190 14
35390 -246 190 14 246 190 14 246 190 14 246 190 14
35391 -246 190 14 246 190 14 246 190 14 246 190 14
35392 -246 190 14 246 190 14 246 190 14 241 196 14
35393 -190 146 13 18 14 6 2 2 6 2 2 6
35394 - 46 46 46 246 246 246 253 253 253 253 253 253
35395 -253 253 253 253 253 253 253 253 253 253 253 253
35396 -253 253 253 253 253 253 253 253 253 253 253 253
35397 -253 253 253 253 253 253 253 253 253 253 253 253
35398 -253 253 253 221 221 221 86 86 86 156 107 11
35399 -216 158 10 236 178 12 242 186 14 246 186 14
35400 -242 186 14 239 182 13 239 182 13 242 186 14
35401 -242 186 14 246 186 14 246 190 14 246 190 14
35402 -246 190 14 246 190 14 246 190 14 246 190 14
35403 -242 186 14 225 175 15 142 122 72 66 66 66
35404 - 30 30 30 10 10 10 0 0 0 0 0 0
35405 - 0 0 0 0 0 0 0 0 0 0 0 0
35406 - 0 0 0 0 0 0 0 0 0 0 0 0
35407 - 0 0 0 0 0 0 0 0 0 6 6 6
35408 - 26 26 26 70 70 70 163 133 67 210 150 10
35409 -236 178 12 246 190 14 246 190 14 246 190 14
35410 -246 190 14 246 190 14 246 190 14 246 190 14
35411 -246 190 14 246 190 14 246 190 14 246 190 14
35412 -246 190 14 246 190 14 246 190 14 246 190 14
35413 -232 195 16 121 92 8 34 34 34 106 106 106
35414 -221 221 221 253 253 253 253 253 253 253 253 253
35415 -253 253 253 253 253 253 253 253 253 253 253 253
35416 -253 253 253 253 253 253 253 253 253 253 253 253
35417 -253 253 253 253 253 253 253 253 253 253 253 253
35418 -242 242 242 82 82 82 18 14 6 163 110 8
35419 -216 158 10 236 178 12 242 186 14 246 190 14
35420 -246 190 14 246 190 14 246 190 14 246 190 14
35421 -246 190 14 246 190 14 246 190 14 246 190 14
35422 -246 190 14 246 190 14 246 190 14 246 190 14
35423 -246 190 14 246 190 14 242 186 14 163 133 67
35424 - 46 46 46 18 18 18 6 6 6 0 0 0
35425 - 0 0 0 0 0 0 0 0 0 0 0 0
35426 - 0 0 0 0 0 0 0 0 0 0 0 0
35427 - 0 0 0 0 0 0 0 0 0 10 10 10
35428 - 30 30 30 78 78 78 163 133 67 210 150 10
35429 -236 178 12 246 186 14 246 190 14 246 190 14
35430 -246 190 14 246 190 14 246 190 14 246 190 14
35431 -246 190 14 246 190 14 246 190 14 246 190 14
35432 -246 190 14 246 190 14 246 190 14 246 190 14
35433 -241 196 14 215 174 15 190 178 144 253 253 253
35434 -253 253 253 253 253 253 253 253 253 253 253 253
35435 -253 253 253 253 253 253 253 253 253 253 253 253
35436 -253 253 253 253 253 253 253 253 253 253 253 253
35437 -253 253 253 253 253 253 253 253 253 218 218 218
35438 - 58 58 58 2 2 6 22 18 6 167 114 7
35439 -216 158 10 236 178 12 246 186 14 246 190 14
35440 -246 190 14 246 190 14 246 190 14 246 190 14
35441 -246 190 14 246 190 14 246 190 14 246 190 14
35442 -246 190 14 246 190 14 246 190 14 246 190 14
35443 -246 190 14 246 186 14 242 186 14 190 150 46
35444 - 54 54 54 22 22 22 6 6 6 0 0 0
35445 - 0 0 0 0 0 0 0 0 0 0 0 0
35446 - 0 0 0 0 0 0 0 0 0 0 0 0
35447 - 0 0 0 0 0 0 0 0 0 14 14 14
35448 - 38 38 38 86 86 86 180 133 36 213 154 11
35449 -236 178 12 246 186 14 246 190 14 246 190 14
35450 -246 190 14 246 190 14 246 190 14 246 190 14
35451 -246 190 14 246 190 14 246 190 14 246 190 14
35452 -246 190 14 246 190 14 246 190 14 246 190 14
35453 -246 190 14 232 195 16 190 146 13 214 214 214
35454 -253 253 253 253 253 253 253 253 253 253 253 253
35455 -253 253 253 253 253 253 253 253 253 253 253 253
35456 -253 253 253 253 253 253 253 253 253 253 253 253
35457 -253 253 253 250 250 250 170 170 170 26 26 26
35458 - 2 2 6 2 2 6 37 26 9 163 110 8
35459 -219 162 10 239 182 13 246 186 14 246 190 14
35460 -246 190 14 246 190 14 246 190 14 246 190 14
35461 -246 190 14 246 190 14 246 190 14 246 190 14
35462 -246 190 14 246 190 14 246 190 14 246 190 14
35463 -246 186 14 236 178 12 224 166 10 142 122 72
35464 - 46 46 46 18 18 18 6 6 6 0 0 0
35465 - 0 0 0 0 0 0 0 0 0 0 0 0
35466 - 0 0 0 0 0 0 0 0 0 0 0 0
35467 - 0 0 0 0 0 0 6 6 6 18 18 18
35468 - 50 50 50 109 106 95 192 133 9 224 166 10
35469 -242 186 14 246 190 14 246 190 14 246 190 14
35470 -246 190 14 246 190 14 246 190 14 246 190 14
35471 -246 190 14 246 190 14 246 190 14 246 190 14
35472 -246 190 14 246 190 14 246 190 14 246 190 14
35473 -242 186 14 226 184 13 210 162 10 142 110 46
35474 -226 226 226 253 253 253 253 253 253 253 253 253
35475 -253 253 253 253 253 253 253 253 253 253 253 253
35476 -253 253 253 253 253 253 253 253 253 253 253 253
35477 -198 198 198 66 66 66 2 2 6 2 2 6
35478 - 2 2 6 2 2 6 50 34 6 156 107 11
35479 -219 162 10 239 182 13 246 186 14 246 190 14
35480 -246 190 14 246 190 14 246 190 14 246 190 14
35481 -246 190 14 246 190 14 246 190 14 246 190 14
35482 -246 190 14 246 190 14 246 190 14 242 186 14
35483 -234 174 13 213 154 11 154 122 46 66 66 66
35484 - 30 30 30 10 10 10 0 0 0 0 0 0
35485 - 0 0 0 0 0 0 0 0 0 0 0 0
35486 - 0 0 0 0 0 0 0 0 0 0 0 0
35487 - 0 0 0 0 0 0 6 6 6 22 22 22
35488 - 58 58 58 154 121 60 206 145 10 234 174 13
35489 -242 186 14 246 186 14 246 190 14 246 190 14
35490 -246 190 14 246 190 14 246 190 14 246 190 14
35491 -246 190 14 246 190 14 246 190 14 246 190 14
35492 -246 190 14 246 190 14 246 190 14 246 190 14
35493 -246 186 14 236 178 12 210 162 10 163 110 8
35494 - 61 42 6 138 138 138 218 218 218 250 250 250
35495 -253 253 253 253 253 253 253 253 253 250 250 250
35496 -242 242 242 210 210 210 144 144 144 66 66 66
35497 - 6 6 6 2 2 6 2 2 6 2 2 6
35498 - 2 2 6 2 2 6 61 42 6 163 110 8
35499 -216 158 10 236 178 12 246 190 14 246 190 14
35500 -246 190 14 246 190 14 246 190 14 246 190 14
35501 -246 190 14 246 190 14 246 190 14 246 190 14
35502 -246 190 14 239 182 13 230 174 11 216 158 10
35503 -190 142 34 124 112 88 70 70 70 38 38 38
35504 - 18 18 18 6 6 6 0 0 0 0 0 0
35505 - 0 0 0 0 0 0 0 0 0 0 0 0
35506 - 0 0 0 0 0 0 0 0 0 0 0 0
35507 - 0 0 0 0 0 0 6 6 6 22 22 22
35508 - 62 62 62 168 124 44 206 145 10 224 166 10
35509 -236 178 12 239 182 13 242 186 14 242 186 14
35510 -246 186 14 246 190 14 246 190 14 246 190 14
35511 -246 190 14 246 190 14 246 190 14 246 190 14
35512 -246 190 14 246 190 14 246 190 14 246 190 14
35513 -246 190 14 236 178 12 216 158 10 175 118 6
35514 - 80 54 7 2 2 6 6 6 6 30 30 30
35515 - 54 54 54 62 62 62 50 50 50 38 38 38
35516 - 14 14 14 2 2 6 2 2 6 2 2 6
35517 - 2 2 6 2 2 6 2 2 6 2 2 6
35518 - 2 2 6 6 6 6 80 54 7 167 114 7
35519 -213 154 11 236 178 12 246 190 14 246 190 14
35520 -246 190 14 246 190 14 246 190 14 246 190 14
35521 -246 190 14 242 186 14 239 182 13 239 182 13
35522 -230 174 11 210 150 10 174 135 50 124 112 88
35523 - 82 82 82 54 54 54 34 34 34 18 18 18
35524 - 6 6 6 0 0 0 0 0 0 0 0 0
35525 - 0 0 0 0 0 0 0 0 0 0 0 0
35526 - 0 0 0 0 0 0 0 0 0 0 0 0
35527 - 0 0 0 0 0 0 6 6 6 18 18 18
35528 - 50 50 50 158 118 36 192 133 9 200 144 11
35529 -216 158 10 219 162 10 224 166 10 226 170 11
35530 -230 174 11 236 178 12 239 182 13 239 182 13
35531 -242 186 14 246 186 14 246 190 14 246 190 14
35532 -246 190 14 246 190 14 246 190 14 246 190 14
35533 -246 186 14 230 174 11 210 150 10 163 110 8
35534 -104 69 6 10 10 10 2 2 6 2 2 6
35535 - 2 2 6 2 2 6 2 2 6 2 2 6
35536 - 2 2 6 2 2 6 2 2 6 2 2 6
35537 - 2 2 6 2 2 6 2 2 6 2 2 6
35538 - 2 2 6 6 6 6 91 60 6 167 114 7
35539 -206 145 10 230 174 11 242 186 14 246 190 14
35540 -246 190 14 246 190 14 246 186 14 242 186 14
35541 -239 182 13 230 174 11 224 166 10 213 154 11
35542 -180 133 36 124 112 88 86 86 86 58 58 58
35543 - 38 38 38 22 22 22 10 10 10 6 6 6
35544 - 0 0 0 0 0 0 0 0 0 0 0 0
35545 - 0 0 0 0 0 0 0 0 0 0 0 0
35546 - 0 0 0 0 0 0 0 0 0 0 0 0
35547 - 0 0 0 0 0 0 0 0 0 14 14 14
35548 - 34 34 34 70 70 70 138 110 50 158 118 36
35549 -167 114 7 180 123 7 192 133 9 197 138 11
35550 -200 144 11 206 145 10 213 154 11 219 162 10
35551 -224 166 10 230 174 11 239 182 13 242 186 14
35552 -246 186 14 246 186 14 246 186 14 246 186 14
35553 -239 182 13 216 158 10 185 133 11 152 99 6
35554 -104 69 6 18 14 6 2 2 6 2 2 6
35555 - 2 2 6 2 2 6 2 2 6 2 2 6
35556 - 2 2 6 2 2 6 2 2 6 2 2 6
35557 - 2 2 6 2 2 6 2 2 6 2 2 6
35558 - 2 2 6 6 6 6 80 54 7 152 99 6
35559 -192 133 9 219 162 10 236 178 12 239 182 13
35560 -246 186 14 242 186 14 239 182 13 236 178 12
35561 -224 166 10 206 145 10 192 133 9 154 121 60
35562 - 94 94 94 62 62 62 42 42 42 22 22 22
35563 - 14 14 14 6 6 6 0 0 0 0 0 0
35564 - 0 0 0 0 0 0 0 0 0 0 0 0
35565 - 0 0 0 0 0 0 0 0 0 0 0 0
35566 - 0 0 0 0 0 0 0 0 0 0 0 0
35567 - 0 0 0 0 0 0 0 0 0 6 6 6
35568 - 18 18 18 34 34 34 58 58 58 78 78 78
35569 -101 98 89 124 112 88 142 110 46 156 107 11
35570 -163 110 8 167 114 7 175 118 6 180 123 7
35571 -185 133 11 197 138 11 210 150 10 219 162 10
35572 -226 170 11 236 178 12 236 178 12 234 174 13
35573 -219 162 10 197 138 11 163 110 8 130 83 6
35574 - 91 60 6 10 10 10 2 2 6 2 2 6
35575 - 18 18 18 38 38 38 38 38 38 38 38 38
35576 - 38 38 38 38 38 38 38 38 38 38 38 38
35577 - 38 38 38 38 38 38 26 26 26 2 2 6
35578 - 2 2 6 6 6 6 70 47 6 137 92 6
35579 -175 118 6 200 144 11 219 162 10 230 174 11
35580 -234 174 13 230 174 11 219 162 10 210 150 10
35581 -192 133 9 163 110 8 124 112 88 82 82 82
35582 - 50 50 50 30 30 30 14 14 14 6 6 6
35583 - 0 0 0 0 0 0 0 0 0 0 0 0
35584 - 0 0 0 0 0 0 0 0 0 0 0 0
35585 - 0 0 0 0 0 0 0 0 0 0 0 0
35586 - 0 0 0 0 0 0 0 0 0 0 0 0
35587 - 0 0 0 0 0 0 0 0 0 0 0 0
35588 - 6 6 6 14 14 14 22 22 22 34 34 34
35589 - 42 42 42 58 58 58 74 74 74 86 86 86
35590 -101 98 89 122 102 70 130 98 46 121 87 25
35591 -137 92 6 152 99 6 163 110 8 180 123 7
35592 -185 133 11 197 138 11 206 145 10 200 144 11
35593 -180 123 7 156 107 11 130 83 6 104 69 6
35594 - 50 34 6 54 54 54 110 110 110 101 98 89
35595 - 86 86 86 82 82 82 78 78 78 78 78 78
35596 - 78 78 78 78 78 78 78 78 78 78 78 78
35597 - 78 78 78 82 82 82 86 86 86 94 94 94
35598 -106 106 106 101 101 101 86 66 34 124 80 6
35599 -156 107 11 180 123 7 192 133 9 200 144 11
35600 -206 145 10 200 144 11 192 133 9 175 118 6
35601 -139 102 15 109 106 95 70 70 70 42 42 42
35602 - 22 22 22 10 10 10 0 0 0 0 0 0
35603 - 0 0 0 0 0 0 0 0 0 0 0 0
35604 - 0 0 0 0 0 0 0 0 0 0 0 0
35605 - 0 0 0 0 0 0 0 0 0 0 0 0
35606 - 0 0 0 0 0 0 0 0 0 0 0 0
35607 - 0 0 0 0 0 0 0 0 0 0 0 0
35608 - 0 0 0 0 0 0 6 6 6 10 10 10
35609 - 14 14 14 22 22 22 30 30 30 38 38 38
35610 - 50 50 50 62 62 62 74 74 74 90 90 90
35611 -101 98 89 112 100 78 121 87 25 124 80 6
35612 -137 92 6 152 99 6 152 99 6 152 99 6
35613 -138 86 6 124 80 6 98 70 6 86 66 30
35614 -101 98 89 82 82 82 58 58 58 46 46 46
35615 - 38 38 38 34 34 34 34 34 34 34 34 34
35616 - 34 34 34 34 34 34 34 34 34 34 34 34
35617 - 34 34 34 34 34 34 38 38 38 42 42 42
35618 - 54 54 54 82 82 82 94 86 76 91 60 6
35619 -134 86 6 156 107 11 167 114 7 175 118 6
35620 -175 118 6 167 114 7 152 99 6 121 87 25
35621 -101 98 89 62 62 62 34 34 34 18 18 18
35622 - 6 6 6 0 0 0 0 0 0 0 0 0
35623 - 0 0 0 0 0 0 0 0 0 0 0 0
35624 - 0 0 0 0 0 0 0 0 0 0 0 0
35625 - 0 0 0 0 0 0 0 0 0 0 0 0
35626 - 0 0 0 0 0 0 0 0 0 0 0 0
35627 - 0 0 0 0 0 0 0 0 0 0 0 0
35628 - 0 0 0 0 0 0 0 0 0 0 0 0
35629 - 0 0 0 6 6 6 6 6 6 10 10 10
35630 - 18 18 18 22 22 22 30 30 30 42 42 42
35631 - 50 50 50 66 66 66 86 86 86 101 98 89
35632 -106 86 58 98 70 6 104 69 6 104 69 6
35633 -104 69 6 91 60 6 82 62 34 90 90 90
35634 - 62 62 62 38 38 38 22 22 22 14 14 14
35635 - 10 10 10 10 10 10 10 10 10 10 10 10
35636 - 10 10 10 10 10 10 6 6 6 10 10 10
35637 - 10 10 10 10 10 10 10 10 10 14 14 14
35638 - 22 22 22 42 42 42 70 70 70 89 81 66
35639 - 80 54 7 104 69 6 124 80 6 137 92 6
35640 -134 86 6 116 81 8 100 82 52 86 86 86
35641 - 58 58 58 30 30 30 14 14 14 6 6 6
35642 - 0 0 0 0 0 0 0 0 0 0 0 0
35643 - 0 0 0 0 0 0 0 0 0 0 0 0
35644 - 0 0 0 0 0 0 0 0 0 0 0 0
35645 - 0 0 0 0 0 0 0 0 0 0 0 0
35646 - 0 0 0 0 0 0 0 0 0 0 0 0
35647 - 0 0 0 0 0 0 0 0 0 0 0 0
35648 - 0 0 0 0 0 0 0 0 0 0 0 0
35649 - 0 0 0 0 0 0 0 0 0 0 0 0
35650 - 0 0 0 6 6 6 10 10 10 14 14 14
35651 - 18 18 18 26 26 26 38 38 38 54 54 54
35652 - 70 70 70 86 86 86 94 86 76 89 81 66
35653 - 89 81 66 86 86 86 74 74 74 50 50 50
35654 - 30 30 30 14 14 14 6 6 6 0 0 0
35655 - 0 0 0 0 0 0 0 0 0 0 0 0
35656 - 0 0 0 0 0 0 0 0 0 0 0 0
35657 - 0 0 0 0 0 0 0 0 0 0 0 0
35658 - 6 6 6 18 18 18 34 34 34 58 58 58
35659 - 82 82 82 89 81 66 89 81 66 89 81 66
35660 - 94 86 66 94 86 76 74 74 74 50 50 50
35661 - 26 26 26 14 14 14 6 6 6 0 0 0
35662 - 0 0 0 0 0 0 0 0 0 0 0 0
35663 - 0 0 0 0 0 0 0 0 0 0 0 0
35664 - 0 0 0 0 0 0 0 0 0 0 0 0
35665 - 0 0 0 0 0 0 0 0 0 0 0 0
35666 - 0 0 0 0 0 0 0 0 0 0 0 0
35667 - 0 0 0 0 0 0 0 0 0 0 0 0
35668 - 0 0 0 0 0 0 0 0 0 0 0 0
35669 - 0 0 0 0 0 0 0 0 0 0 0 0
35670 - 0 0 0 0 0 0 0 0 0 0 0 0
35671 - 6 6 6 6 6 6 14 14 14 18 18 18
35672 - 30 30 30 38 38 38 46 46 46 54 54 54
35673 - 50 50 50 42 42 42 30 30 30 18 18 18
35674 - 10 10 10 0 0 0 0 0 0 0 0 0
35675 - 0 0 0 0 0 0 0 0 0 0 0 0
35676 - 0 0 0 0 0 0 0 0 0 0 0 0
35677 - 0 0 0 0 0 0 0 0 0 0 0 0
35678 - 0 0 0 6 6 6 14 14 14 26 26 26
35679 - 38 38 38 50 50 50 58 58 58 58 58 58
35680 - 54 54 54 42 42 42 30 30 30 18 18 18
35681 - 10 10 10 0 0 0 0 0 0 0 0 0
35682 - 0 0 0 0 0 0 0 0 0 0 0 0
35683 - 0 0 0 0 0 0 0 0 0 0 0 0
35684 - 0 0 0 0 0 0 0 0 0 0 0 0
35685 - 0 0 0 0 0 0 0 0 0 0 0 0
35686 - 0 0 0 0 0 0 0 0 0 0 0 0
35687 - 0 0 0 0 0 0 0 0 0 0 0 0
35688 - 0 0 0 0 0 0 0 0 0 0 0 0
35689 - 0 0 0 0 0 0 0 0 0 0 0 0
35690 - 0 0 0 0 0 0 0 0 0 0 0 0
35691 - 0 0 0 0 0 0 0 0 0 6 6 6
35692 - 6 6 6 10 10 10 14 14 14 18 18 18
35693 - 18 18 18 14 14 14 10 10 10 6 6 6
35694 - 0 0 0 0 0 0 0 0 0 0 0 0
35695 - 0 0 0 0 0 0 0 0 0 0 0 0
35696 - 0 0 0 0 0 0 0 0 0 0 0 0
35697 - 0 0 0 0 0 0 0 0 0 0 0 0
35698 - 0 0 0 0 0 0 0 0 0 6 6 6
35699 - 14 14 14 18 18 18 22 22 22 22 22 22
35700 - 18 18 18 14 14 14 10 10 10 6 6 6
35701 - 0 0 0 0 0 0 0 0 0 0 0 0
35702 - 0 0 0 0 0 0 0 0 0 0 0 0
35703 - 0 0 0 0 0 0 0 0 0 0 0 0
35704 - 0 0 0 0 0 0 0 0 0 0 0 0
35705 - 0 0 0 0 0 0 0 0 0 0 0 0
35706 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35707 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35708 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35709 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35710 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35711 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35712 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35713 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35714 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35715 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35716 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35717 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35718 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35719 +4 4 4 4 4 4
35720 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35721 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35722 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35723 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35724 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35725 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35726 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35727 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35728 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35729 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35730 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35731 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35732 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35733 +4 4 4 4 4 4
35734 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35735 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35736 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35737 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35738 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35739 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35740 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35741 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35742 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35743 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35744 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35745 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35746 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35747 +4 4 4 4 4 4
35748 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35749 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35750 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35751 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35752 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35753 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35754 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35755 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35756 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35757 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35758 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35759 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35760 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35761 +4 4 4 4 4 4
35762 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35763 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35764 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35765 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35766 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35767 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35768 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35769 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35770 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35771 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35772 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35773 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35774 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35775 +4 4 4 4 4 4
35776 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35777 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35778 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35779 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35780 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35781 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35782 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35783 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35784 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35785 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35786 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35787 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35788 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35789 +4 4 4 4 4 4
35790 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35791 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35792 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35793 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35794 +4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
35795 +0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
35796 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35797 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35798 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35799 +4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
35800 +0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
35801 +4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
35802 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35803 +4 4 4 4 4 4
35804 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35805 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35806 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35807 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35808 +4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
35809 +37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
35810 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35811 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35812 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35813 +4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
35814 +2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
35815 +4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
35816 +1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35817 +4 4 4 4 4 4
35818 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35819 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35820 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35821 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35822 +2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
35823 +153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
35824 +0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
35825 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35826 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35827 +4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
35828 +60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
35829 +4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
35830 +2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
35831 +4 4 4 4 4 4
35832 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35833 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35834 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35835 +4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
35836 +4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
35837 +165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
35838 +1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
35839 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35840 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
35841 +3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
35842 +163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
35843 +0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
35844 +37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
35845 +4 4 4 4 4 4
35846 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35847 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35848 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35849 +4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
35850 +37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
35851 +156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
35852 +125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
35853 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
35854 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
35855 +0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
35856 +174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
35857 +0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
35858 +64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
35859 +4 4 4 4 4 4
35860 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35861 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35862 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
35863 +5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
35864 +156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
35865 +156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
35866 +174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
35867 +1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
35868 +4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
35869 +13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
35870 +174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
35871 +22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
35872 +90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
35873 +4 4 4 4 4 4
35874 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35875 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35876 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
35877 +0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
35878 +174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
35879 +156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
35880 +163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
35881 +4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
35882 +5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
35883 +131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
35884 +190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
35885 +90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
35886 +31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
35887 +4 4 4 4 4 4
35888 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35889 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35890 +4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
35891 +4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
35892 +155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
35893 +167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
35894 +153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
35895 +41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
35896 +1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
35897 +177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
35898 +125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
35899 +136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
35900 +7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
35901 +4 4 4 4 4 4
35902 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35903 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35904 +4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
35905 +125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
35906 +156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
35907 +137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
35908 +156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
35909 +167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
35910 +0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
35911 +166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
35912 +6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
35913 +90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
35914 +1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
35915 +4 4 4 4 4 4
35916 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35917 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35918 +1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
35919 +167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
35920 +157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
35921 +26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
35922 +158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
35923 +165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
35924 +60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
35925 +137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
35926 +52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
35927 +13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
35928 +4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
35929 +4 4 4 4 4 4
35930 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35931 +4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
35932 +0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
35933 +158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
35934 +167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
35935 +4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
35936 +174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
35937 +155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
35938 +137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
35939 +16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
35940 +136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
35941 +2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
35942 +4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
35943 +4 4 4 4 4 4
35944 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
35945 +4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
35946 +37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
35947 +157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
35948 +153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
35949 +4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
35950 +125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
35951 +156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
35952 +174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
35953 +4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
35954 +136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
35955 +1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
35956 +2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
35957 +0 0 0 4 4 4
35958 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
35959 +4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
35960 +158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
35961 +153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
35962 +37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
35963 +4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
35964 +4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
35965 +154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
35966 +174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
35967 +32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
35968 +28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
35969 +50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
35970 +0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
35971 +2 0 0 0 0 0
35972 +4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
35973 +0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
35974 +174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
35975 +165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
35976 +4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
35977 +4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
35978 +4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
35979 +174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
35980 +60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
35981 +136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
35982 +22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
35983 +136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
35984 +26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
35985 +37 38 37 0 0 0
35986 +4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
35987 +13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
35988 +153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
35989 +177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
35990 +4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
35991 +5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
35992 +6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
35993 +166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
35994 +4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
35995 +146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
35996 +71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
35997 +90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
35998 +125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
35999 +85 115 134 4 0 0
36000 +4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
36001 +125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
36002 +155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
36003 +125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
36004 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
36005 +0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
36006 +5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
36007 +37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
36008 +4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
36009 +90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
36010 +2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
36011 +13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
36012 +166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
36013 +60 73 81 4 0 0
36014 +4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
36015 +174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
36016 +156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
36017 +4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
36018 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
36019 +10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
36020 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
36021 +4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
36022 +80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
36023 +28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
36024 +50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
36025 +1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
36026 +167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
36027 +16 19 21 4 0 0
36028 +4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
36029 +158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
36030 +167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
36031 +4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
36032 +4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
36033 +80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
36034 +4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
36035 +3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
36036 +146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
36037 +68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
36038 +136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
36039 +24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
36040 +163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
36041 +4 0 0 4 3 3
36042 +3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
36043 +156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
36044 +155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
36045 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
36046 +2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
36047 +136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
36048 +0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
36049 +0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
36050 +136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
36051 +28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
36052 +22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
36053 +137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
36054 +60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
36055 +3 2 2 4 4 4
36056 +3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
36057 +157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
36058 +37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
36059 +4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
36060 +0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
36061 +101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
36062 +14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
36063 +22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
36064 +136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
36065 +17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
36066 +2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
36067 +166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
36068 +13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
36069 +4 4 4 4 4 4
36070 +1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
36071 +163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
36072 +4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
36073 +4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
36074 +40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
36075 +101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
36076 +101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
36077 +136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
36078 +136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
36079 +136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
36080 +3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
36081 +174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
36082 +4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
36083 +4 4 4 4 4 4
36084 +4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
36085 +155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
36086 +4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
36087 +4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
36088 +101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
36089 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
36090 +136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
36091 +136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
36092 +136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
36093 +90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
36094 +85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
36095 +167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
36096 +6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
36097 +5 5 5 5 5 5
36098 +1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
36099 +131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
36100 +6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
36101 +0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
36102 +101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
36103 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
36104 +101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
36105 +136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
36106 +101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
36107 +7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
36108 +174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
36109 +24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
36110 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
36111 +5 5 5 4 4 4
36112 +4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
36113 +131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
36114 +6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
36115 +13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
36116 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
36117 +101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
36118 +101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
36119 +136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
36120 +136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
36121 +2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
36122 +174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
36123 +4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
36124 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36125 +4 4 4 4 4 4
36126 +1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
36127 +137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
36128 +4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
36129 +64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
36130 +90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
36131 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
36132 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
36133 +136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
36134 +101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
36135 +37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
36136 +167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
36137 +3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
36138 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36139 +4 4 4 4 4 4
36140 +4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
36141 +153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
36142 +4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
36143 +90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
36144 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
36145 +90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
36146 +101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
36147 +101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
36148 +35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
36149 +154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
36150 +60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
36151 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36152 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36153 +4 4 4 4 4 4
36154 +1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
36155 +153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
36156 +4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193
36157 +64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193
36158 +64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
36159 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
36160 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
36161 +136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52
36162 +13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165
36163 +174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81
36164 +6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4
36165 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36166 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36167 +4 4 4 4 4 4
36168 +4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153
36169 +156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2
36170 +4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161
36171 +90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193
36172 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
36173 +90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196
36174 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
36175 +101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8
36176 +2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158
36177 +174 174 174 154 154 154 156 155 156 167 166 167 165 164 165 37 38 37
36178 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
36179 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36180 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36181 +4 4 4 4 4 4
36182 +3 1 0 4 0 0 60 73 81 157 156 157 163 162 163 153 152 153
36183 +158 157 158 167 166 167 137 136 137 26 28 28 2 0 0 2 2 2
36184 +4 5 5 4 4 4 4 0 0 7 12 15 24 86 132 26 108 161
36185 +37 112 160 64 123 161 90 154 193 64 123 161 90 154 193 90 154 193
36186 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
36187 +90 154 193 101 161 196 90 154 193 101 161 196 101 161 196 101 161 196
36188 +101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 136 185 209
36189 +90 154 193 35 83 115 13 16 17 13 16 17 7 11 13 3 6 7
36190 +5 7 8 6 6 6 3 4 3 2 2 1 30 32 34 154 154 154
36191 +167 166 167 154 154 154 154 154 154 174 174 174 165 164 165 37 38 37
36192 +6 6 6 4 0 0 6 6 6 4 4 4 4 4 4 4 4 4
36193 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36194 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36195 +4 4 4 4 4 4
36196 +4 0 0 4 0 0 41 54 63 163 162 163 166 165 166 154 154 154
36197 +163 162 163 174 174 174 137 136 137 26 28 28 0 0 0 2 2 2
36198 +4 5 5 4 4 5 1 1 2 6 10 14 28 67 93 18 97 151
36199 +18 97 151 18 97 151 26 108 161 37 112 160 37 112 160 90 154 193
36200 +64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
36201 +90 154 193 101 161 196 101 161 196 90 154 193 101 161 196 101 161 196
36202 +101 161 196 101 161 196 101 161 196 136 185 209 90 154 193 16 89 141
36203 +13 20 25 7 11 13 5 7 8 5 7 8 2 5 5 4 5 5
36204 +3 4 3 4 5 5 3 4 3 0 0 0 37 38 37 158 157 158
36205 +174 174 174 158 157 158 158 157 158 167 166 167 174 174 174 41 54 63
36206 +4 0 0 3 2 2 5 5 5 4 4 4 4 4 4 4 4 4
36207 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36208 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36209 +4 4 4 4 4 4
36210 +1 1 1 4 0 0 60 73 81 165 164 165 174 174 174 158 157 158
36211 +167 166 167 174 174 174 153 152 153 26 28 28 2 0 0 2 2 2
36212 +4 5 5 4 4 4 4 0 0 7 12 15 10 87 144 10 87 144
36213 +18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
36214 +26 108 161 37 112 160 53 118 160 90 154 193 90 154 193 90 154 193
36215 +90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 101 161 196
36216 +101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 13 16 17
36217 +7 11 13 3 6 7 5 7 8 5 7 8 2 5 5 4 5 5
36218 +4 5 5 6 6 6 3 4 3 0 0 0 30 32 34 158 157 158
36219 +174 174 174 156 155 156 155 154 155 165 164 165 154 153 154 37 38 37
36220 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
36221 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36222 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36223 +4 4 4 4 4 4
36224 +4 0 0 4 0 0 60 73 81 167 166 167 174 174 174 163 162 163
36225 +174 174 174 174 174 174 153 152 153 26 28 28 0 0 0 3 3 3
36226 +5 5 5 4 4 4 1 1 2 7 12 15 28 67 93 18 97 151
36227 +18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
36228 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
36229 +90 154 193 26 108 161 90 154 193 90 154 193 90 154 193 101 161 196
36230 +101 161 196 26 108 161 22 40 52 13 16 17 7 11 13 2 5 5
36231 +2 5 5 6 6 6 2 5 5 4 5 5 4 5 5 4 5 5
36232 +3 4 3 5 5 5 3 4 3 2 0 0 30 32 34 137 136 137
36233 +153 152 153 137 136 137 131 129 131 137 136 137 131 129 131 37 38 37
36234 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
36235 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36236 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36237 +4 4 4 4 4 4
36238 +1 1 1 4 0 0 60 73 81 167 166 167 174 174 174 166 165 166
36239 +174 174 174 177 184 187 153 152 153 30 32 34 1 0 0 3 3 3
36240 +5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
36241 +18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
36242 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
36243 +26 108 161 26 108 161 26 108 161 90 154 193 90 154 193 26 108 161
36244 +35 83 115 13 16 17 7 11 13 5 7 8 3 6 7 5 7 8
36245 +2 5 5 6 6 6 4 5 5 4 5 5 3 4 3 4 5 5
36246 +3 4 3 6 6 6 3 4 3 0 0 0 26 28 28 125 124 125
36247 +131 129 131 125 124 125 125 124 125 131 129 131 131 129 131 37 38 37
36248 +4 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
36249 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36250 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36251 +4 4 4 4 4 4
36252 +3 1 0 4 0 0 60 73 81 174 174 174 177 184 187 167 166 167
36253 +174 174 174 177 184 187 153 152 153 30 32 34 0 0 0 3 3 3
36254 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
36255 +18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
36256 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
36257 +26 108 161 90 154 193 26 108 161 26 108 161 24 86 132 13 20 25
36258 +7 11 13 13 20 25 22 40 52 5 7 8 3 4 3 3 4 3
36259 +4 5 5 3 4 3 4 5 5 3 4 3 4 5 5 3 4 3
36260 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
36261 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
36262 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
36263 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36264 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36265 +4 4 4 4 4 4
36266 +1 1 1 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
36267 +174 174 174 190 197 201 157 156 157 30 32 34 1 0 0 3 3 3
36268 +5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
36269 +18 97 151 19 95 150 19 95 150 18 97 151 18 97 151 26 108 161
36270 +18 97 151 26 108 161 26 108 161 26 108 161 26 108 161 90 154 193
36271 +26 108 161 26 108 161 26 108 161 22 40 52 2 5 5 3 4 3
36272 +28 67 93 37 112 160 34 86 122 2 5 5 3 4 3 3 4 3
36273 +3 4 3 3 4 3 3 4 3 2 2 1 3 4 3 4 4 4
36274 +4 5 5 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
36275 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
36276 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
36277 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36278 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36279 +4 4 4 4 4 4
36280 +4 0 0 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
36281 +174 174 174 190 197 201 158 157 158 30 32 34 0 0 0 2 2 2
36282 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
36283 +10 87 144 19 95 150 19 95 150 18 97 151 18 97 151 18 97 151
36284 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
36285 +18 97 151 22 40 52 2 5 5 2 2 1 22 40 52 26 108 161
36286 +90 154 193 37 112 160 22 40 52 3 4 3 13 20 25 22 30 35
36287 +3 6 7 1 1 1 2 2 2 6 9 11 5 5 5 4 3 3
36288 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
36289 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
36290 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
36291 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36292 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36293 +4 4 4 4 4 4
36294 +1 1 1 4 0 0 60 73 81 177 184 187 193 200 203 174 174 174
36295 +177 184 187 193 200 203 163 162 163 30 32 34 4 0 0 2 2 2
36296 +5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
36297 +10 87 144 10 87 144 19 95 150 19 95 150 19 95 150 18 97 151
36298 +26 108 161 26 108 161 26 108 161 90 154 193 26 108 161 28 67 93
36299 +6 10 14 2 5 5 13 20 25 24 86 132 37 112 160 90 154 193
36300 +10 87 144 7 12 15 2 5 5 28 67 93 37 112 160 28 67 93
36301 +2 2 1 7 12 15 35 83 115 28 67 93 3 6 7 1 0 0
36302 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
36303 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
36304 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
36305 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36306 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36307 +4 4 4 4 4 4
36308 +4 0 0 4 0 0 60 73 81 174 174 174 190 197 201 174 174 174
36309 +177 184 187 193 200 203 163 162 163 30 32 34 0 0 0 2 2 2
36310 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
36311 +10 87 144 16 89 141 19 95 150 10 87 144 26 108 161 26 108 161
36312 +26 108 161 26 108 161 26 108 161 28 67 93 6 10 14 1 1 2
36313 +7 12 15 28 67 93 26 108 161 16 89 141 24 86 132 21 29 34
36314 +3 4 3 21 29 34 37 112 160 37 112 160 27 99 146 21 29 34
36315 +21 29 34 26 108 161 90 154 193 35 83 115 1 1 2 2 0 0
36316 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
36317 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
36318 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
36319 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36320 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36321 +4 4 4 4 4 4
36322 +3 1 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
36323 +190 197 201 193 200 203 165 164 165 37 38 37 4 0 0 2 2 2
36324 +5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
36325 +10 87 144 10 87 144 16 89 141 18 97 151 18 97 151 10 87 144
36326 +24 86 132 24 86 132 13 20 25 4 5 7 4 5 7 22 40 52
36327 +18 97 151 37 112 160 26 108 161 7 12 15 1 1 1 0 0 0
36328 +28 67 93 37 112 160 26 108 161 28 67 93 22 40 52 28 67 93
36329 +26 108 161 90 154 193 26 108 161 10 87 144 0 0 0 2 0 0
36330 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
36331 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
36332 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
36333 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36334 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36335 +4 4 4 4 4 4
36336 +4 0 0 6 6 6 60 73 81 174 174 174 193 200 203 174 174 174
36337 +190 197 201 193 200 203 165 164 165 30 32 34 0 0 0 2 2 2
36338 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
36339 +10 87 144 10 87 144 10 87 144 18 97 151 28 67 93 6 10 14
36340 +0 0 0 1 1 2 4 5 7 13 20 25 16 89 141 26 108 161
36341 +26 108 161 26 108 161 24 86 132 6 9 11 2 3 3 22 40 52
36342 +37 112 160 16 89 141 22 40 52 28 67 93 26 108 161 26 108 161
36343 +90 154 193 26 108 161 26 108 161 28 67 93 1 1 1 4 0 0
36344 +4 4 4 5 5 5 3 3 3 4 0 0 26 28 28 124 126 130
36345 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
36346 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
36347 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36348 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36349 +4 4 4 4 4 4
36350 +4 0 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
36351 +193 200 203 193 200 203 167 166 167 37 38 37 4 0 0 2 2 2
36352 +5 5 5 4 4 4 4 0 0 6 10 14 28 67 93 10 87 144
36353 +10 87 144 10 87 144 18 97 151 10 87 144 13 20 25 4 5 7
36354 +1 1 2 1 1 1 22 40 52 26 108 161 26 108 161 26 108 161
36355 +26 108 161 26 108 161 26 108 161 24 86 132 22 40 52 22 40 52
36356 +22 40 52 22 40 52 10 87 144 26 108 161 26 108 161 26 108 161
36357 +26 108 161 26 108 161 90 154 193 10 87 144 0 0 0 4 0 0
36358 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
36359 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
36360 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
36361 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36362 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36363 +4 4 4 4 4 4
36364 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
36365 +190 197 201 205 212 215 167 166 167 30 32 34 0 0 0 2 2 2
36366 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
36367 +10 87 144 10 87 144 10 87 144 10 87 144 22 40 52 1 1 2
36368 +2 0 0 1 1 2 24 86 132 26 108 161 26 108 161 26 108 161
36369 +26 108 161 19 95 150 16 89 141 10 87 144 22 40 52 22 40 52
36370 +10 87 144 26 108 161 37 112 160 26 108 161 26 108 161 26 108 161
36371 +26 108 161 26 108 161 26 108 161 28 67 93 2 0 0 3 1 0
36372 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
36373 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
36374 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
36375 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36376 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36377 +4 4 4 4 4 4
36378 +4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
36379 +193 200 203 193 200 203 174 174 174 37 38 37 4 0 0 2 2 2
36380 +5 5 5 4 4 4 3 2 2 1 1 2 13 20 25 10 87 144
36381 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 13 20 25
36382 +13 20 25 22 40 52 10 87 144 18 97 151 18 97 151 26 108 161
36383 +10 87 144 13 20 25 6 10 14 21 29 34 24 86 132 18 97 151
36384 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
36385 +26 108 161 90 154 193 18 97 151 13 20 25 0 0 0 4 3 3
36386 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
36387 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
36388 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
36389 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36390 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36391 +4 4 4 4 4 4
36392 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
36393 +190 197 201 220 221 221 167 166 167 30 32 34 1 0 0 2 2 2
36394 +5 5 5 4 4 4 4 4 5 2 5 5 4 5 7 13 20 25
36395 +28 67 93 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
36396 +10 87 144 10 87 144 18 97 151 10 87 144 18 97 151 18 97 151
36397 +28 67 93 2 3 3 0 0 0 28 67 93 26 108 161 26 108 161
36398 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
36399 +26 108 161 10 87 144 13 20 25 1 1 2 3 2 2 4 4 4
36400 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
36401 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
36402 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
36403 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36404 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36405 +4 4 4 4 4 4
36406 +4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
36407 +193 200 203 193 200 203 174 174 174 26 28 28 4 0 0 4 3 3
36408 +5 5 5 4 4 4 4 4 4 4 4 5 1 1 2 2 5 5
36409 +4 5 7 22 40 52 10 87 144 10 87 144 18 97 151 10 87 144
36410 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 18 97 151
36411 +10 87 144 28 67 93 22 40 52 10 87 144 26 108 161 18 97 151
36412 +18 97 151 18 97 151 26 108 161 26 108 161 26 108 161 26 108 161
36413 +22 40 52 1 1 2 0 0 0 2 3 3 4 4 4 4 4 4
36414 +4 4 4 5 5 5 4 4 4 0 0 0 26 28 28 131 129 131
36415 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
36416 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
36417 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36418 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36419 +4 4 4 4 4 4
36420 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
36421 +190 197 201 220 221 221 190 197 201 41 54 63 4 0 0 2 2 2
36422 +6 6 6 4 4 4 4 4 4 4 4 5 4 4 5 3 3 3
36423 +1 1 2 1 1 2 6 10 14 22 40 52 10 87 144 18 97 151
36424 +18 97 151 10 87 144 10 87 144 10 87 144 18 97 151 10 87 144
36425 +10 87 144 18 97 151 26 108 161 18 97 151 18 97 151 10 87 144
36426 +26 108 161 26 108 161 26 108 161 10 87 144 28 67 93 6 10 14
36427 +1 1 2 1 1 2 4 3 3 4 4 5 4 4 4 4 4 4
36428 +5 5 5 5 5 5 1 1 1 4 0 0 37 51 59 137 136 137
36429 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
36430 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
36431 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36432 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36433 +4 4 4 4 4 4
36434 +4 0 0 4 0 0 60 73 81 220 221 221 193 200 203 174 174 174
36435 +193 200 203 193 200 203 220 221 221 137 136 137 13 16 17 4 0 0
36436 +2 2 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5
36437 +4 4 5 4 3 3 1 1 2 4 5 7 13 20 25 28 67 93
36438 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
36439 +10 87 144 18 97 151 18 97 151 10 87 144 18 97 151 26 108 161
36440 +26 108 161 18 97 151 28 67 93 6 10 14 0 0 0 0 0 0
36441 +2 3 3 4 5 5 4 4 5 4 4 4 4 4 4 5 5 5
36442 +3 3 3 1 1 1 0 0 0 16 19 21 125 124 125 137 136 137
36443 +131 129 131 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
36444 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
36445 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36446 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36447 +4 4 4 4 4 4
36448 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
36449 +193 200 203 190 197 201 220 221 221 220 221 221 153 152 153 30 32 34
36450 +0 0 0 0 0 0 2 2 2 4 4 4 4 4 4 4 4 4
36451 +4 4 4 4 5 5 4 5 7 1 1 2 1 1 2 4 5 7
36452 +13 20 25 28 67 93 10 87 144 18 97 151 10 87 144 10 87 144
36453 +10 87 144 10 87 144 10 87 144 18 97 151 26 108 161 18 97 151
36454 +28 67 93 7 12 15 0 0 0 0 0 0 2 2 1 4 4 4
36455 +4 5 5 4 5 5 4 4 4 4 4 4 3 3 3 0 0 0
36456 +0 0 0 0 0 0 37 38 37 125 124 125 158 157 158 131 129 131
36457 +125 124 125 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
36458 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
36459 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36460 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36461 +4 4 4 4 4 4
36462 +4 3 3 4 0 0 41 54 63 193 200 203 220 221 221 174 174 174
36463 +193 200 203 193 200 203 193 200 203 220 221 221 244 246 246 193 200 203
36464 +120 125 127 5 5 5 1 0 0 0 0 0 1 1 1 4 4 4
36465 +4 4 4 4 4 4 4 5 5 4 5 5 4 4 5 1 1 2
36466 +4 5 7 4 5 7 22 40 52 10 87 144 10 87 144 10 87 144
36467 +10 87 144 10 87 144 18 97 151 10 87 144 10 87 144 13 20 25
36468 +4 5 7 2 3 3 1 1 2 4 4 4 4 5 5 4 4 4
36469 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 1 2
36470 +24 26 27 60 74 84 153 152 153 163 162 163 137 136 137 125 124 125
36471 +125 124 125 125 124 125 125 124 125 137 136 137 125 124 125 26 28 28
36472 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
36473 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36474 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36475 +4 4 4 4 4 4
36476 +4 0 0 6 6 6 26 28 28 156 155 156 220 221 221 220 221 221
36477 +174 174 174 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
36478 +220 221 221 167 166 167 60 73 81 7 11 13 0 0 0 0 0 0
36479 +3 3 3 4 4 4 4 4 4 4 4 4 4 4 5 4 4 5
36480 +4 4 5 1 1 2 1 1 2 4 5 7 22 40 52 10 87 144
36481 +10 87 144 10 87 144 10 87 144 22 40 52 4 5 7 1 1 2
36482 +1 1 2 4 4 5 4 4 4 4 4 4 4 4 4 4 4 4
36483 +5 5 5 2 2 2 0 0 0 4 0 0 16 19 21 60 73 81
36484 +137 136 137 167 166 167 158 157 158 137 136 137 131 129 131 131 129 131
36485 +125 124 125 125 124 125 131 129 131 155 154 155 60 74 84 5 7 8
36486 +0 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36487 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36488 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36489 +4 4 4 4 4 4
36490 +5 5 5 4 0 0 4 0 0 60 73 81 193 200 203 220 221 221
36491 +193 200 203 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
36492 +220 221 221 220 221 221 220 221 221 137 136 137 43 57 68 6 6 6
36493 +4 0 0 1 1 1 4 4 4 4 4 4 4 4 4 4 4 4
36494 +4 4 5 4 4 5 3 2 2 1 1 2 2 5 5 13 20 25
36495 +22 40 52 22 40 52 13 20 25 2 3 3 1 1 2 3 3 3
36496 +4 5 7 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36497 +1 1 1 0 0 0 2 3 3 41 54 63 131 129 131 166 165 166
36498 +166 165 166 155 154 155 153 152 153 137 136 137 137 136 137 125 124 125
36499 +125 124 125 137 136 137 137 136 137 125 124 125 37 38 37 4 3 3
36500 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
36501 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36502 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36503 +4 4 4 4 4 4
36504 +4 3 3 6 6 6 6 6 6 13 16 17 60 73 81 167 166 167
36505 +220 221 221 220 221 221 220 221 221 193 200 203 193 200 203 193 200 203
36506 +205 212 215 220 221 221 220 221 221 244 246 246 205 212 215 125 124 125
36507 +24 26 27 0 0 0 0 0 0 2 2 2 5 5 5 5 5 5
36508 +4 4 4 4 4 4 4 4 4 4 4 5 1 1 2 4 5 7
36509 +4 5 7 4 5 7 1 1 2 3 2 2 4 4 5 4 4 4
36510 +4 4 4 4 4 4 5 5 5 4 4 4 0 0 0 0 0 0
36511 +2 0 0 26 28 28 125 124 125 174 174 174 174 174 174 166 165 166
36512 +156 155 156 153 152 153 137 136 137 137 136 137 131 129 131 137 136 137
36513 +137 136 137 137 136 137 60 74 84 30 32 34 4 0 0 4 0 0
36514 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36515 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36516 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36517 +4 4 4 4 4 4
36518 +5 5 5 6 6 6 4 0 0 4 0 0 6 6 6 26 28 28
36519 +125 124 125 174 174 174 220 221 221 220 221 221 220 221 221 193 200 203
36520 +205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
36521 +193 200 203 60 74 84 13 16 17 4 0 0 0 0 0 3 3 3
36522 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 5 3 3 3
36523 +1 1 2 3 3 3 4 4 5 4 4 5 4 4 4 4 4 4
36524 +5 5 5 5 5 5 2 2 2 0 0 0 0 0 0 13 16 17
36525 +60 74 84 174 174 174 193 200 203 174 174 174 167 166 167 163 162 163
36526 +153 152 153 153 152 153 137 136 137 137 136 137 153 152 153 137 136 137
36527 +125 124 125 41 54 63 24 26 27 4 0 0 4 0 0 5 5 5
36528 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36529 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36530 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36531 +4 4 4 4 4 4
36532 +4 3 3 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
36533 +6 6 6 37 38 37 131 129 131 220 221 221 220 221 221 220 221 221
36534 +193 200 203 193 200 203 220 221 221 205 212 215 220 221 221 244 246 246
36535 +244 246 246 244 246 246 174 174 174 41 54 63 0 0 0 0 0 0
36536 +0 0 0 4 4 4 5 5 5 5 5 5 4 4 4 4 4 5
36537 +4 4 5 4 4 5 4 4 4 4 4 4 6 6 6 6 6 6
36538 +3 3 3 0 0 0 2 0 0 13 16 17 60 73 81 156 155 156
36539 +220 221 221 193 200 203 174 174 174 165 164 165 163 162 163 154 153 154
36540 +153 152 153 153 152 153 158 157 158 163 162 163 137 136 137 60 73 81
36541 +13 16 17 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
36542 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36543 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36544 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36545 +4 4 4 4 4 4
36546 +5 5 5 4 3 3 4 3 3 6 6 6 6 6 6 6 6 6
36547 +6 6 6 6 6 6 6 6 6 37 38 37 167 166 167 244 246 246
36548 +244 246 246 220 221 221 205 212 215 205 212 215 220 221 221 193 200 203
36549 +220 221 221 244 246 246 244 246 246 244 246 246 137 136 137 37 38 37
36550 +3 2 2 0 0 0 1 1 1 5 5 5 5 5 5 4 4 4
36551 +4 4 4 4 4 4 4 4 4 5 5 5 4 4 4 1 1 1
36552 +0 0 0 5 5 5 43 57 68 153 152 153 193 200 203 220 221 221
36553 +177 184 187 174 174 174 167 166 167 166 165 166 158 157 158 157 156 157
36554 +158 157 158 166 165 166 156 155 156 85 115 134 13 16 17 4 0 0
36555 +4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
36556 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36557 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36558 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36559 +4 4 4 4 4 4
36560 +5 5 5 4 3 3 6 6 6 6 6 6 4 0 0 6 6 6
36561 +6 6 6 6 6 6 6 6 6 6 6 6 13 16 17 60 73 81
36562 +177 184 187 220 221 221 220 221 221 220 221 221 205 212 215 220 221 221
36563 +220 221 221 205 212 215 220 221 221 244 246 246 244 246 246 205 212 215
36564 +125 124 125 30 32 34 0 0 0 0 0 0 2 2 2 5 5 5
36565 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 0 0
36566 +37 38 37 131 129 131 205 212 215 220 221 221 193 200 203 174 174 174
36567 +174 174 174 174 174 174 167 166 167 165 164 165 166 165 166 167 166 167
36568 +158 157 158 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
36569 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
36570 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36571 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36572 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36573 +4 4 4 4 4 4
36574 +4 4 4 5 5 5 4 3 3 4 3 3 6 6 6 6 6 6
36575 +4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
36576 +26 28 28 125 124 125 205 212 215 220 221 221 220 221 221 220 221 221
36577 +205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
36578 +244 246 246 190 197 201 60 74 84 16 19 21 4 0 0 0 0 0
36579 +0 0 0 0 0 0 0 0 0 0 0 0 16 19 21 120 125 127
36580 +177 184 187 220 221 221 205 212 215 177 184 187 174 174 174 177 184 187
36581 +174 174 174 174 174 174 167 166 167 174 174 174 166 165 166 137 136 137
36582 +60 73 81 13 16 17 4 0 0 4 0 0 4 3 3 6 6 6
36583 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36584 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36585 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36586 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36587 +4 4 4 4 4 4
36588 +5 5 5 4 3 3 5 5 5 4 3 3 6 6 6 4 0 0
36589 +6 6 6 6 6 6 4 0 0 6 6 6 4 0 0 6 6 6
36590 +6 6 6 6 6 6 37 38 37 137 136 137 193 200 203 220 221 221
36591 +220 221 221 205 212 215 220 221 221 205 212 215 205 212 215 220 221 221
36592 +220 221 221 220 221 221 244 246 246 166 165 166 43 57 68 2 2 2
36593 +0 0 0 4 0 0 16 19 21 60 73 81 157 156 157 202 210 214
36594 +220 221 221 193 200 203 177 184 187 177 184 187 177 184 187 174 174 174
36595 +174 174 174 174 174 174 174 174 174 157 156 157 60 74 84 24 26 27
36596 +4 0 0 4 0 0 4 0 0 6 6 6 4 4 4 4 4 4
36597 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36598 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36599 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36600 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36601 +4 4 4 4 4 4
36602 +4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
36603 +6 6 6 4 0 0 6 6 6 6 6 6 6 6 6 4 0 0
36604 +4 0 0 4 0 0 6 6 6 24 26 27 60 73 81 167 166 167
36605 +220 221 221 220 221 221 220 221 221 205 212 215 205 212 215 205 212 215
36606 +205 212 215 220 221 221 220 221 221 220 221 221 205 212 215 137 136 137
36607 +60 74 84 125 124 125 137 136 137 190 197 201 220 221 221 193 200 203
36608 +177 184 187 177 184 187 177 184 187 174 174 174 174 174 174 177 184 187
36609 +190 197 201 174 174 174 125 124 125 37 38 37 6 6 6 4 0 0
36610 +4 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36611 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36612 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36613 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36614 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36615 +4 4 4 4 4 4
36616 +4 4 4 4 4 4 5 5 5 5 5 5 4 3 3 6 6 6
36617 +4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 6 6 6
36618 +6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
36619 +125 124 125 193 200 203 244 246 246 220 221 221 205 212 215 205 212 215
36620 +205 212 215 193 200 203 205 212 215 205 212 215 220 221 221 220 221 221
36621 +193 200 203 193 200 203 205 212 215 193 200 203 193 200 203 177 184 187
36622 +190 197 201 190 197 201 174 174 174 190 197 201 193 200 203 190 197 201
36623 +153 152 153 60 73 81 4 0 0 4 0 0 4 0 0 3 2 2
36624 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36625 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36626 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36627 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36628 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36629 +4 4 4 4 4 4
36630 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
36631 +6 6 6 4 3 3 4 3 3 4 3 3 6 6 6 6 6 6
36632 +4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 4 0 0
36633 +4 0 0 26 28 28 131 129 131 220 221 221 244 246 246 220 221 221
36634 +205 212 215 193 200 203 205 212 215 193 200 203 193 200 203 205 212 215
36635 +220 221 221 193 200 203 193 200 203 193 200 203 190 197 201 174 174 174
36636 +174 174 174 190 197 201 193 200 203 193 200 203 167 166 167 125 124 125
36637 +6 6 6 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
36638 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36639 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36640 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36641 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36642 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36643 +4 4 4 4 4 4
36644 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
36645 +5 5 5 4 3 3 5 5 5 6 6 6 4 3 3 5 5 5
36646 +6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
36647 +4 0 0 4 0 0 6 6 6 41 54 63 158 157 158 220 221 221
36648 +220 221 221 220 221 221 193 200 203 193 200 203 193 200 203 190 197 201
36649 +190 197 201 190 197 201 190 197 201 190 197 201 174 174 174 193 200 203
36650 +193 200 203 220 221 221 174 174 174 125 124 125 37 38 37 4 0 0
36651 +4 0 0 4 3 3 6 6 6 4 4 4 4 4 4 4 4 4
36652 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36653 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36654 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36655 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36656 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36657 +4 4 4 4 4 4
36658 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36659 +4 4 4 5 5 5 4 3 3 4 3 3 4 3 3 5 5 5
36660 +4 3 3 6 6 6 5 5 5 4 3 3 6 6 6 6 6 6
36661 +6 6 6 6 6 6 4 0 0 4 0 0 13 16 17 60 73 81
36662 +174 174 174 220 221 221 220 221 221 205 212 215 190 197 201 174 174 174
36663 +193 200 203 174 174 174 190 197 201 174 174 174 193 200 203 220 221 221
36664 +193 200 203 131 129 131 37 38 37 6 6 6 4 0 0 4 0 0
36665 +6 6 6 6 6 6 4 3 3 5 5 5 4 4 4 4 4 4
36666 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36667 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36668 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36669 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36670 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36671 +4 4 4 4 4 4
36672 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36673 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
36674 +5 5 5 4 3 3 4 3 3 5 5 5 4 3 3 4 3 3
36675 +5 5 5 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
36676 +6 6 6 125 124 125 174 174 174 220 221 221 220 221 221 193 200 203
36677 +193 200 203 193 200 203 193 200 203 193 200 203 220 221 221 158 157 158
36678 +60 73 81 6 6 6 4 0 0 4 0 0 5 5 5 6 6 6
36679 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
36680 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36681 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36682 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36683 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36684 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36685 +4 4 4 4 4 4
36686 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36687 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36688 +4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
36689 +5 5 5 5 5 5 6 6 6 6 6 6 4 0 0 4 0 0
36690 +4 0 0 4 0 0 26 28 28 125 124 125 174 174 174 193 200 203
36691 +193 200 203 174 174 174 193 200 203 167 166 167 125 124 125 6 6 6
36692 +6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 5 5 5
36693 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
36694 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36695 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36696 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36697 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36698 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36699 +4 4 4 4 4 4
36700 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36701 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36702 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
36703 +4 3 3 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
36704 +6 6 6 4 0 0 4 0 0 6 6 6 37 38 37 125 124 125
36705 +153 152 153 131 129 131 125 124 125 37 38 37 6 6 6 6 6 6
36706 +6 6 6 4 0 0 6 6 6 6 6 6 4 3 3 5 5 5
36707 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36708 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36709 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36710 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36711 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36712 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36713 +4 4 4 4 4 4
36714 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36715 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36716 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36717 +4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
36718 +6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
36719 +24 26 27 24 26 27 6 6 6 6 6 6 6 6 6 4 0 0
36720 +6 6 6 6 6 6 4 0 0 6 6 6 5 5 5 4 3 3
36721 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36722 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36723 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36724 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36725 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36726 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36727 +4 4 4 4 4 4
36728 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36729 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36730 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36731 +4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
36732 +4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
36733 +6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
36734 +4 0 0 6 6 6 6 6 6 4 3 3 5 5 5 4 4 4
36735 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36736 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36737 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36738 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36739 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36740 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36741 +4 4 4 4 4 4
36742 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36743 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36744 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36745 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 5 5 5
36746 +5 5 5 5 5 5 4 0 0 6 6 6 4 0 0 6 6 6
36747 +6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 4 0 0
36748 +6 6 6 4 3 3 5 5 5 4 3 3 5 5 5 4 4 4
36749 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36750 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36751 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36752 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36753 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36754 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36755 +4 4 4 4 4 4
36756 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36757 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36758 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36759 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
36760 +4 3 3 6 6 6 4 3 3 6 6 6 6 6 6 6 6 6
36761 +4 0 0 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
36762 +6 6 6 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
36763 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36764 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36765 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36766 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36767 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36768 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36769 +4 4 4 4 4 4
36770 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36771 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36772 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36773 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36774 +4 4 4 5 5 5 4 3 3 5 5 5 4 0 0 6 6 6
36775 +6 6 6 4 0 0 6 6 6 6 6 6 4 0 0 6 6 6
36776 +4 3 3 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
36777 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36778 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36779 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36780 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36781 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36782 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36783 +4 4 4 4 4 4
36784 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36785 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36786 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36787 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36788 +4 4 4 5 5 5 4 3 3 5 5 5 6 6 6 4 3 3
36789 +4 3 3 6 6 6 6 6 6 4 3 3 6 6 6 4 3 3
36790 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36791 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36792 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36793 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36794 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36795 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36796 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36797 +4 4 4 4 4 4
36798 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36799 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36800 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36801 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36802 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 6 6 6
36803 +5 5 5 4 3 3 4 3 3 4 3 3 5 5 5 5 5 5
36804 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36805 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36806 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36807 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36808 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36809 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36810 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36811 +4 4 4 4 4 4
36812 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36813 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36814 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36815 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36816 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
36817 +5 5 5 4 3 3 5 5 5 5 5 5 4 4 4 4 4 4
36818 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36819 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36820 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36821 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36822 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36823 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36824 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
36825 +4 4 4 4 4 4
36826 diff -urNp linux-3.0.4/drivers/video/udlfb.c linux-3.0.4/drivers/video/udlfb.c
36827 --- linux-3.0.4/drivers/video/udlfb.c 2011-07-21 22:17:23.000000000 -0400
36828 +++ linux-3.0.4/drivers/video/udlfb.c 2011-08-23 21:47:56.000000000 -0400
36829 @@ -586,11 +586,11 @@ int dlfb_handle_damage(struct dlfb_data
36830 dlfb_urb_completion(urb);
36831
36832 error:
36833 - atomic_add(bytes_sent, &dev->bytes_sent);
36834 - atomic_add(bytes_identical, &dev->bytes_identical);
36835 - atomic_add(width*height*2, &dev->bytes_rendered);
36836 + atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
36837 + atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
36838 + atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
36839 end_cycles = get_cycles();
36840 - atomic_add(((unsigned int) ((end_cycles - start_cycles)
36841 + atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
36842 >> 10)), /* Kcycles */
36843 &dev->cpu_kcycles_used);
36844
36845 @@ -711,11 +711,11 @@ static void dlfb_dpy_deferred_io(struct
36846 dlfb_urb_completion(urb);
36847
36848 error:
36849 - atomic_add(bytes_sent, &dev->bytes_sent);
36850 - atomic_add(bytes_identical, &dev->bytes_identical);
36851 - atomic_add(bytes_rendered, &dev->bytes_rendered);
36852 + atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
36853 + atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
36854 + atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
36855 end_cycles = get_cycles();
36856 - atomic_add(((unsigned int) ((end_cycles - start_cycles)
36857 + atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
36858 >> 10)), /* Kcycles */
36859 &dev->cpu_kcycles_used);
36860 }
36861 @@ -1307,7 +1307,7 @@ static ssize_t metrics_bytes_rendered_sh
36862 struct fb_info *fb_info = dev_get_drvdata(fbdev);
36863 struct dlfb_data *dev = fb_info->par;
36864 return snprintf(buf, PAGE_SIZE, "%u\n",
36865 - atomic_read(&dev->bytes_rendered));
36866 + atomic_read_unchecked(&dev->bytes_rendered));
36867 }
36868
36869 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
36870 @@ -1315,7 +1315,7 @@ static ssize_t metrics_bytes_identical_s
36871 struct fb_info *fb_info = dev_get_drvdata(fbdev);
36872 struct dlfb_data *dev = fb_info->par;
36873 return snprintf(buf, PAGE_SIZE, "%u\n",
36874 - atomic_read(&dev->bytes_identical));
36875 + atomic_read_unchecked(&dev->bytes_identical));
36876 }
36877
36878 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
36879 @@ -1323,7 +1323,7 @@ static ssize_t metrics_bytes_sent_show(s
36880 struct fb_info *fb_info = dev_get_drvdata(fbdev);
36881 struct dlfb_data *dev = fb_info->par;
36882 return snprintf(buf, PAGE_SIZE, "%u\n",
36883 - atomic_read(&dev->bytes_sent));
36884 + atomic_read_unchecked(&dev->bytes_sent));
36885 }
36886
36887 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
36888 @@ -1331,7 +1331,7 @@ static ssize_t metrics_cpu_kcycles_used_
36889 struct fb_info *fb_info = dev_get_drvdata(fbdev);
36890 struct dlfb_data *dev = fb_info->par;
36891 return snprintf(buf, PAGE_SIZE, "%u\n",
36892 - atomic_read(&dev->cpu_kcycles_used));
36893 + atomic_read_unchecked(&dev->cpu_kcycles_used));
36894 }
36895
36896 static ssize_t edid_show(
36897 @@ -1388,10 +1388,10 @@ static ssize_t metrics_reset_store(struc
36898 struct fb_info *fb_info = dev_get_drvdata(fbdev);
36899 struct dlfb_data *dev = fb_info->par;
36900
36901 - atomic_set(&dev->bytes_rendered, 0);
36902 - atomic_set(&dev->bytes_identical, 0);
36903 - atomic_set(&dev->bytes_sent, 0);
36904 - atomic_set(&dev->cpu_kcycles_used, 0);
36905 + atomic_set_unchecked(&dev->bytes_rendered, 0);
36906 + atomic_set_unchecked(&dev->bytes_identical, 0);
36907 + atomic_set_unchecked(&dev->bytes_sent, 0);
36908 + atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
36909
36910 return count;
36911 }
36912 diff -urNp linux-3.0.4/drivers/video/uvesafb.c linux-3.0.4/drivers/video/uvesafb.c
36913 --- linux-3.0.4/drivers/video/uvesafb.c 2011-07-21 22:17:23.000000000 -0400
36914 +++ linux-3.0.4/drivers/video/uvesafb.c 2011-08-23 21:47:56.000000000 -0400
36915 @@ -19,6 +19,7 @@
36916 #include <linux/io.h>
36917 #include <linux/mutex.h>
36918 #include <linux/slab.h>
36919 +#include <linux/moduleloader.h>
36920 #include <video/edid.h>
36921 #include <video/uvesafb.h>
36922 #ifdef CONFIG_X86
36923 @@ -121,7 +122,7 @@ static int uvesafb_helper_start(void)
36924 NULL,
36925 };
36926
36927 - return call_usermodehelper(v86d_path, argv, envp, 1);
36928 + return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC);
36929 }
36930
36931 /*
36932 @@ -569,10 +570,32 @@ static int __devinit uvesafb_vbe_getpmi(
36933 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
36934 par->pmi_setpal = par->ypan = 0;
36935 } else {
36936 +
36937 +#ifdef CONFIG_PAX_KERNEXEC
36938 +#ifdef CONFIG_MODULES
36939 + par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
36940 +#endif
36941 + if (!par->pmi_code) {
36942 + par->pmi_setpal = par->ypan = 0;
36943 + return 0;
36944 + }
36945 +#endif
36946 +
36947 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
36948 + task->t.regs.edi);
36949 +
36950 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
36951 + pax_open_kernel();
36952 + memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
36953 + pax_close_kernel();
36954 +
36955 + par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
36956 + par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
36957 +#else
36958 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
36959 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
36960 +#endif
36961 +
36962 printk(KERN_INFO "uvesafb: protected mode interface info at "
36963 "%04x:%04x\n",
36964 (u16)task->t.regs.es, (u16)task->t.regs.edi);
36965 @@ -1821,6 +1844,11 @@ out:
36966 if (par->vbe_modes)
36967 kfree(par->vbe_modes);
36968
36969 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
36970 + if (par->pmi_code)
36971 + module_free_exec(NULL, par->pmi_code);
36972 +#endif
36973 +
36974 framebuffer_release(info);
36975 return err;
36976 }
36977 @@ -1847,6 +1875,12 @@ static int uvesafb_remove(struct platfor
36978 kfree(par->vbe_state_orig);
36979 if (par->vbe_state_saved)
36980 kfree(par->vbe_state_saved);
36981 +
36982 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
36983 + if (par->pmi_code)
36984 + module_free_exec(NULL, par->pmi_code);
36985 +#endif
36986 +
36987 }
36988
36989 framebuffer_release(info);
36990 diff -urNp linux-3.0.4/drivers/video/vesafb.c linux-3.0.4/drivers/video/vesafb.c
36991 --- linux-3.0.4/drivers/video/vesafb.c 2011-07-21 22:17:23.000000000 -0400
36992 +++ linux-3.0.4/drivers/video/vesafb.c 2011-08-23 21:47:56.000000000 -0400
36993 @@ -9,6 +9,7 @@
36994 */
36995
36996 #include <linux/module.h>
36997 +#include <linux/moduleloader.h>
36998 #include <linux/kernel.h>
36999 #include <linux/errno.h>
37000 #include <linux/string.h>
37001 @@ -52,8 +53,8 @@ static int vram_remap __initdata; /*
37002 static int vram_total __initdata; /* Set total amount of memory */
37003 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
37004 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
37005 -static void (*pmi_start)(void) __read_mostly;
37006 -static void (*pmi_pal) (void) __read_mostly;
37007 +static void (*pmi_start)(void) __read_only;
37008 +static void (*pmi_pal) (void) __read_only;
37009 static int depth __read_mostly;
37010 static int vga_compat __read_mostly;
37011 /* --------------------------------------------------------------------- */
37012 @@ -233,6 +234,7 @@ static int __init vesafb_probe(struct pl
37013 unsigned int size_vmode;
37014 unsigned int size_remap;
37015 unsigned int size_total;
37016 + void *pmi_code = NULL;
37017
37018 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
37019 return -ENODEV;
37020 @@ -275,10 +277,6 @@ static int __init vesafb_probe(struct pl
37021 size_remap = size_total;
37022 vesafb_fix.smem_len = size_remap;
37023
37024 -#ifndef __i386__
37025 - screen_info.vesapm_seg = 0;
37026 -#endif
37027 -
37028 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
37029 printk(KERN_WARNING
37030 "vesafb: cannot reserve video memory at 0x%lx\n",
37031 @@ -307,9 +305,21 @@ static int __init vesafb_probe(struct pl
37032 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
37033 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
37034
37035 +#ifdef __i386__
37036 +
37037 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
37038 + pmi_code = module_alloc_exec(screen_info.vesapm_size);
37039 + if (!pmi_code)
37040 +#elif !defined(CONFIG_PAX_KERNEXEC)
37041 + if (0)
37042 +#endif
37043 +
37044 +#endif
37045 + screen_info.vesapm_seg = 0;
37046 +
37047 if (screen_info.vesapm_seg) {
37048 - printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
37049 - screen_info.vesapm_seg,screen_info.vesapm_off);
37050 + printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
37051 + screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
37052 }
37053
37054 if (screen_info.vesapm_seg < 0xc000)
37055 @@ -317,9 +327,25 @@ static int __init vesafb_probe(struct pl
37056
37057 if (ypan || pmi_setpal) {
37058 unsigned short *pmi_base;
37059 +
37060 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
37061 - pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
37062 - pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
37063 +
37064 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
37065 + pax_open_kernel();
37066 + memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
37067 +#else
37068 + pmi_code = pmi_base;
37069 +#endif
37070 +
37071 + pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
37072 + pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
37073 +
37074 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
37075 + pmi_start = ktva_ktla(pmi_start);
37076 + pmi_pal = ktva_ktla(pmi_pal);
37077 + pax_close_kernel();
37078 +#endif
37079 +
37080 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
37081 if (pmi_base[3]) {
37082 printk(KERN_INFO "vesafb: pmi: ports = ");
37083 @@ -488,6 +514,11 @@ static int __init vesafb_probe(struct pl
37084 info->node, info->fix.id);
37085 return 0;
37086 err:
37087 +
37088 +#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
37089 + module_free_exec(NULL, pmi_code);
37090 +#endif
37091 +
37092 if (info->screen_base)
37093 iounmap(info->screen_base);
37094 framebuffer_release(info);
37095 diff -urNp linux-3.0.4/drivers/video/via/via_clock.h linux-3.0.4/drivers/video/via/via_clock.h
37096 --- linux-3.0.4/drivers/video/via/via_clock.h 2011-07-21 22:17:23.000000000 -0400
37097 +++ linux-3.0.4/drivers/video/via/via_clock.h 2011-08-23 21:47:56.000000000 -0400
37098 @@ -56,7 +56,7 @@ struct via_clock {
37099
37100 void (*set_engine_pll_state)(u8 state);
37101 void (*set_engine_pll)(struct via_pll_config config);
37102 -};
37103 +} __no_const;
37104
37105
37106 static inline u32 get_pll_internal_frequency(u32 ref_freq,
37107 diff -urNp linux-3.0.4/drivers/virtio/virtio_balloon.c linux-3.0.4/drivers/virtio/virtio_balloon.c
37108 --- linux-3.0.4/drivers/virtio/virtio_balloon.c 2011-07-21 22:17:23.000000000 -0400
37109 +++ linux-3.0.4/drivers/virtio/virtio_balloon.c 2011-08-23 21:48:14.000000000 -0400
37110 @@ -174,6 +174,8 @@ static void update_balloon_stats(struct
37111 struct sysinfo i;
37112 int idx = 0;
37113
37114 + pax_track_stack();
37115 +
37116 all_vm_events(events);
37117 si_meminfo(&i);
37118
37119 diff -urNp linux-3.0.4/fs/9p/vfs_inode.c linux-3.0.4/fs/9p/vfs_inode.c
37120 --- linux-3.0.4/fs/9p/vfs_inode.c 2011-07-21 22:17:23.000000000 -0400
37121 +++ linux-3.0.4/fs/9p/vfs_inode.c 2011-08-23 21:47:56.000000000 -0400
37122 @@ -1210,7 +1210,7 @@ static void *v9fs_vfs_follow_link(struct
37123 void
37124 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
37125 {
37126 - char *s = nd_get_link(nd);
37127 + const char *s = nd_get_link(nd);
37128
37129 P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name,
37130 IS_ERR(s) ? "<error>" : s);
37131 diff -urNp linux-3.0.4/fs/aio.c linux-3.0.4/fs/aio.c
37132 --- linux-3.0.4/fs/aio.c 2011-07-21 22:17:23.000000000 -0400
37133 +++ linux-3.0.4/fs/aio.c 2011-08-23 21:48:14.000000000 -0400
37134 @@ -119,7 +119,7 @@ static int aio_setup_ring(struct kioctx
37135 size += sizeof(struct io_event) * nr_events;
37136 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
37137
37138 - if (nr_pages < 0)
37139 + if (nr_pages <= 0)
37140 return -EINVAL;
37141
37142 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
37143 @@ -1088,6 +1088,8 @@ static int read_events(struct kioctx *ct
37144 struct aio_timeout to;
37145 int retry = 0;
37146
37147 + pax_track_stack();
37148 +
37149 /* needed to zero any padding within an entry (there shouldn't be
37150 * any, but C is fun!
37151 */
37152 @@ -1381,22 +1383,27 @@ static ssize_t aio_fsync(struct kiocb *i
37153 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
37154 {
37155 ssize_t ret;
37156 + struct iovec iovstack;
37157
37158 #ifdef CONFIG_COMPAT
37159 if (compat)
37160 ret = compat_rw_copy_check_uvector(type,
37161 (struct compat_iovec __user *)kiocb->ki_buf,
37162 - kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
37163 + kiocb->ki_nbytes, 1, &iovstack,
37164 &kiocb->ki_iovec);
37165 else
37166 #endif
37167 ret = rw_copy_check_uvector(type,
37168 (struct iovec __user *)kiocb->ki_buf,
37169 - kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
37170 + kiocb->ki_nbytes, 1, &iovstack,
37171 &kiocb->ki_iovec);
37172 if (ret < 0)
37173 goto out;
37174
37175 + if (kiocb->ki_iovec == &iovstack) {
37176 + kiocb->ki_inline_vec = iovstack;
37177 + kiocb->ki_iovec = &kiocb->ki_inline_vec;
37178 + }
37179 kiocb->ki_nr_segs = kiocb->ki_nbytes;
37180 kiocb->ki_cur_seg = 0;
37181 /* ki_nbytes/left now reflect bytes instead of segs */
37182 diff -urNp linux-3.0.4/fs/attr.c linux-3.0.4/fs/attr.c
37183 --- linux-3.0.4/fs/attr.c 2011-07-21 22:17:23.000000000 -0400
37184 +++ linux-3.0.4/fs/attr.c 2011-08-23 21:48:14.000000000 -0400
37185 @@ -98,6 +98,7 @@ int inode_newsize_ok(const struct inode
37186 unsigned long limit;
37187
37188 limit = rlimit(RLIMIT_FSIZE);
37189 + gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
37190 if (limit != RLIM_INFINITY && offset > limit)
37191 goto out_sig;
37192 if (offset > inode->i_sb->s_maxbytes)
37193 diff -urNp linux-3.0.4/fs/befs/linuxvfs.c linux-3.0.4/fs/befs/linuxvfs.c
37194 --- linux-3.0.4/fs/befs/linuxvfs.c 2011-09-02 18:11:26.000000000 -0400
37195 +++ linux-3.0.4/fs/befs/linuxvfs.c 2011-08-29 23:26:27.000000000 -0400
37196 @@ -503,7 +503,7 @@ static void befs_put_link(struct dentry
37197 {
37198 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
37199 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
37200 - char *link = nd_get_link(nd);
37201 + const char *link = nd_get_link(nd);
37202 if (!IS_ERR(link))
37203 kfree(link);
37204 }
37205 diff -urNp linux-3.0.4/fs/binfmt_aout.c linux-3.0.4/fs/binfmt_aout.c
37206 --- linux-3.0.4/fs/binfmt_aout.c 2011-07-21 22:17:23.000000000 -0400
37207 +++ linux-3.0.4/fs/binfmt_aout.c 2011-08-23 21:48:14.000000000 -0400
37208 @@ -16,6 +16,7 @@
37209 #include <linux/string.h>
37210 #include <linux/fs.h>
37211 #include <linux/file.h>
37212 +#include <linux/security.h>
37213 #include <linux/stat.h>
37214 #include <linux/fcntl.h>
37215 #include <linux/ptrace.h>
37216 @@ -86,6 +87,8 @@ static int aout_core_dump(struct coredum
37217 #endif
37218 # define START_STACK(u) ((void __user *)u.start_stack)
37219
37220 + memset(&dump, 0, sizeof(dump));
37221 +
37222 fs = get_fs();
37223 set_fs(KERNEL_DS);
37224 has_dumped = 1;
37225 @@ -97,10 +100,12 @@ static int aout_core_dump(struct coredum
37226
37227 /* If the size of the dump file exceeds the rlimit, then see what would happen
37228 if we wrote the stack, but not the data area. */
37229 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
37230 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
37231 dump.u_dsize = 0;
37232
37233 /* Make sure we have enough room to write the stack and data areas. */
37234 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
37235 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
37236 dump.u_ssize = 0;
37237
37238 @@ -234,6 +239,8 @@ static int load_aout_binary(struct linux
37239 rlim = rlimit(RLIMIT_DATA);
37240 if (rlim >= RLIM_INFINITY)
37241 rlim = ~0;
37242 +
37243 + gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
37244 if (ex.a_data + ex.a_bss > rlim)
37245 return -ENOMEM;
37246
37247 @@ -262,6 +269,27 @@ static int load_aout_binary(struct linux
37248 install_exec_creds(bprm);
37249 current->flags &= ~PF_FORKNOEXEC;
37250
37251 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
37252 + current->mm->pax_flags = 0UL;
37253 +#endif
37254 +
37255 +#ifdef CONFIG_PAX_PAGEEXEC
37256 + if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
37257 + current->mm->pax_flags |= MF_PAX_PAGEEXEC;
37258 +
37259 +#ifdef CONFIG_PAX_EMUTRAMP
37260 + if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
37261 + current->mm->pax_flags |= MF_PAX_EMUTRAMP;
37262 +#endif
37263 +
37264 +#ifdef CONFIG_PAX_MPROTECT
37265 + if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
37266 + current->mm->pax_flags |= MF_PAX_MPROTECT;
37267 +#endif
37268 +
37269 + }
37270 +#endif
37271 +
37272 if (N_MAGIC(ex) == OMAGIC) {
37273 unsigned long text_addr, map_size;
37274 loff_t pos;
37275 @@ -334,7 +362,7 @@ static int load_aout_binary(struct linux
37276
37277 down_write(&current->mm->mmap_sem);
37278 error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
37279 - PROT_READ | PROT_WRITE | PROT_EXEC,
37280 + PROT_READ | PROT_WRITE,
37281 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
37282 fd_offset + ex.a_text);
37283 up_write(&current->mm->mmap_sem);
37284 diff -urNp linux-3.0.4/fs/binfmt_elf.c linux-3.0.4/fs/binfmt_elf.c
37285 --- linux-3.0.4/fs/binfmt_elf.c 2011-07-21 22:17:23.000000000 -0400
37286 +++ linux-3.0.4/fs/binfmt_elf.c 2011-08-23 21:48:14.000000000 -0400
37287 @@ -51,6 +51,10 @@ static int elf_core_dump(struct coredump
37288 #define elf_core_dump NULL
37289 #endif
37290
37291 +#ifdef CONFIG_PAX_MPROTECT
37292 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
37293 +#endif
37294 +
37295 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
37296 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
37297 #else
37298 @@ -70,6 +74,11 @@ static struct linux_binfmt elf_format =
37299 .load_binary = load_elf_binary,
37300 .load_shlib = load_elf_library,
37301 .core_dump = elf_core_dump,
37302 +
37303 +#ifdef CONFIG_PAX_MPROTECT
37304 + .handle_mprotect= elf_handle_mprotect,
37305 +#endif
37306 +
37307 .min_coredump = ELF_EXEC_PAGESIZE,
37308 };
37309
37310 @@ -77,6 +86,8 @@ static struct linux_binfmt elf_format =
37311
37312 static int set_brk(unsigned long start, unsigned long end)
37313 {
37314 + unsigned long e = end;
37315 +
37316 start = ELF_PAGEALIGN(start);
37317 end = ELF_PAGEALIGN(end);
37318 if (end > start) {
37319 @@ -87,7 +98,7 @@ static int set_brk(unsigned long start,
37320 if (BAD_ADDR(addr))
37321 return addr;
37322 }
37323 - current->mm->start_brk = current->mm->brk = end;
37324 + current->mm->start_brk = current->mm->brk = e;
37325 return 0;
37326 }
37327
37328 @@ -148,12 +159,15 @@ create_elf_tables(struct linux_binprm *b
37329 elf_addr_t __user *u_rand_bytes;
37330 const char *k_platform = ELF_PLATFORM;
37331 const char *k_base_platform = ELF_BASE_PLATFORM;
37332 - unsigned char k_rand_bytes[16];
37333 + u32 k_rand_bytes[4];
37334 int items;
37335 elf_addr_t *elf_info;
37336 int ei_index = 0;
37337 const struct cred *cred = current_cred();
37338 struct vm_area_struct *vma;
37339 + unsigned long saved_auxv[AT_VECTOR_SIZE];
37340 +
37341 + pax_track_stack();
37342
37343 /*
37344 * In some cases (e.g. Hyper-Threading), we want to avoid L1
37345 @@ -195,8 +209,12 @@ create_elf_tables(struct linux_binprm *b
37346 * Generate 16 random bytes for userspace PRNG seeding.
37347 */
37348 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
37349 - u_rand_bytes = (elf_addr_t __user *)
37350 - STACK_ALLOC(p, sizeof(k_rand_bytes));
37351 + srandom32(k_rand_bytes[0] ^ random32());
37352 + srandom32(k_rand_bytes[1] ^ random32());
37353 + srandom32(k_rand_bytes[2] ^ random32());
37354 + srandom32(k_rand_bytes[3] ^ random32());
37355 + p = STACK_ROUND(p, sizeof(k_rand_bytes));
37356 + u_rand_bytes = (elf_addr_t __user *) p;
37357 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
37358 return -EFAULT;
37359
37360 @@ -308,9 +326,11 @@ create_elf_tables(struct linux_binprm *b
37361 return -EFAULT;
37362 current->mm->env_end = p;
37363
37364 + memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
37365 +
37366 /* Put the elf_info on the stack in the right place. */
37367 sp = (elf_addr_t __user *)envp + 1;
37368 - if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
37369 + if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
37370 return -EFAULT;
37371 return 0;
37372 }
37373 @@ -381,10 +401,10 @@ static unsigned long load_elf_interp(str
37374 {
37375 struct elf_phdr *elf_phdata;
37376 struct elf_phdr *eppnt;
37377 - unsigned long load_addr = 0;
37378 + unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
37379 int load_addr_set = 0;
37380 unsigned long last_bss = 0, elf_bss = 0;
37381 - unsigned long error = ~0UL;
37382 + unsigned long error = -EINVAL;
37383 unsigned long total_size;
37384 int retval, i, size;
37385
37386 @@ -430,6 +450,11 @@ static unsigned long load_elf_interp(str
37387 goto out_close;
37388 }
37389
37390 +#ifdef CONFIG_PAX_SEGMEXEC
37391 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
37392 + pax_task_size = SEGMEXEC_TASK_SIZE;
37393 +#endif
37394 +
37395 eppnt = elf_phdata;
37396 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
37397 if (eppnt->p_type == PT_LOAD) {
37398 @@ -473,8 +498,8 @@ static unsigned long load_elf_interp(str
37399 k = load_addr + eppnt->p_vaddr;
37400 if (BAD_ADDR(k) ||
37401 eppnt->p_filesz > eppnt->p_memsz ||
37402 - eppnt->p_memsz > TASK_SIZE ||
37403 - TASK_SIZE - eppnt->p_memsz < k) {
37404 + eppnt->p_memsz > pax_task_size ||
37405 + pax_task_size - eppnt->p_memsz < k) {
37406 error = -ENOMEM;
37407 goto out_close;
37408 }
37409 @@ -528,6 +553,193 @@ out:
37410 return error;
37411 }
37412
37413 +#if (defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)) && defined(CONFIG_PAX_SOFTMODE)
37414 +static unsigned long pax_parse_softmode(const struct elf_phdr * const elf_phdata)
37415 +{
37416 + unsigned long pax_flags = 0UL;
37417 +
37418 +#ifdef CONFIG_PAX_PAGEEXEC
37419 + if (elf_phdata->p_flags & PF_PAGEEXEC)
37420 + pax_flags |= MF_PAX_PAGEEXEC;
37421 +#endif
37422 +
37423 +#ifdef CONFIG_PAX_SEGMEXEC
37424 + if (elf_phdata->p_flags & PF_SEGMEXEC)
37425 + pax_flags |= MF_PAX_SEGMEXEC;
37426 +#endif
37427 +
37428 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
37429 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
37430 + if ((__supported_pte_mask & _PAGE_NX))
37431 + pax_flags &= ~MF_PAX_SEGMEXEC;
37432 + else
37433 + pax_flags &= ~MF_PAX_PAGEEXEC;
37434 + }
37435 +#endif
37436 +
37437 +#ifdef CONFIG_PAX_EMUTRAMP
37438 + if (elf_phdata->p_flags & PF_EMUTRAMP)
37439 + pax_flags |= MF_PAX_EMUTRAMP;
37440 +#endif
37441 +
37442 +#ifdef CONFIG_PAX_MPROTECT
37443 + if (elf_phdata->p_flags & PF_MPROTECT)
37444 + pax_flags |= MF_PAX_MPROTECT;
37445 +#endif
37446 +
37447 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
37448 + if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
37449 + pax_flags |= MF_PAX_RANDMMAP;
37450 +#endif
37451 +
37452 + return pax_flags;
37453 +}
37454 +#endif
37455 +
37456 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
37457 +static unsigned long pax_parse_hardmode(const struct elf_phdr * const elf_phdata)
37458 +{
37459 + unsigned long pax_flags = 0UL;
37460 +
37461 +#ifdef CONFIG_PAX_PAGEEXEC
37462 + if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
37463 + pax_flags |= MF_PAX_PAGEEXEC;
37464 +#endif
37465 +
37466 +#ifdef CONFIG_PAX_SEGMEXEC
37467 + if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
37468 + pax_flags |= MF_PAX_SEGMEXEC;
37469 +#endif
37470 +
37471 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
37472 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
37473 + if ((__supported_pte_mask & _PAGE_NX))
37474 + pax_flags &= ~MF_PAX_SEGMEXEC;
37475 + else
37476 + pax_flags &= ~MF_PAX_PAGEEXEC;
37477 + }
37478 +#endif
37479 +
37480 +#ifdef CONFIG_PAX_EMUTRAMP
37481 + if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
37482 + pax_flags |= MF_PAX_EMUTRAMP;
37483 +#endif
37484 +
37485 +#ifdef CONFIG_PAX_MPROTECT
37486 + if (!(elf_phdata->p_flags & PF_NOMPROTECT))
37487 + pax_flags |= MF_PAX_MPROTECT;
37488 +#endif
37489 +
37490 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
37491 + if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
37492 + pax_flags |= MF_PAX_RANDMMAP;
37493 +#endif
37494 +
37495 + return pax_flags;
37496 +}
37497 +#endif
37498 +
37499 +#ifdef CONFIG_PAX_EI_PAX
37500 +static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
37501 +{
37502 + unsigned long pax_flags = 0UL;
37503 +
37504 +#ifdef CONFIG_PAX_PAGEEXEC
37505 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
37506 + pax_flags |= MF_PAX_PAGEEXEC;
37507 +#endif
37508 +
37509 +#ifdef CONFIG_PAX_SEGMEXEC
37510 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
37511 + pax_flags |= MF_PAX_SEGMEXEC;
37512 +#endif
37513 +
37514 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
37515 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
37516 + if ((__supported_pte_mask & _PAGE_NX))
37517 + pax_flags &= ~MF_PAX_SEGMEXEC;
37518 + else
37519 + pax_flags &= ~MF_PAX_PAGEEXEC;
37520 + }
37521 +#endif
37522 +
37523 +#ifdef CONFIG_PAX_EMUTRAMP
37524 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
37525 + pax_flags |= MF_PAX_EMUTRAMP;
37526 +#endif
37527 +
37528 +#ifdef CONFIG_PAX_MPROTECT
37529 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
37530 + pax_flags |= MF_PAX_MPROTECT;
37531 +#endif
37532 +
37533 +#ifdef CONFIG_PAX_ASLR
37534 + if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
37535 + pax_flags |= MF_PAX_RANDMMAP;
37536 +#endif
37537 +
37538 + return pax_flags;
37539 +}
37540 +#endif
37541 +
37542 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
37543 +static long pax_parse_elf_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
37544 +{
37545 + unsigned long pax_flags = 0UL;
37546 +
37547 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
37548 + unsigned long i;
37549 + int found_flags = 0;
37550 +#endif
37551 +
37552 +#ifdef CONFIG_PAX_EI_PAX
37553 + pax_flags = pax_parse_ei_pax(elf_ex);
37554 +#endif
37555 +
37556 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
37557 + for (i = 0UL; i < elf_ex->e_phnum; i++)
37558 + if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
37559 + if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
37560 + ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
37561 + ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
37562 + ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
37563 + ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
37564 + return -EINVAL;
37565 +
37566 +#ifdef CONFIG_PAX_SOFTMODE
37567 + if (pax_softmode)
37568 + pax_flags = pax_parse_softmode(&elf_phdata[i]);
37569 + else
37570 +#endif
37571 +
37572 + pax_flags = pax_parse_hardmode(&elf_phdata[i]);
37573 + found_flags = 1;
37574 + break;
37575 + }
37576 +#endif
37577 +
37578 +#if !defined(CONFIG_PAX_EI_PAX) && defined(CONFIG_PAX_PT_PAX_FLAGS)
37579 + if (found_flags == 0) {
37580 + struct elf_phdr phdr;
37581 + memset(&phdr, 0, sizeof(phdr));
37582 + phdr.p_flags = PF_NOEMUTRAMP;
37583 +#ifdef CONFIG_PAX_SOFTMODE
37584 + if (pax_softmode)
37585 + pax_flags = pax_parse_softmode(&phdr);
37586 + else
37587 +#endif
37588 + pax_flags = pax_parse_hardmode(&phdr);
37589 + }
37590 +#endif
37591 +
37592 + if (0 > pax_check_flags(&pax_flags))
37593 + return -EINVAL;
37594 +
37595 + current->mm->pax_flags = pax_flags;
37596 + return 0;
37597 +}
37598 +#endif
37599 +
37600 /*
37601 * These are the functions used to load ELF style executables and shared
37602 * libraries. There is no binary dependent code anywhere else.
37603 @@ -544,6 +756,11 @@ static unsigned long randomize_stack_top
37604 {
37605 unsigned int random_variable = 0;
37606
37607 +#ifdef CONFIG_PAX_RANDUSTACK
37608 + if (randomize_va_space)
37609 + return stack_top - current->mm->delta_stack;
37610 +#endif
37611 +
37612 if ((current->flags & PF_RANDOMIZE) &&
37613 !(current->personality & ADDR_NO_RANDOMIZE)) {
37614 random_variable = get_random_int() & STACK_RND_MASK;
37615 @@ -562,7 +779,7 @@ static int load_elf_binary(struct linux_
37616 unsigned long load_addr = 0, load_bias = 0;
37617 int load_addr_set = 0;
37618 char * elf_interpreter = NULL;
37619 - unsigned long error;
37620 + unsigned long error = 0;
37621 struct elf_phdr *elf_ppnt, *elf_phdata;
37622 unsigned long elf_bss, elf_brk;
37623 int retval, i;
37624 @@ -572,11 +789,11 @@ static int load_elf_binary(struct linux_
37625 unsigned long start_code, end_code, start_data, end_data;
37626 unsigned long reloc_func_desc __maybe_unused = 0;
37627 int executable_stack = EXSTACK_DEFAULT;
37628 - unsigned long def_flags = 0;
37629 struct {
37630 struct elfhdr elf_ex;
37631 struct elfhdr interp_elf_ex;
37632 } *loc;
37633 + unsigned long pax_task_size = TASK_SIZE;
37634
37635 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
37636 if (!loc) {
37637 @@ -714,11 +931,81 @@ static int load_elf_binary(struct linux_
37638
37639 /* OK, This is the point of no return */
37640 current->flags &= ~PF_FORKNOEXEC;
37641 - current->mm->def_flags = def_flags;
37642 +
37643 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
37644 + current->mm->pax_flags = 0UL;
37645 +#endif
37646 +
37647 +#ifdef CONFIG_PAX_DLRESOLVE
37648 + current->mm->call_dl_resolve = 0UL;
37649 +#endif
37650 +
37651 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
37652 + current->mm->call_syscall = 0UL;
37653 +#endif
37654 +
37655 +#ifdef CONFIG_PAX_ASLR
37656 + current->mm->delta_mmap = 0UL;
37657 + current->mm->delta_stack = 0UL;
37658 +#endif
37659 +
37660 + current->mm->def_flags = 0;
37661 +
37662 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
37663 + if (0 > pax_parse_elf_flags(&loc->elf_ex, elf_phdata)) {
37664 + send_sig(SIGKILL, current, 0);
37665 + goto out_free_dentry;
37666 + }
37667 +#endif
37668 +
37669 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
37670 + pax_set_initial_flags(bprm);
37671 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
37672 + if (pax_set_initial_flags_func)
37673 + (pax_set_initial_flags_func)(bprm);
37674 +#endif
37675 +
37676 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
37677 + if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
37678 + current->mm->context.user_cs_limit = PAGE_SIZE;
37679 + current->mm->def_flags |= VM_PAGEEXEC;
37680 + }
37681 +#endif
37682 +
37683 +#ifdef CONFIG_PAX_SEGMEXEC
37684 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
37685 + current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
37686 + current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
37687 + pax_task_size = SEGMEXEC_TASK_SIZE;
37688 + current->mm->def_flags |= VM_NOHUGEPAGE;
37689 + }
37690 +#endif
37691 +
37692 +#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
37693 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
37694 + set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
37695 + put_cpu();
37696 + }
37697 +#endif
37698
37699 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
37700 may depend on the personality. */
37701 SET_PERSONALITY(loc->elf_ex);
37702 +
37703 +#ifdef CONFIG_PAX_ASLR
37704 + if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
37705 + current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
37706 + current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
37707 + }
37708 +#endif
37709 +
37710 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
37711 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
37712 + executable_stack = EXSTACK_DISABLE_X;
37713 + current->personality &= ~READ_IMPLIES_EXEC;
37714 + } else
37715 +#endif
37716 +
37717 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
37718 current->personality |= READ_IMPLIES_EXEC;
37719
37720 @@ -800,6 +1087,20 @@ static int load_elf_binary(struct linux_
37721 #else
37722 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
37723 #endif
37724 +
37725 +#ifdef CONFIG_PAX_RANDMMAP
37726 + /* PaX: randomize base address at the default exe base if requested */
37727 + if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
37728 +#ifdef CONFIG_SPARC64
37729 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
37730 +#else
37731 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
37732 +#endif
37733 + load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
37734 + elf_flags |= MAP_FIXED;
37735 + }
37736 +#endif
37737 +
37738 }
37739
37740 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
37741 @@ -832,9 +1133,9 @@ static int load_elf_binary(struct linux_
37742 * allowed task size. Note that p_filesz must always be
37743 * <= p_memsz so it is only necessary to check p_memsz.
37744 */
37745 - if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
37746 - elf_ppnt->p_memsz > TASK_SIZE ||
37747 - TASK_SIZE - elf_ppnt->p_memsz < k) {
37748 + if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
37749 + elf_ppnt->p_memsz > pax_task_size ||
37750 + pax_task_size - elf_ppnt->p_memsz < k) {
37751 /* set_brk can never work. Avoid overflows. */
37752 send_sig(SIGKILL, current, 0);
37753 retval = -EINVAL;
37754 @@ -862,6 +1163,11 @@ static int load_elf_binary(struct linux_
37755 start_data += load_bias;
37756 end_data += load_bias;
37757
37758 +#ifdef CONFIG_PAX_RANDMMAP
37759 + if (current->mm->pax_flags & MF_PAX_RANDMMAP)
37760 + elf_brk += PAGE_SIZE + ((pax_get_random_long() & ~PAGE_MASK) << 4);
37761 +#endif
37762 +
37763 /* Calling set_brk effectively mmaps the pages that we need
37764 * for the bss and break sections. We must do this before
37765 * mapping in the interpreter, to make sure it doesn't wind
37766 @@ -873,9 +1179,11 @@ static int load_elf_binary(struct linux_
37767 goto out_free_dentry;
37768 }
37769 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
37770 - send_sig(SIGSEGV, current, 0);
37771 - retval = -EFAULT; /* Nobody gets to see this, but.. */
37772 - goto out_free_dentry;
37773 + /*
37774 + * This bss-zeroing can fail if the ELF
37775 + * file specifies odd protections. So
37776 + * we don't check the return value
37777 + */
37778 }
37779
37780 if (elf_interpreter) {
37781 @@ -1090,7 +1398,7 @@ out:
37782 * Decide what to dump of a segment, part, all or none.
37783 */
37784 static unsigned long vma_dump_size(struct vm_area_struct *vma,
37785 - unsigned long mm_flags)
37786 + unsigned long mm_flags, long signr)
37787 {
37788 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
37789
37790 @@ -1124,7 +1432,7 @@ static unsigned long vma_dump_size(struc
37791 if (vma->vm_file == NULL)
37792 return 0;
37793
37794 - if (FILTER(MAPPED_PRIVATE))
37795 + if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
37796 goto whole;
37797
37798 /*
37799 @@ -1346,9 +1654,9 @@ static void fill_auxv_note(struct memelf
37800 {
37801 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
37802 int i = 0;
37803 - do
37804 + do {
37805 i += 2;
37806 - while (auxv[i - 2] != AT_NULL);
37807 + } while (auxv[i - 2] != AT_NULL);
37808 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
37809 }
37810
37811 @@ -1854,14 +2162,14 @@ static void fill_extnum_info(struct elfh
37812 }
37813
37814 static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
37815 - unsigned long mm_flags)
37816 + struct coredump_params *cprm)
37817 {
37818 struct vm_area_struct *vma;
37819 size_t size = 0;
37820
37821 for (vma = first_vma(current, gate_vma); vma != NULL;
37822 vma = next_vma(vma, gate_vma))
37823 - size += vma_dump_size(vma, mm_flags);
37824 + size += vma_dump_size(vma, cprm->mm_flags, cprm->signr);
37825 return size;
37826 }
37827
37828 @@ -1955,7 +2263,7 @@ static int elf_core_dump(struct coredump
37829
37830 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
37831
37832 - offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
37833 + offset += elf_core_vma_data_size(gate_vma, cprm);
37834 offset += elf_core_extra_data_size();
37835 e_shoff = offset;
37836
37837 @@ -1969,10 +2277,12 @@ static int elf_core_dump(struct coredump
37838 offset = dataoff;
37839
37840 size += sizeof(*elf);
37841 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
37842 if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf)))
37843 goto end_coredump;
37844
37845 size += sizeof(*phdr4note);
37846 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
37847 if (size > cprm->limit
37848 || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
37849 goto end_coredump;
37850 @@ -1986,7 +2296,7 @@ static int elf_core_dump(struct coredump
37851 phdr.p_offset = offset;
37852 phdr.p_vaddr = vma->vm_start;
37853 phdr.p_paddr = 0;
37854 - phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
37855 + phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->signr);
37856 phdr.p_memsz = vma->vm_end - vma->vm_start;
37857 offset += phdr.p_filesz;
37858 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
37859 @@ -1997,6 +2307,7 @@ static int elf_core_dump(struct coredump
37860 phdr.p_align = ELF_EXEC_PAGESIZE;
37861
37862 size += sizeof(phdr);
37863 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
37864 if (size > cprm->limit
37865 || !dump_write(cprm->file, &phdr, sizeof(phdr)))
37866 goto end_coredump;
37867 @@ -2021,7 +2332,7 @@ static int elf_core_dump(struct coredump
37868 unsigned long addr;
37869 unsigned long end;
37870
37871 - end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
37872 + end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->signr);
37873
37874 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
37875 struct page *page;
37876 @@ -2030,6 +2341,7 @@ static int elf_core_dump(struct coredump
37877 page = get_dump_page(addr);
37878 if (page) {
37879 void *kaddr = kmap(page);
37880 + gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
37881 stop = ((size += PAGE_SIZE) > cprm->limit) ||
37882 !dump_write(cprm->file, kaddr,
37883 PAGE_SIZE);
37884 @@ -2047,6 +2359,7 @@ static int elf_core_dump(struct coredump
37885
37886 if (e_phnum == PN_XNUM) {
37887 size += sizeof(*shdr4extnum);
37888 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
37889 if (size > cprm->limit
37890 || !dump_write(cprm->file, shdr4extnum,
37891 sizeof(*shdr4extnum)))
37892 @@ -2067,6 +2380,97 @@ out:
37893
37894 #endif /* CONFIG_ELF_CORE */
37895
37896 +#ifdef CONFIG_PAX_MPROTECT
37897 +/* PaX: non-PIC ELF libraries need relocations on their executable segments
37898 + * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
37899 + * we'll remove VM_MAYWRITE for good on RELRO segments.
37900 + *
37901 + * The checks favour ld-linux.so behaviour which operates on a per ELF segment
37902 + * basis because we want to allow the common case and not the special ones.
37903 + */
37904 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
37905 +{
37906 + struct elfhdr elf_h;
37907 + struct elf_phdr elf_p;
37908 + unsigned long i;
37909 + unsigned long oldflags;
37910 + bool is_textrel_rw, is_textrel_rx, is_relro;
37911 +
37912 + if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
37913 + return;
37914 +
37915 + oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
37916 + newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
37917 +
37918 +#ifdef CONFIG_PAX_ELFRELOCS
37919 + /* possible TEXTREL */
37920 + is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
37921 + is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
37922 +#else
37923 + is_textrel_rw = false;
37924 + is_textrel_rx = false;
37925 +#endif
37926 +
37927 + /* possible RELRO */
37928 + is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
37929 +
37930 + if (!is_textrel_rw && !is_textrel_rx && !is_relro)
37931 + return;
37932 +
37933 + if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
37934 + memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
37935 +
37936 +#ifdef CONFIG_PAX_ETEXECRELOCS
37937 + ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
37938 +#else
37939 + ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
37940 +#endif
37941 +
37942 + (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
37943 + !elf_check_arch(&elf_h) ||
37944 + elf_h.e_phentsize != sizeof(struct elf_phdr) ||
37945 + elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
37946 + return;
37947 +
37948 + for (i = 0UL; i < elf_h.e_phnum; i++) {
37949 + if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
37950 + return;
37951 + switch (elf_p.p_type) {
37952 + case PT_DYNAMIC:
37953 + if (!is_textrel_rw && !is_textrel_rx)
37954 + continue;
37955 + i = 0UL;
37956 + while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
37957 + elf_dyn dyn;
37958 +
37959 + if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
37960 + return;
37961 + if (dyn.d_tag == DT_NULL)
37962 + return;
37963 + if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
37964 + gr_log_textrel(vma);
37965 + if (is_textrel_rw)
37966 + vma->vm_flags |= VM_MAYWRITE;
37967 + else
37968 + /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
37969 + vma->vm_flags &= ~VM_MAYWRITE;
37970 + return;
37971 + }
37972 + i++;
37973 + }
37974 + return;
37975 +
37976 + case PT_GNU_RELRO:
37977 + if (!is_relro)
37978 + continue;
37979 + if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
37980 + vma->vm_flags &= ~VM_MAYWRITE;
37981 + return;
37982 + }
37983 + }
37984 +}
37985 +#endif
37986 +
37987 static int __init init_elf_binfmt(void)
37988 {
37989 return register_binfmt(&elf_format);
37990 diff -urNp linux-3.0.4/fs/binfmt_flat.c linux-3.0.4/fs/binfmt_flat.c
37991 --- linux-3.0.4/fs/binfmt_flat.c 2011-07-21 22:17:23.000000000 -0400
37992 +++ linux-3.0.4/fs/binfmt_flat.c 2011-08-23 21:47:56.000000000 -0400
37993 @@ -567,7 +567,9 @@ static int load_flat_file(struct linux_b
37994 realdatastart = (unsigned long) -ENOMEM;
37995 printk("Unable to allocate RAM for process data, errno %d\n",
37996 (int)-realdatastart);
37997 + down_write(&current->mm->mmap_sem);
37998 do_munmap(current->mm, textpos, text_len);
37999 + up_write(&current->mm->mmap_sem);
38000 ret = realdatastart;
38001 goto err;
38002 }
38003 @@ -591,8 +593,10 @@ static int load_flat_file(struct linux_b
38004 }
38005 if (IS_ERR_VALUE(result)) {
38006 printk("Unable to read data+bss, errno %d\n", (int)-result);
38007 + down_write(&current->mm->mmap_sem);
38008 do_munmap(current->mm, textpos, text_len);
38009 do_munmap(current->mm, realdatastart, len);
38010 + up_write(&current->mm->mmap_sem);
38011 ret = result;
38012 goto err;
38013 }
38014 @@ -661,8 +665,10 @@ static int load_flat_file(struct linux_b
38015 }
38016 if (IS_ERR_VALUE(result)) {
38017 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
38018 + down_write(&current->mm->mmap_sem);
38019 do_munmap(current->mm, textpos, text_len + data_len + extra +
38020 MAX_SHARED_LIBS * sizeof(unsigned long));
38021 + up_write(&current->mm->mmap_sem);
38022 ret = result;
38023 goto err;
38024 }
38025 diff -urNp linux-3.0.4/fs/bio.c linux-3.0.4/fs/bio.c
38026 --- linux-3.0.4/fs/bio.c 2011-07-21 22:17:23.000000000 -0400
38027 +++ linux-3.0.4/fs/bio.c 2011-08-23 21:47:56.000000000 -0400
38028 @@ -1233,7 +1233,7 @@ static void bio_copy_kern_endio(struct b
38029 const int read = bio_data_dir(bio) == READ;
38030 struct bio_map_data *bmd = bio->bi_private;
38031 int i;
38032 - char *p = bmd->sgvecs[0].iov_base;
38033 + char *p = (__force char *)bmd->sgvecs[0].iov_base;
38034
38035 __bio_for_each_segment(bvec, bio, i, 0) {
38036 char *addr = page_address(bvec->bv_page);
38037 diff -urNp linux-3.0.4/fs/block_dev.c linux-3.0.4/fs/block_dev.c
38038 --- linux-3.0.4/fs/block_dev.c 2011-07-21 22:17:23.000000000 -0400
38039 +++ linux-3.0.4/fs/block_dev.c 2011-08-23 21:47:56.000000000 -0400
38040 @@ -671,7 +671,7 @@ static bool bd_may_claim(struct block_de
38041 else if (bdev->bd_contains == bdev)
38042 return true; /* is a whole device which isn't held */
38043
38044 - else if (whole->bd_holder == bd_may_claim)
38045 + else if (whole->bd_holder == (void *)bd_may_claim)
38046 return true; /* is a partition of a device that is being partitioned */
38047 else if (whole->bd_holder != NULL)
38048 return false; /* is a partition of a held device */
38049 diff -urNp linux-3.0.4/fs/btrfs/ctree.c linux-3.0.4/fs/btrfs/ctree.c
38050 --- linux-3.0.4/fs/btrfs/ctree.c 2011-07-21 22:17:23.000000000 -0400
38051 +++ linux-3.0.4/fs/btrfs/ctree.c 2011-08-23 21:47:56.000000000 -0400
38052 @@ -454,9 +454,12 @@ static noinline int __btrfs_cow_block(st
38053 free_extent_buffer(buf);
38054 add_root_to_dirty_list(root);
38055 } else {
38056 - if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
38057 - parent_start = parent->start;
38058 - else
38059 + if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
38060 + if (parent)
38061 + parent_start = parent->start;
38062 + else
38063 + parent_start = 0;
38064 + } else
38065 parent_start = 0;
38066
38067 WARN_ON(trans->transid != btrfs_header_generation(parent));
38068 diff -urNp linux-3.0.4/fs/btrfs/inode.c linux-3.0.4/fs/btrfs/inode.c
38069 --- linux-3.0.4/fs/btrfs/inode.c 2011-07-21 22:17:23.000000000 -0400
38070 +++ linux-3.0.4/fs/btrfs/inode.c 2011-08-23 21:48:14.000000000 -0400
38071 @@ -6895,7 +6895,7 @@ fail:
38072 return -ENOMEM;
38073 }
38074
38075 -static int btrfs_getattr(struct vfsmount *mnt,
38076 +int btrfs_getattr(struct vfsmount *mnt,
38077 struct dentry *dentry, struct kstat *stat)
38078 {
38079 struct inode *inode = dentry->d_inode;
38080 @@ -6907,6 +6907,14 @@ static int btrfs_getattr(struct vfsmount
38081 return 0;
38082 }
38083
38084 +EXPORT_SYMBOL(btrfs_getattr);
38085 +
38086 +dev_t get_btrfs_dev_from_inode(struct inode *inode)
38087 +{
38088 + return BTRFS_I(inode)->root->anon_super.s_dev;
38089 +}
38090 +EXPORT_SYMBOL(get_btrfs_dev_from_inode);
38091 +
38092 /*
38093 * If a file is moved, it will inherit the cow and compression flags of the new
38094 * directory.
38095 diff -urNp linux-3.0.4/fs/btrfs/ioctl.c linux-3.0.4/fs/btrfs/ioctl.c
38096 --- linux-3.0.4/fs/btrfs/ioctl.c 2011-07-21 22:17:23.000000000 -0400
38097 +++ linux-3.0.4/fs/btrfs/ioctl.c 2011-08-23 21:48:14.000000000 -0400
38098 @@ -2676,9 +2676,12 @@ long btrfs_ioctl_space_info(struct btrfs
38099 for (i = 0; i < num_types; i++) {
38100 struct btrfs_space_info *tmp;
38101
38102 + /* Don't copy in more than we allocated */
38103 if (!slot_count)
38104 break;
38105
38106 + slot_count--;
38107 +
38108 info = NULL;
38109 rcu_read_lock();
38110 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
38111 @@ -2700,10 +2703,7 @@ long btrfs_ioctl_space_info(struct btrfs
38112 memcpy(dest, &space, sizeof(space));
38113 dest++;
38114 space_args.total_spaces++;
38115 - slot_count--;
38116 }
38117 - if (!slot_count)
38118 - break;
38119 }
38120 up_read(&info->groups_sem);
38121 }
38122 diff -urNp linux-3.0.4/fs/btrfs/relocation.c linux-3.0.4/fs/btrfs/relocation.c
38123 --- linux-3.0.4/fs/btrfs/relocation.c 2011-07-21 22:17:23.000000000 -0400
38124 +++ linux-3.0.4/fs/btrfs/relocation.c 2011-08-23 21:47:56.000000000 -0400
38125 @@ -1242,7 +1242,7 @@ static int __update_reloc_root(struct bt
38126 }
38127 spin_unlock(&rc->reloc_root_tree.lock);
38128
38129 - BUG_ON((struct btrfs_root *)node->data != root);
38130 + BUG_ON(!node || (struct btrfs_root *)node->data != root);
38131
38132 if (!del) {
38133 spin_lock(&rc->reloc_root_tree.lock);
38134 diff -urNp linux-3.0.4/fs/cachefiles/bind.c linux-3.0.4/fs/cachefiles/bind.c
38135 --- linux-3.0.4/fs/cachefiles/bind.c 2011-07-21 22:17:23.000000000 -0400
38136 +++ linux-3.0.4/fs/cachefiles/bind.c 2011-08-23 21:47:56.000000000 -0400
38137 @@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachef
38138 args);
38139
38140 /* start by checking things over */
38141 - ASSERT(cache->fstop_percent >= 0 &&
38142 - cache->fstop_percent < cache->fcull_percent &&
38143 + ASSERT(cache->fstop_percent < cache->fcull_percent &&
38144 cache->fcull_percent < cache->frun_percent &&
38145 cache->frun_percent < 100);
38146
38147 - ASSERT(cache->bstop_percent >= 0 &&
38148 - cache->bstop_percent < cache->bcull_percent &&
38149 + ASSERT(cache->bstop_percent < cache->bcull_percent &&
38150 cache->bcull_percent < cache->brun_percent &&
38151 cache->brun_percent < 100);
38152
38153 diff -urNp linux-3.0.4/fs/cachefiles/daemon.c linux-3.0.4/fs/cachefiles/daemon.c
38154 --- linux-3.0.4/fs/cachefiles/daemon.c 2011-07-21 22:17:23.000000000 -0400
38155 +++ linux-3.0.4/fs/cachefiles/daemon.c 2011-08-23 21:47:56.000000000 -0400
38156 @@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(st
38157 if (n > buflen)
38158 return -EMSGSIZE;
38159
38160 - if (copy_to_user(_buffer, buffer, n) != 0)
38161 + if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
38162 return -EFAULT;
38163
38164 return n;
38165 @@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(s
38166 if (test_bit(CACHEFILES_DEAD, &cache->flags))
38167 return -EIO;
38168
38169 - if (datalen < 0 || datalen > PAGE_SIZE - 1)
38170 + if (datalen > PAGE_SIZE - 1)
38171 return -EOPNOTSUPP;
38172
38173 /* drag the command string into the kernel so we can parse it */
38174 @@ -386,7 +386,7 @@ static int cachefiles_daemon_fstop(struc
38175 if (args[0] != '%' || args[1] != '\0')
38176 return -EINVAL;
38177
38178 - if (fstop < 0 || fstop >= cache->fcull_percent)
38179 + if (fstop >= cache->fcull_percent)
38180 return cachefiles_daemon_range_error(cache, args);
38181
38182 cache->fstop_percent = fstop;
38183 @@ -458,7 +458,7 @@ static int cachefiles_daemon_bstop(struc
38184 if (args[0] != '%' || args[1] != '\0')
38185 return -EINVAL;
38186
38187 - if (bstop < 0 || bstop >= cache->bcull_percent)
38188 + if (bstop >= cache->bcull_percent)
38189 return cachefiles_daemon_range_error(cache, args);
38190
38191 cache->bstop_percent = bstop;
38192 diff -urNp linux-3.0.4/fs/cachefiles/internal.h linux-3.0.4/fs/cachefiles/internal.h
38193 --- linux-3.0.4/fs/cachefiles/internal.h 2011-07-21 22:17:23.000000000 -0400
38194 +++ linux-3.0.4/fs/cachefiles/internal.h 2011-08-23 21:47:56.000000000 -0400
38195 @@ -57,7 +57,7 @@ struct cachefiles_cache {
38196 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
38197 struct rb_root active_nodes; /* active nodes (can't be culled) */
38198 rwlock_t active_lock; /* lock for active_nodes */
38199 - atomic_t gravecounter; /* graveyard uniquifier */
38200 + atomic_unchecked_t gravecounter; /* graveyard uniquifier */
38201 unsigned frun_percent; /* when to stop culling (% files) */
38202 unsigned fcull_percent; /* when to start culling (% files) */
38203 unsigned fstop_percent; /* when to stop allocating (% files) */
38204 @@ -169,19 +169,19 @@ extern int cachefiles_check_in_use(struc
38205 * proc.c
38206 */
38207 #ifdef CONFIG_CACHEFILES_HISTOGRAM
38208 -extern atomic_t cachefiles_lookup_histogram[HZ];
38209 -extern atomic_t cachefiles_mkdir_histogram[HZ];
38210 -extern atomic_t cachefiles_create_histogram[HZ];
38211 +extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
38212 +extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
38213 +extern atomic_unchecked_t cachefiles_create_histogram[HZ];
38214
38215 extern int __init cachefiles_proc_init(void);
38216 extern void cachefiles_proc_cleanup(void);
38217 static inline
38218 -void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
38219 +void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
38220 {
38221 unsigned long jif = jiffies - start_jif;
38222 if (jif >= HZ)
38223 jif = HZ - 1;
38224 - atomic_inc(&histogram[jif]);
38225 + atomic_inc_unchecked(&histogram[jif]);
38226 }
38227
38228 #else
38229 diff -urNp linux-3.0.4/fs/cachefiles/namei.c linux-3.0.4/fs/cachefiles/namei.c
38230 --- linux-3.0.4/fs/cachefiles/namei.c 2011-07-21 22:17:23.000000000 -0400
38231 +++ linux-3.0.4/fs/cachefiles/namei.c 2011-08-23 21:47:56.000000000 -0400
38232 @@ -318,7 +318,7 @@ try_again:
38233 /* first step is to make up a grave dentry in the graveyard */
38234 sprintf(nbuffer, "%08x%08x",
38235 (uint32_t) get_seconds(),
38236 - (uint32_t) atomic_inc_return(&cache->gravecounter));
38237 + (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
38238
38239 /* do the multiway lock magic */
38240 trap = lock_rename(cache->graveyard, dir);
38241 diff -urNp linux-3.0.4/fs/cachefiles/proc.c linux-3.0.4/fs/cachefiles/proc.c
38242 --- linux-3.0.4/fs/cachefiles/proc.c 2011-07-21 22:17:23.000000000 -0400
38243 +++ linux-3.0.4/fs/cachefiles/proc.c 2011-08-23 21:47:56.000000000 -0400
38244 @@ -14,9 +14,9 @@
38245 #include <linux/seq_file.h>
38246 #include "internal.h"
38247
38248 -atomic_t cachefiles_lookup_histogram[HZ];
38249 -atomic_t cachefiles_mkdir_histogram[HZ];
38250 -atomic_t cachefiles_create_histogram[HZ];
38251 +atomic_unchecked_t cachefiles_lookup_histogram[HZ];
38252 +atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
38253 +atomic_unchecked_t cachefiles_create_histogram[HZ];
38254
38255 /*
38256 * display the latency histogram
38257 @@ -35,9 +35,9 @@ static int cachefiles_histogram_show(str
38258 return 0;
38259 default:
38260 index = (unsigned long) v - 3;
38261 - x = atomic_read(&cachefiles_lookup_histogram[index]);
38262 - y = atomic_read(&cachefiles_mkdir_histogram[index]);
38263 - z = atomic_read(&cachefiles_create_histogram[index]);
38264 + x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
38265 + y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
38266 + z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
38267 if (x == 0 && y == 0 && z == 0)
38268 return 0;
38269
38270 diff -urNp linux-3.0.4/fs/cachefiles/rdwr.c linux-3.0.4/fs/cachefiles/rdwr.c
38271 --- linux-3.0.4/fs/cachefiles/rdwr.c 2011-07-21 22:17:23.000000000 -0400
38272 +++ linux-3.0.4/fs/cachefiles/rdwr.c 2011-08-23 21:47:56.000000000 -0400
38273 @@ -945,7 +945,7 @@ int cachefiles_write_page(struct fscache
38274 old_fs = get_fs();
38275 set_fs(KERNEL_DS);
38276 ret = file->f_op->write(
38277 - file, (const void __user *) data, len, &pos);
38278 + file, (__force const void __user *) data, len, &pos);
38279 set_fs(old_fs);
38280 kunmap(page);
38281 if (ret != len)
38282 diff -urNp linux-3.0.4/fs/ceph/dir.c linux-3.0.4/fs/ceph/dir.c
38283 --- linux-3.0.4/fs/ceph/dir.c 2011-07-21 22:17:23.000000000 -0400
38284 +++ linux-3.0.4/fs/ceph/dir.c 2011-08-23 21:47:56.000000000 -0400
38285 @@ -226,7 +226,7 @@ static int ceph_readdir(struct file *fil
38286 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
38287 struct ceph_mds_client *mdsc = fsc->mdsc;
38288 unsigned frag = fpos_frag(filp->f_pos);
38289 - int off = fpos_off(filp->f_pos);
38290 + unsigned int off = fpos_off(filp->f_pos);
38291 int err;
38292 u32 ftype;
38293 struct ceph_mds_reply_info_parsed *rinfo;
38294 diff -urNp linux-3.0.4/fs/cifs/cifs_debug.c linux-3.0.4/fs/cifs/cifs_debug.c
38295 --- linux-3.0.4/fs/cifs/cifs_debug.c 2011-07-21 22:17:23.000000000 -0400
38296 +++ linux-3.0.4/fs/cifs/cifs_debug.c 2011-08-25 17:18:05.000000000 -0400
38297 @@ -265,8 +265,8 @@ static ssize_t cifs_stats_proc_write(str
38298
38299 if (c == '1' || c == 'y' || c == 'Y' || c == '0') {
38300 #ifdef CONFIG_CIFS_STATS2
38301 - atomic_set(&totBufAllocCount, 0);
38302 - atomic_set(&totSmBufAllocCount, 0);
38303 + atomic_set_unchecked(&totBufAllocCount, 0);
38304 + atomic_set_unchecked(&totSmBufAllocCount, 0);
38305 #endif /* CONFIG_CIFS_STATS2 */
38306 spin_lock(&cifs_tcp_ses_lock);
38307 list_for_each(tmp1, &cifs_tcp_ses_list) {
38308 @@ -279,25 +279,25 @@ static ssize_t cifs_stats_proc_write(str
38309 tcon = list_entry(tmp3,
38310 struct cifs_tcon,
38311 tcon_list);
38312 - atomic_set(&tcon->num_smbs_sent, 0);
38313 - atomic_set(&tcon->num_writes, 0);
38314 - atomic_set(&tcon->num_reads, 0);
38315 - atomic_set(&tcon->num_oplock_brks, 0);
38316 - atomic_set(&tcon->num_opens, 0);
38317 - atomic_set(&tcon->num_posixopens, 0);
38318 - atomic_set(&tcon->num_posixmkdirs, 0);
38319 - atomic_set(&tcon->num_closes, 0);
38320 - atomic_set(&tcon->num_deletes, 0);
38321 - atomic_set(&tcon->num_mkdirs, 0);
38322 - atomic_set(&tcon->num_rmdirs, 0);
38323 - atomic_set(&tcon->num_renames, 0);
38324 - atomic_set(&tcon->num_t2renames, 0);
38325 - atomic_set(&tcon->num_ffirst, 0);
38326 - atomic_set(&tcon->num_fnext, 0);
38327 - atomic_set(&tcon->num_fclose, 0);
38328 - atomic_set(&tcon->num_hardlinks, 0);
38329 - atomic_set(&tcon->num_symlinks, 0);
38330 - atomic_set(&tcon->num_locks, 0);
38331 + atomic_set_unchecked(&tcon->num_smbs_sent, 0);
38332 + atomic_set_unchecked(&tcon->num_writes, 0);
38333 + atomic_set_unchecked(&tcon->num_reads, 0);
38334 + atomic_set_unchecked(&tcon->num_oplock_brks, 0);
38335 + atomic_set_unchecked(&tcon->num_opens, 0);
38336 + atomic_set_unchecked(&tcon->num_posixopens, 0);
38337 + atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
38338 + atomic_set_unchecked(&tcon->num_closes, 0);
38339 + atomic_set_unchecked(&tcon->num_deletes, 0);
38340 + atomic_set_unchecked(&tcon->num_mkdirs, 0);
38341 + atomic_set_unchecked(&tcon->num_rmdirs, 0);
38342 + atomic_set_unchecked(&tcon->num_renames, 0);
38343 + atomic_set_unchecked(&tcon->num_t2renames, 0);
38344 + atomic_set_unchecked(&tcon->num_ffirst, 0);
38345 + atomic_set_unchecked(&tcon->num_fnext, 0);
38346 + atomic_set_unchecked(&tcon->num_fclose, 0);
38347 + atomic_set_unchecked(&tcon->num_hardlinks, 0);
38348 + atomic_set_unchecked(&tcon->num_symlinks, 0);
38349 + atomic_set_unchecked(&tcon->num_locks, 0);
38350 }
38351 }
38352 }
38353 @@ -327,8 +327,8 @@ static int cifs_stats_proc_show(struct s
38354 smBufAllocCount.counter, cifs_min_small);
38355 #ifdef CONFIG_CIFS_STATS2
38356 seq_printf(m, "Total Large %d Small %d Allocations\n",
38357 - atomic_read(&totBufAllocCount),
38358 - atomic_read(&totSmBufAllocCount));
38359 + atomic_read_unchecked(&totBufAllocCount),
38360 + atomic_read_unchecked(&totSmBufAllocCount));
38361 #endif /* CONFIG_CIFS_STATS2 */
38362
38363 seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
38364 @@ -357,41 +357,41 @@ static int cifs_stats_proc_show(struct s
38365 if (tcon->need_reconnect)
38366 seq_puts(m, "\tDISCONNECTED ");
38367 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
38368 - atomic_read(&tcon->num_smbs_sent),
38369 - atomic_read(&tcon->num_oplock_brks));
38370 + atomic_read_unchecked(&tcon->num_smbs_sent),
38371 + atomic_read_unchecked(&tcon->num_oplock_brks));
38372 seq_printf(m, "\nReads: %d Bytes: %lld",
38373 - atomic_read(&tcon->num_reads),
38374 + atomic_read_unchecked(&tcon->num_reads),
38375 (long long)(tcon->bytes_read));
38376 seq_printf(m, "\nWrites: %d Bytes: %lld",
38377 - atomic_read(&tcon->num_writes),
38378 + atomic_read_unchecked(&tcon->num_writes),
38379 (long long)(tcon->bytes_written));
38380 seq_printf(m, "\nFlushes: %d",
38381 - atomic_read(&tcon->num_flushes));
38382 + atomic_read_unchecked(&tcon->num_flushes));
38383 seq_printf(m, "\nLocks: %d HardLinks: %d "
38384 "Symlinks: %d",
38385 - atomic_read(&tcon->num_locks),
38386 - atomic_read(&tcon->num_hardlinks),
38387 - atomic_read(&tcon->num_symlinks));
38388 + atomic_read_unchecked(&tcon->num_locks),
38389 + atomic_read_unchecked(&tcon->num_hardlinks),
38390 + atomic_read_unchecked(&tcon->num_symlinks));
38391 seq_printf(m, "\nOpens: %d Closes: %d "
38392 "Deletes: %d",
38393 - atomic_read(&tcon->num_opens),
38394 - atomic_read(&tcon->num_closes),
38395 - atomic_read(&tcon->num_deletes));
38396 + atomic_read_unchecked(&tcon->num_opens),
38397 + atomic_read_unchecked(&tcon->num_closes),
38398 + atomic_read_unchecked(&tcon->num_deletes));
38399 seq_printf(m, "\nPosix Opens: %d "
38400 "Posix Mkdirs: %d",
38401 - atomic_read(&tcon->num_posixopens),
38402 - atomic_read(&tcon->num_posixmkdirs));
38403 + atomic_read_unchecked(&tcon->num_posixopens),
38404 + atomic_read_unchecked(&tcon->num_posixmkdirs));
38405 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
38406 - atomic_read(&tcon->num_mkdirs),
38407 - atomic_read(&tcon->num_rmdirs));
38408 + atomic_read_unchecked(&tcon->num_mkdirs),
38409 + atomic_read_unchecked(&tcon->num_rmdirs));
38410 seq_printf(m, "\nRenames: %d T2 Renames %d",
38411 - atomic_read(&tcon->num_renames),
38412 - atomic_read(&tcon->num_t2renames));
38413 + atomic_read_unchecked(&tcon->num_renames),
38414 + atomic_read_unchecked(&tcon->num_t2renames));
38415 seq_printf(m, "\nFindFirst: %d FNext %d "
38416 "FClose %d",
38417 - atomic_read(&tcon->num_ffirst),
38418 - atomic_read(&tcon->num_fnext),
38419 - atomic_read(&tcon->num_fclose));
38420 + atomic_read_unchecked(&tcon->num_ffirst),
38421 + atomic_read_unchecked(&tcon->num_fnext),
38422 + atomic_read_unchecked(&tcon->num_fclose));
38423 }
38424 }
38425 }
38426 diff -urNp linux-3.0.4/fs/cifs/cifsfs.c linux-3.0.4/fs/cifs/cifsfs.c
38427 --- linux-3.0.4/fs/cifs/cifsfs.c 2011-09-02 18:11:21.000000000 -0400
38428 +++ linux-3.0.4/fs/cifs/cifsfs.c 2011-08-25 17:18:05.000000000 -0400
38429 @@ -994,7 +994,7 @@ cifs_init_request_bufs(void)
38430 cifs_req_cachep = kmem_cache_create("cifs_request",
38431 CIFSMaxBufSize +
38432 MAX_CIFS_HDR_SIZE, 0,
38433 - SLAB_HWCACHE_ALIGN, NULL);
38434 + SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
38435 if (cifs_req_cachep == NULL)
38436 return -ENOMEM;
38437
38438 @@ -1021,7 +1021,7 @@ cifs_init_request_bufs(void)
38439 efficient to alloc 1 per page off the slab compared to 17K (5page)
38440 alloc of large cifs buffers even when page debugging is on */
38441 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
38442 - MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
38443 + MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
38444 NULL);
38445 if (cifs_sm_req_cachep == NULL) {
38446 mempool_destroy(cifs_req_poolp);
38447 @@ -1106,8 +1106,8 @@ init_cifs(void)
38448 atomic_set(&bufAllocCount, 0);
38449 atomic_set(&smBufAllocCount, 0);
38450 #ifdef CONFIG_CIFS_STATS2
38451 - atomic_set(&totBufAllocCount, 0);
38452 - atomic_set(&totSmBufAllocCount, 0);
38453 + atomic_set_unchecked(&totBufAllocCount, 0);
38454 + atomic_set_unchecked(&totSmBufAllocCount, 0);
38455 #endif /* CONFIG_CIFS_STATS2 */
38456
38457 atomic_set(&midCount, 0);
38458 diff -urNp linux-3.0.4/fs/cifs/cifsglob.h linux-3.0.4/fs/cifs/cifsglob.h
38459 --- linux-3.0.4/fs/cifs/cifsglob.h 2011-07-21 22:17:23.000000000 -0400
38460 +++ linux-3.0.4/fs/cifs/cifsglob.h 2011-08-25 17:18:05.000000000 -0400
38461 @@ -381,28 +381,28 @@ struct cifs_tcon {
38462 __u16 Flags; /* optional support bits */
38463 enum statusEnum tidStatus;
38464 #ifdef CONFIG_CIFS_STATS
38465 - atomic_t num_smbs_sent;
38466 - atomic_t num_writes;
38467 - atomic_t num_reads;
38468 - atomic_t num_flushes;
38469 - atomic_t num_oplock_brks;
38470 - atomic_t num_opens;
38471 - atomic_t num_closes;
38472 - atomic_t num_deletes;
38473 - atomic_t num_mkdirs;
38474 - atomic_t num_posixopens;
38475 - atomic_t num_posixmkdirs;
38476 - atomic_t num_rmdirs;
38477 - atomic_t num_renames;
38478 - atomic_t num_t2renames;
38479 - atomic_t num_ffirst;
38480 - atomic_t num_fnext;
38481 - atomic_t num_fclose;
38482 - atomic_t num_hardlinks;
38483 - atomic_t num_symlinks;
38484 - atomic_t num_locks;
38485 - atomic_t num_acl_get;
38486 - atomic_t num_acl_set;
38487 + atomic_unchecked_t num_smbs_sent;
38488 + atomic_unchecked_t num_writes;
38489 + atomic_unchecked_t num_reads;
38490 + atomic_unchecked_t num_flushes;
38491 + atomic_unchecked_t num_oplock_brks;
38492 + atomic_unchecked_t num_opens;
38493 + atomic_unchecked_t num_closes;
38494 + atomic_unchecked_t num_deletes;
38495 + atomic_unchecked_t num_mkdirs;
38496 + atomic_unchecked_t num_posixopens;
38497 + atomic_unchecked_t num_posixmkdirs;
38498 + atomic_unchecked_t num_rmdirs;
38499 + atomic_unchecked_t num_renames;
38500 + atomic_unchecked_t num_t2renames;
38501 + atomic_unchecked_t num_ffirst;
38502 + atomic_unchecked_t num_fnext;
38503 + atomic_unchecked_t num_fclose;
38504 + atomic_unchecked_t num_hardlinks;
38505 + atomic_unchecked_t num_symlinks;
38506 + atomic_unchecked_t num_locks;
38507 + atomic_unchecked_t num_acl_get;
38508 + atomic_unchecked_t num_acl_set;
38509 #ifdef CONFIG_CIFS_STATS2
38510 unsigned long long time_writes;
38511 unsigned long long time_reads;
38512 @@ -613,7 +613,7 @@ convert_delimiter(char *path, char delim
38513 }
38514
38515 #ifdef CONFIG_CIFS_STATS
38516 -#define cifs_stats_inc atomic_inc
38517 +#define cifs_stats_inc atomic_inc_unchecked
38518
38519 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
38520 unsigned int bytes)
38521 @@ -911,8 +911,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnect
38522 /* Various Debug counters */
38523 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
38524 #ifdef CONFIG_CIFS_STATS2
38525 -GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
38526 -GLOBAL_EXTERN atomic_t totSmBufAllocCount;
38527 +GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
38528 +GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
38529 #endif
38530 GLOBAL_EXTERN atomic_t smBufAllocCount;
38531 GLOBAL_EXTERN atomic_t midCount;
38532 diff -urNp linux-3.0.4/fs/cifs/link.c linux-3.0.4/fs/cifs/link.c
38533 --- linux-3.0.4/fs/cifs/link.c 2011-07-21 22:17:23.000000000 -0400
38534 +++ linux-3.0.4/fs/cifs/link.c 2011-08-23 21:47:56.000000000 -0400
38535 @@ -587,7 +587,7 @@ symlink_exit:
38536
38537 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
38538 {
38539 - char *p = nd_get_link(nd);
38540 + const char *p = nd_get_link(nd);
38541 if (!IS_ERR(p))
38542 kfree(p);
38543 }
38544 diff -urNp linux-3.0.4/fs/cifs/misc.c linux-3.0.4/fs/cifs/misc.c
38545 --- linux-3.0.4/fs/cifs/misc.c 2011-07-21 22:17:23.000000000 -0400
38546 +++ linux-3.0.4/fs/cifs/misc.c 2011-08-25 17:18:05.000000000 -0400
38547 @@ -156,7 +156,7 @@ cifs_buf_get(void)
38548 memset(ret_buf, 0, sizeof(struct smb_hdr) + 3);
38549 atomic_inc(&bufAllocCount);
38550 #ifdef CONFIG_CIFS_STATS2
38551 - atomic_inc(&totBufAllocCount);
38552 + atomic_inc_unchecked(&totBufAllocCount);
38553 #endif /* CONFIG_CIFS_STATS2 */
38554 }
38555
38556 @@ -191,7 +191,7 @@ cifs_small_buf_get(void)
38557 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
38558 atomic_inc(&smBufAllocCount);
38559 #ifdef CONFIG_CIFS_STATS2
38560 - atomic_inc(&totSmBufAllocCount);
38561 + atomic_inc_unchecked(&totSmBufAllocCount);
38562 #endif /* CONFIG_CIFS_STATS2 */
38563
38564 }
38565 diff -urNp linux-3.0.4/fs/coda/cache.c linux-3.0.4/fs/coda/cache.c
38566 --- linux-3.0.4/fs/coda/cache.c 2011-07-21 22:17:23.000000000 -0400
38567 +++ linux-3.0.4/fs/coda/cache.c 2011-08-23 21:47:56.000000000 -0400
38568 @@ -24,7 +24,7 @@
38569 #include "coda_linux.h"
38570 #include "coda_cache.h"
38571
38572 -static atomic_t permission_epoch = ATOMIC_INIT(0);
38573 +static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
38574
38575 /* replace or extend an acl cache hit */
38576 void coda_cache_enter(struct inode *inode, int mask)
38577 @@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inod
38578 struct coda_inode_info *cii = ITOC(inode);
38579
38580 spin_lock(&cii->c_lock);
38581 - cii->c_cached_epoch = atomic_read(&permission_epoch);
38582 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
38583 if (cii->c_uid != current_fsuid()) {
38584 cii->c_uid = current_fsuid();
38585 cii->c_cached_perm = mask;
38586 @@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode
38587 {
38588 struct coda_inode_info *cii = ITOC(inode);
38589 spin_lock(&cii->c_lock);
38590 - cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
38591 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
38592 spin_unlock(&cii->c_lock);
38593 }
38594
38595 /* remove all acl caches */
38596 void coda_cache_clear_all(struct super_block *sb)
38597 {
38598 - atomic_inc(&permission_epoch);
38599 + atomic_inc_unchecked(&permission_epoch);
38600 }
38601
38602
38603 @@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode
38604 spin_lock(&cii->c_lock);
38605 hit = (mask & cii->c_cached_perm) == mask &&
38606 cii->c_uid == current_fsuid() &&
38607 - cii->c_cached_epoch == atomic_read(&permission_epoch);
38608 + cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
38609 spin_unlock(&cii->c_lock);
38610
38611 return hit;
38612 diff -urNp linux-3.0.4/fs/compat_binfmt_elf.c linux-3.0.4/fs/compat_binfmt_elf.c
38613 --- linux-3.0.4/fs/compat_binfmt_elf.c 2011-07-21 22:17:23.000000000 -0400
38614 +++ linux-3.0.4/fs/compat_binfmt_elf.c 2011-08-23 21:47:56.000000000 -0400
38615 @@ -30,11 +30,13 @@
38616 #undef elf_phdr
38617 #undef elf_shdr
38618 #undef elf_note
38619 +#undef elf_dyn
38620 #undef elf_addr_t
38621 #define elfhdr elf32_hdr
38622 #define elf_phdr elf32_phdr
38623 #define elf_shdr elf32_shdr
38624 #define elf_note elf32_note
38625 +#define elf_dyn Elf32_Dyn
38626 #define elf_addr_t Elf32_Addr
38627
38628 /*
38629 diff -urNp linux-3.0.4/fs/compat.c linux-3.0.4/fs/compat.c
38630 --- linux-3.0.4/fs/compat.c 2011-07-21 22:17:23.000000000 -0400
38631 +++ linux-3.0.4/fs/compat.c 2011-08-23 22:49:33.000000000 -0400
38632 @@ -566,7 +566,7 @@ ssize_t compat_rw_copy_check_uvector(int
38633 goto out;
38634
38635 ret = -EINVAL;
38636 - if (nr_segs > UIO_MAXIOV || nr_segs < 0)
38637 + if (nr_segs > UIO_MAXIOV)
38638 goto out;
38639 if (nr_segs > fast_segs) {
38640 ret = -ENOMEM;
38641 @@ -848,6 +848,7 @@ struct compat_old_linux_dirent {
38642
38643 struct compat_readdir_callback {
38644 struct compat_old_linux_dirent __user *dirent;
38645 + struct file * file;
38646 int result;
38647 };
38648
38649 @@ -865,6 +866,10 @@ static int compat_fillonedir(void *__buf
38650 buf->result = -EOVERFLOW;
38651 return -EOVERFLOW;
38652 }
38653 +
38654 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
38655 + return 0;
38656 +
38657 buf->result++;
38658 dirent = buf->dirent;
38659 if (!access_ok(VERIFY_WRITE, dirent,
38660 @@ -897,6 +902,7 @@ asmlinkage long compat_sys_old_readdir(u
38661
38662 buf.result = 0;
38663 buf.dirent = dirent;
38664 + buf.file = file;
38665
38666 error = vfs_readdir(file, compat_fillonedir, &buf);
38667 if (buf.result)
38668 @@ -917,6 +923,7 @@ struct compat_linux_dirent {
38669 struct compat_getdents_callback {
38670 struct compat_linux_dirent __user *current_dir;
38671 struct compat_linux_dirent __user *previous;
38672 + struct file * file;
38673 int count;
38674 int error;
38675 };
38676 @@ -938,6 +945,10 @@ static int compat_filldir(void *__buf, c
38677 buf->error = -EOVERFLOW;
38678 return -EOVERFLOW;
38679 }
38680 +
38681 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
38682 + return 0;
38683 +
38684 dirent = buf->previous;
38685 if (dirent) {
38686 if (__put_user(offset, &dirent->d_off))
38687 @@ -985,6 +996,7 @@ asmlinkage long compat_sys_getdents(unsi
38688 buf.previous = NULL;
38689 buf.count = count;
38690 buf.error = 0;
38691 + buf.file = file;
38692
38693 error = vfs_readdir(file, compat_filldir, &buf);
38694 if (error >= 0)
38695 @@ -1006,6 +1018,7 @@ out:
38696 struct compat_getdents_callback64 {
38697 struct linux_dirent64 __user *current_dir;
38698 struct linux_dirent64 __user *previous;
38699 + struct file * file;
38700 int count;
38701 int error;
38702 };
38703 @@ -1022,6 +1035,10 @@ static int compat_filldir64(void * __buf
38704 buf->error = -EINVAL; /* only used if we fail.. */
38705 if (reclen > buf->count)
38706 return -EINVAL;
38707 +
38708 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
38709 + return 0;
38710 +
38711 dirent = buf->previous;
38712
38713 if (dirent) {
38714 @@ -1073,6 +1090,7 @@ asmlinkage long compat_sys_getdents64(un
38715 buf.previous = NULL;
38716 buf.count = count;
38717 buf.error = 0;
38718 + buf.file = file;
38719
38720 error = vfs_readdir(file, compat_filldir64, &buf);
38721 if (error >= 0)
38722 @@ -1446,6 +1464,8 @@ int compat_core_sys_select(int n, compat
38723 struct fdtable *fdt;
38724 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
38725
38726 + pax_track_stack();
38727 +
38728 if (n < 0)
38729 goto out_nofds;
38730
38731 diff -urNp linux-3.0.4/fs/compat_ioctl.c linux-3.0.4/fs/compat_ioctl.c
38732 --- linux-3.0.4/fs/compat_ioctl.c 2011-07-21 22:17:23.000000000 -0400
38733 +++ linux-3.0.4/fs/compat_ioctl.c 2011-08-23 21:47:56.000000000 -0400
38734 @@ -208,6 +208,8 @@ static int do_video_set_spu_palette(unsi
38735
38736 err = get_user(palp, &up->palette);
38737 err |= get_user(length, &up->length);
38738 + if (err)
38739 + return -EFAULT;
38740
38741 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
38742 err = put_user(compat_ptr(palp), &up_native->palette);
38743 @@ -1638,8 +1640,8 @@ asmlinkage long compat_sys_ioctl(unsigne
38744 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
38745 {
38746 unsigned int a, b;
38747 - a = *(unsigned int *)p;
38748 - b = *(unsigned int *)q;
38749 + a = *(const unsigned int *)p;
38750 + b = *(const unsigned int *)q;
38751 if (a > b)
38752 return 1;
38753 if (a < b)
38754 diff -urNp linux-3.0.4/fs/configfs/dir.c linux-3.0.4/fs/configfs/dir.c
38755 --- linux-3.0.4/fs/configfs/dir.c 2011-07-21 22:17:23.000000000 -0400
38756 +++ linux-3.0.4/fs/configfs/dir.c 2011-08-23 21:47:56.000000000 -0400
38757 @@ -1575,7 +1575,8 @@ static int configfs_readdir(struct file
38758 }
38759 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
38760 struct configfs_dirent *next;
38761 - const char * name;
38762 + const unsigned char * name;
38763 + char d_name[sizeof(next->s_dentry->d_iname)];
38764 int len;
38765 struct inode *inode = NULL;
38766
38767 @@ -1585,7 +1586,12 @@ static int configfs_readdir(struct file
38768 continue;
38769
38770 name = configfs_get_name(next);
38771 - len = strlen(name);
38772 + if (next->s_dentry && name == next->s_dentry->d_iname) {
38773 + len = next->s_dentry->d_name.len;
38774 + memcpy(d_name, name, len);
38775 + name = d_name;
38776 + } else
38777 + len = strlen(name);
38778
38779 /*
38780 * We'll have a dentry and an inode for
38781 diff -urNp linux-3.0.4/fs/dcache.c linux-3.0.4/fs/dcache.c
38782 --- linux-3.0.4/fs/dcache.c 2011-07-21 22:17:23.000000000 -0400
38783 +++ linux-3.0.4/fs/dcache.c 2011-08-23 21:47:56.000000000 -0400
38784 @@ -3089,7 +3089,7 @@ void __init vfs_caches_init(unsigned lon
38785 mempages -= reserve;
38786
38787 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
38788 - SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
38789 + SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
38790
38791 dcache_init();
38792 inode_init();
38793 diff -urNp linux-3.0.4/fs/ecryptfs/inode.c linux-3.0.4/fs/ecryptfs/inode.c
38794 --- linux-3.0.4/fs/ecryptfs/inode.c 2011-09-02 18:11:21.000000000 -0400
38795 +++ linux-3.0.4/fs/ecryptfs/inode.c 2011-08-23 21:47:56.000000000 -0400
38796 @@ -704,7 +704,7 @@ static int ecryptfs_readlink_lower(struc
38797 old_fs = get_fs();
38798 set_fs(get_ds());
38799 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
38800 - (char __user *)lower_buf,
38801 + (__force char __user *)lower_buf,
38802 lower_bufsiz);
38803 set_fs(old_fs);
38804 if (rc < 0)
38805 @@ -750,7 +750,7 @@ static void *ecryptfs_follow_link(struct
38806 }
38807 old_fs = get_fs();
38808 set_fs(get_ds());
38809 - rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
38810 + rc = dentry->d_inode->i_op->readlink(dentry, (__force char __user *)buf, len);
38811 set_fs(old_fs);
38812 if (rc < 0) {
38813 kfree(buf);
38814 @@ -765,7 +765,7 @@ out:
38815 static void
38816 ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
38817 {
38818 - char *buf = nd_get_link(nd);
38819 + const char *buf = nd_get_link(nd);
38820 if (!IS_ERR(buf)) {
38821 /* Free the char* */
38822 kfree(buf);
38823 diff -urNp linux-3.0.4/fs/ecryptfs/miscdev.c linux-3.0.4/fs/ecryptfs/miscdev.c
38824 --- linux-3.0.4/fs/ecryptfs/miscdev.c 2011-07-21 22:17:23.000000000 -0400
38825 +++ linux-3.0.4/fs/ecryptfs/miscdev.c 2011-08-23 21:47:56.000000000 -0400
38826 @@ -328,7 +328,7 @@ check_list:
38827 goto out_unlock_msg_ctx;
38828 i = 5;
38829 if (msg_ctx->msg) {
38830 - if (copy_to_user(&buf[i], packet_length, packet_length_size))
38831 + if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
38832 goto out_unlock_msg_ctx;
38833 i += packet_length_size;
38834 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
38835 diff -urNp linux-3.0.4/fs/exec.c linux-3.0.4/fs/exec.c
38836 --- linux-3.0.4/fs/exec.c 2011-07-21 22:17:23.000000000 -0400
38837 +++ linux-3.0.4/fs/exec.c 2011-08-25 17:26:58.000000000 -0400
38838 @@ -55,12 +55,24 @@
38839 #include <linux/pipe_fs_i.h>
38840 #include <linux/oom.h>
38841 #include <linux/compat.h>
38842 +#include <linux/random.h>
38843 +#include <linux/seq_file.h>
38844 +
38845 +#ifdef CONFIG_PAX_REFCOUNT
38846 +#include <linux/kallsyms.h>
38847 +#include <linux/kdebug.h>
38848 +#endif
38849
38850 #include <asm/uaccess.h>
38851 #include <asm/mmu_context.h>
38852 #include <asm/tlb.h>
38853 #include "internal.h"
38854
38855 +#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
38856 +void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
38857 +EXPORT_SYMBOL(pax_set_initial_flags_func);
38858 +#endif
38859 +
38860 int core_uses_pid;
38861 char core_pattern[CORENAME_MAX_SIZE] = "core";
38862 unsigned int core_pipe_limit;
38863 @@ -70,7 +82,7 @@ struct core_name {
38864 char *corename;
38865 int used, size;
38866 };
38867 -static atomic_t call_count = ATOMIC_INIT(1);
38868 +static atomic_unchecked_t call_count = ATOMIC_INIT(1);
38869
38870 /* The maximal length of core_pattern is also specified in sysctl.c */
38871
38872 @@ -116,7 +128,7 @@ SYSCALL_DEFINE1(uselib, const char __use
38873 char *tmp = getname(library);
38874 int error = PTR_ERR(tmp);
38875 static const struct open_flags uselib_flags = {
38876 - .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
38877 + .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC | FMODE_GREXEC,
38878 .acc_mode = MAY_READ | MAY_EXEC | MAY_OPEN,
38879 .intent = LOOKUP_OPEN
38880 };
38881 @@ -195,18 +207,10 @@ static struct page *get_arg_page(struct
38882 int write)
38883 {
38884 struct page *page;
38885 - int ret;
38886
38887 -#ifdef CONFIG_STACK_GROWSUP
38888 - if (write) {
38889 - ret = expand_downwards(bprm->vma, pos);
38890 - if (ret < 0)
38891 - return NULL;
38892 - }
38893 -#endif
38894 - ret = get_user_pages(current, bprm->mm, pos,
38895 - 1, write, 1, &page, NULL);
38896 - if (ret <= 0)
38897 + if (0 > expand_downwards(bprm->vma, pos))
38898 + return NULL;
38899 + if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
38900 return NULL;
38901
38902 if (write) {
38903 @@ -281,6 +285,11 @@ static int __bprm_mm_init(struct linux_b
38904 vma->vm_end = STACK_TOP_MAX;
38905 vma->vm_start = vma->vm_end - PAGE_SIZE;
38906 vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
38907 +
38908 +#ifdef CONFIG_PAX_SEGMEXEC
38909 + vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
38910 +#endif
38911 +
38912 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
38913 INIT_LIST_HEAD(&vma->anon_vma_chain);
38914
38915 @@ -295,6 +304,12 @@ static int __bprm_mm_init(struct linux_b
38916 mm->stack_vm = mm->total_vm = 1;
38917 up_write(&mm->mmap_sem);
38918 bprm->p = vma->vm_end - sizeof(void *);
38919 +
38920 +#ifdef CONFIG_PAX_RANDUSTACK
38921 + if (randomize_va_space)
38922 + bprm->p ^= (pax_get_random_long() & ~15) & ~PAGE_MASK;
38923 +#endif
38924 +
38925 return 0;
38926 err:
38927 up_write(&mm->mmap_sem);
38928 @@ -403,19 +418,7 @@ err:
38929 return err;
38930 }
38931
38932 -struct user_arg_ptr {
38933 -#ifdef CONFIG_COMPAT
38934 - bool is_compat;
38935 -#endif
38936 - union {
38937 - const char __user *const __user *native;
38938 -#ifdef CONFIG_COMPAT
38939 - compat_uptr_t __user *compat;
38940 -#endif
38941 - } ptr;
38942 -};
38943 -
38944 -static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
38945 +const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
38946 {
38947 const char __user *native;
38948
38949 @@ -566,7 +569,7 @@ int copy_strings_kernel(int argc, const
38950 int r;
38951 mm_segment_t oldfs = get_fs();
38952 struct user_arg_ptr argv = {
38953 - .ptr.native = (const char __user *const __user *)__argv,
38954 + .ptr.native = (__force const char __user *const __user *)__argv,
38955 };
38956
38957 set_fs(KERNEL_DS);
38958 @@ -601,7 +604,8 @@ static int shift_arg_pages(struct vm_are
38959 unsigned long new_end = old_end - shift;
38960 struct mmu_gather tlb;
38961
38962 - BUG_ON(new_start > new_end);
38963 + if (new_start >= new_end || new_start < mmap_min_addr)
38964 + return -ENOMEM;
38965
38966 /*
38967 * ensure there are no vmas between where we want to go
38968 @@ -610,6 +614,10 @@ static int shift_arg_pages(struct vm_are
38969 if (vma != find_vma(mm, new_start))
38970 return -EFAULT;
38971
38972 +#ifdef CONFIG_PAX_SEGMEXEC
38973 + BUG_ON(pax_find_mirror_vma(vma));
38974 +#endif
38975 +
38976 /*
38977 * cover the whole range: [new_start, old_end)
38978 */
38979 @@ -690,10 +698,6 @@ int setup_arg_pages(struct linux_binprm
38980 stack_top = arch_align_stack(stack_top);
38981 stack_top = PAGE_ALIGN(stack_top);
38982
38983 - if (unlikely(stack_top < mmap_min_addr) ||
38984 - unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
38985 - return -ENOMEM;
38986 -
38987 stack_shift = vma->vm_end - stack_top;
38988
38989 bprm->p -= stack_shift;
38990 @@ -705,8 +709,28 @@ int setup_arg_pages(struct linux_binprm
38991 bprm->exec -= stack_shift;
38992
38993 down_write(&mm->mmap_sem);
38994 +
38995 + /* Move stack pages down in memory. */
38996 + if (stack_shift) {
38997 + ret = shift_arg_pages(vma, stack_shift);
38998 + if (ret)
38999 + goto out_unlock;
39000 + }
39001 +
39002 vm_flags = VM_STACK_FLAGS;
39003
39004 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
39005 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39006 + vm_flags &= ~VM_EXEC;
39007 +
39008 +#ifdef CONFIG_PAX_MPROTECT
39009 + if (mm->pax_flags & MF_PAX_MPROTECT)
39010 + vm_flags &= ~VM_MAYEXEC;
39011 +#endif
39012 +
39013 + }
39014 +#endif
39015 +
39016 /*
39017 * Adjust stack execute permissions; explicitly enable for
39018 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
39019 @@ -725,13 +749,6 @@ int setup_arg_pages(struct linux_binprm
39020 goto out_unlock;
39021 BUG_ON(prev != vma);
39022
39023 - /* Move stack pages down in memory. */
39024 - if (stack_shift) {
39025 - ret = shift_arg_pages(vma, stack_shift);
39026 - if (ret)
39027 - goto out_unlock;
39028 - }
39029 -
39030 /* mprotect_fixup is overkill to remove the temporary stack flags */
39031 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
39032
39033 @@ -771,7 +788,7 @@ struct file *open_exec(const char *name)
39034 struct file *file;
39035 int err;
39036 static const struct open_flags open_exec_flags = {
39037 - .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
39038 + .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC | FMODE_GREXEC,
39039 .acc_mode = MAY_EXEC | MAY_OPEN,
39040 .intent = LOOKUP_OPEN
39041 };
39042 @@ -812,7 +829,7 @@ int kernel_read(struct file *file, loff_
39043 old_fs = get_fs();
39044 set_fs(get_ds());
39045 /* The cast to a user pointer is valid due to the set_fs() */
39046 - result = vfs_read(file, (void __user *)addr, count, &pos);
39047 + result = vfs_read(file, (__force void __user *)addr, count, &pos);
39048 set_fs(old_fs);
39049 return result;
39050 }
39051 @@ -1236,7 +1253,7 @@ int check_unsafe_exec(struct linux_binpr
39052 }
39053 rcu_read_unlock();
39054
39055 - if (p->fs->users > n_fs) {
39056 + if (atomic_read(&p->fs->users) > n_fs) {
39057 bprm->unsafe |= LSM_UNSAFE_SHARE;
39058 } else {
39059 res = -EAGAIN;
39060 @@ -1428,11 +1445,35 @@ static int do_execve_common(const char *
39061 struct user_arg_ptr envp,
39062 struct pt_regs *regs)
39063 {
39064 +#ifdef CONFIG_GRKERNSEC
39065 + struct file *old_exec_file;
39066 + struct acl_subject_label *old_acl;
39067 + struct rlimit old_rlim[RLIM_NLIMITS];
39068 +#endif
39069 struct linux_binprm *bprm;
39070 struct file *file;
39071 struct files_struct *displaced;
39072 bool clear_in_exec;
39073 int retval;
39074 + const struct cred *cred = current_cred();
39075 +
39076 + gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
39077 +
39078 + /*
39079 + * We move the actual failure in case of RLIMIT_NPROC excess from
39080 + * set*uid() to execve() because too many poorly written programs
39081 + * don't check setuid() return code. Here we additionally recheck
39082 + * whether NPROC limit is still exceeded.
39083 + */
39084 + if ((current->flags & PF_NPROC_EXCEEDED) &&
39085 + atomic_read(&cred->user->processes) > rlimit(RLIMIT_NPROC)) {
39086 + retval = -EAGAIN;
39087 + goto out_ret;
39088 + }
39089 +
39090 + /* We're below the limit (still or again), so we don't want to make
39091 + * further execve() calls fail. */
39092 + current->flags &= ~PF_NPROC_EXCEEDED;
39093
39094 retval = unshare_files(&displaced);
39095 if (retval)
39096 @@ -1464,6 +1505,16 @@ static int do_execve_common(const char *
39097 bprm->filename = filename;
39098 bprm->interp = filename;
39099
39100 + if (gr_process_user_ban()) {
39101 + retval = -EPERM;
39102 + goto out_file;
39103 + }
39104 +
39105 + if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
39106 + retval = -EACCES;
39107 + goto out_file;
39108 + }
39109 +
39110 retval = bprm_mm_init(bprm);
39111 if (retval)
39112 goto out_file;
39113 @@ -1493,9 +1544,40 @@ static int do_execve_common(const char *
39114 if (retval < 0)
39115 goto out;
39116
39117 + if (!gr_tpe_allow(file)) {
39118 + retval = -EACCES;
39119 + goto out;
39120 + }
39121 +
39122 + if (gr_check_crash_exec(file)) {
39123 + retval = -EACCES;
39124 + goto out;
39125 + }
39126 +
39127 + gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
39128 +
39129 + gr_handle_exec_args(bprm, argv);
39130 +
39131 +#ifdef CONFIG_GRKERNSEC
39132 + old_acl = current->acl;
39133 + memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
39134 + old_exec_file = current->exec_file;
39135 + get_file(file);
39136 + current->exec_file = file;
39137 +#endif
39138 +
39139 + retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
39140 + bprm->unsafe & LSM_UNSAFE_SHARE);
39141 + if (retval < 0)
39142 + goto out_fail;
39143 +
39144 retval = search_binary_handler(bprm,regs);
39145 if (retval < 0)
39146 - goto out;
39147 + goto out_fail;
39148 +#ifdef CONFIG_GRKERNSEC
39149 + if (old_exec_file)
39150 + fput(old_exec_file);
39151 +#endif
39152
39153 /* execve succeeded */
39154 current->fs->in_exec = 0;
39155 @@ -1506,6 +1588,14 @@ static int do_execve_common(const char *
39156 put_files_struct(displaced);
39157 return retval;
39158
39159 +out_fail:
39160 +#ifdef CONFIG_GRKERNSEC
39161 + current->acl = old_acl;
39162 + memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
39163 + fput(current->exec_file);
39164 + current->exec_file = old_exec_file;
39165 +#endif
39166 +
39167 out:
39168 if (bprm->mm) {
39169 acct_arg_size(bprm, 0);
39170 @@ -1579,7 +1669,7 @@ static int expand_corename(struct core_n
39171 {
39172 char *old_corename = cn->corename;
39173
39174 - cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
39175 + cn->size = CORENAME_MAX_SIZE * atomic_inc_return_unchecked(&call_count);
39176 cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);
39177
39178 if (!cn->corename) {
39179 @@ -1667,7 +1757,7 @@ static int format_corename(struct core_n
39180 int pid_in_pattern = 0;
39181 int err = 0;
39182
39183 - cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
39184 + cn->size = CORENAME_MAX_SIZE * atomic_read_unchecked(&call_count);
39185 cn->corename = kmalloc(cn->size, GFP_KERNEL);
39186 cn->used = 0;
39187
39188 @@ -1758,6 +1848,219 @@ out:
39189 return ispipe;
39190 }
39191
39192 +int pax_check_flags(unsigned long *flags)
39193 +{
39194 + int retval = 0;
39195 +
39196 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
39197 + if (*flags & MF_PAX_SEGMEXEC)
39198 + {
39199 + *flags &= ~MF_PAX_SEGMEXEC;
39200 + retval = -EINVAL;
39201 + }
39202 +#endif
39203 +
39204 + if ((*flags & MF_PAX_PAGEEXEC)
39205 +
39206 +#ifdef CONFIG_PAX_PAGEEXEC
39207 + && (*flags & MF_PAX_SEGMEXEC)
39208 +#endif
39209 +
39210 + )
39211 + {
39212 + *flags &= ~MF_PAX_PAGEEXEC;
39213 + retval = -EINVAL;
39214 + }
39215 +
39216 + if ((*flags & MF_PAX_MPROTECT)
39217 +
39218 +#ifdef CONFIG_PAX_MPROTECT
39219 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
39220 +#endif
39221 +
39222 + )
39223 + {
39224 + *flags &= ~MF_PAX_MPROTECT;
39225 + retval = -EINVAL;
39226 + }
39227 +
39228 + if ((*flags & MF_PAX_EMUTRAMP)
39229 +
39230 +#ifdef CONFIG_PAX_EMUTRAMP
39231 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
39232 +#endif
39233 +
39234 + )
39235 + {
39236 + *flags &= ~MF_PAX_EMUTRAMP;
39237 + retval = -EINVAL;
39238 + }
39239 +
39240 + return retval;
39241 +}
39242 +
39243 +EXPORT_SYMBOL(pax_check_flags);
39244 +
39245 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
39246 +void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
39247 +{
39248 + struct task_struct *tsk = current;
39249 + struct mm_struct *mm = current->mm;
39250 + char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
39251 + char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
39252 + char *path_exec = NULL;
39253 + char *path_fault = NULL;
39254 + unsigned long start = 0UL, end = 0UL, offset = 0UL;
39255 +
39256 + if (buffer_exec && buffer_fault) {
39257 + struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
39258 +
39259 + down_read(&mm->mmap_sem);
39260 + vma = mm->mmap;
39261 + while (vma && (!vma_exec || !vma_fault)) {
39262 + if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
39263 + vma_exec = vma;
39264 + if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
39265 + vma_fault = vma;
39266 + vma = vma->vm_next;
39267 + }
39268 + if (vma_exec) {
39269 + path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
39270 + if (IS_ERR(path_exec))
39271 + path_exec = "<path too long>";
39272 + else {
39273 + path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
39274 + if (path_exec) {
39275 + *path_exec = 0;
39276 + path_exec = buffer_exec;
39277 + } else
39278 + path_exec = "<path too long>";
39279 + }
39280 + }
39281 + if (vma_fault) {
39282 + start = vma_fault->vm_start;
39283 + end = vma_fault->vm_end;
39284 + offset = vma_fault->vm_pgoff << PAGE_SHIFT;
39285 + if (vma_fault->vm_file) {
39286 + path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
39287 + if (IS_ERR(path_fault))
39288 + path_fault = "<path too long>";
39289 + else {
39290 + path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
39291 + if (path_fault) {
39292 + *path_fault = 0;
39293 + path_fault = buffer_fault;
39294 + } else
39295 + path_fault = "<path too long>";
39296 + }
39297 + } else
39298 + path_fault = "<anonymous mapping>";
39299 + }
39300 + up_read(&mm->mmap_sem);
39301 + }
39302 + if (tsk->signal->curr_ip)
39303 + printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
39304 + else
39305 + printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
39306 + printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
39307 + "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
39308 + task_uid(tsk), task_euid(tsk), pc, sp);
39309 + free_page((unsigned long)buffer_exec);
39310 + free_page((unsigned long)buffer_fault);
39311 + pax_report_insns(pc, sp);
39312 + do_coredump(SIGKILL, SIGKILL, regs);
39313 +}
39314 +#endif
39315 +
39316 +#ifdef CONFIG_PAX_REFCOUNT
39317 +void pax_report_refcount_overflow(struct pt_regs *regs)
39318 +{
39319 + if (current->signal->curr_ip)
39320 + printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
39321 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
39322 + else
39323 + printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
39324 + current->comm, task_pid_nr(current), current_uid(), current_euid());
39325 + print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
39326 + show_regs(regs);
39327 + force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
39328 +}
39329 +#endif
39330 +
39331 +#ifdef CONFIG_PAX_USERCOPY
39332 +/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
39333 +int object_is_on_stack(const void *obj, unsigned long len)
39334 +{
39335 + const void * const stack = task_stack_page(current);
39336 + const void * const stackend = stack + THREAD_SIZE;
39337 +
39338 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
39339 + const void *frame = NULL;
39340 + const void *oldframe;
39341 +#endif
39342 +
39343 + if (obj + len < obj)
39344 + return -1;
39345 +
39346 + if (obj + len <= stack || stackend <= obj)
39347 + return 0;
39348 +
39349 + if (obj < stack || stackend < obj + len)
39350 + return -1;
39351 +
39352 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
39353 + oldframe = __builtin_frame_address(1);
39354 + if (oldframe)
39355 + frame = __builtin_frame_address(2);
39356 + /*
39357 + low ----------------------------------------------> high
39358 + [saved bp][saved ip][args][local vars][saved bp][saved ip]
39359 + ^----------------^
39360 + allow copies only within here
39361 + */
39362 + while (stack <= frame && frame < stackend) {
39363 + /* if obj + len extends past the last frame, this
39364 + check won't pass and the next frame will be 0,
39365 + causing us to bail out and correctly report
39366 + the copy as invalid
39367 + */
39368 + if (obj + len <= frame)
39369 + return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
39370 + oldframe = frame;
39371 + frame = *(const void * const *)frame;
39372 + }
39373 + return -1;
39374 +#else
39375 + return 1;
39376 +#endif
39377 +}
39378 +
39379 +
39380 +NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
39381 +{
39382 + if (current->signal->curr_ip)
39383 + printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
39384 + &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
39385 + else
39386 + printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
39387 + to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
39388 + dump_stack();
39389 + gr_handle_kernel_exploit();
39390 + do_group_exit(SIGKILL);
39391 +}
39392 +#endif
39393 +
39394 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
39395 +void pax_track_stack(void)
39396 +{
39397 + unsigned long sp = (unsigned long)&sp;
39398 + if (sp < current_thread_info()->lowest_stack &&
39399 + sp > (unsigned long)task_stack_page(current))
39400 + current_thread_info()->lowest_stack = sp;
39401 +}
39402 +EXPORT_SYMBOL(pax_track_stack);
39403 +#endif
39404 +
39405 static int zap_process(struct task_struct *start, int exit_code)
39406 {
39407 struct task_struct *t;
39408 @@ -1969,17 +2272,17 @@ static void wait_for_dump_helpers(struct
39409 pipe = file->f_path.dentry->d_inode->i_pipe;
39410
39411 pipe_lock(pipe);
39412 - pipe->readers++;
39413 - pipe->writers--;
39414 + atomic_inc(&pipe->readers);
39415 + atomic_dec(&pipe->writers);
39416
39417 - while ((pipe->readers > 1) && (!signal_pending(current))) {
39418 + while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
39419 wake_up_interruptible_sync(&pipe->wait);
39420 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
39421 pipe_wait(pipe);
39422 }
39423
39424 - pipe->readers--;
39425 - pipe->writers++;
39426 + atomic_dec(&pipe->readers);
39427 + atomic_inc(&pipe->writers);
39428 pipe_unlock(pipe);
39429
39430 }
39431 @@ -2040,7 +2343,7 @@ void do_coredump(long signr, int exit_co
39432 int retval = 0;
39433 int flag = 0;
39434 int ispipe;
39435 - static atomic_t core_dump_count = ATOMIC_INIT(0);
39436 + static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
39437 struct coredump_params cprm = {
39438 .signr = signr,
39439 .regs = regs,
39440 @@ -2055,6 +2358,9 @@ void do_coredump(long signr, int exit_co
39441
39442 audit_core_dumps(signr);
39443
39444 + if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
39445 + gr_handle_brute_attach(current, cprm.mm_flags);
39446 +
39447 binfmt = mm->binfmt;
39448 if (!binfmt || !binfmt->core_dump)
39449 goto fail;
39450 @@ -2095,6 +2401,8 @@ void do_coredump(long signr, int exit_co
39451 goto fail_corename;
39452 }
39453
39454 + gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
39455 +
39456 if (ispipe) {
39457 int dump_count;
39458 char **helper_argv;
39459 @@ -2122,7 +2430,7 @@ void do_coredump(long signr, int exit_co
39460 }
39461 cprm.limit = RLIM_INFINITY;
39462
39463 - dump_count = atomic_inc_return(&core_dump_count);
39464 + dump_count = atomic_inc_return_unchecked(&core_dump_count);
39465 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
39466 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
39467 task_tgid_vnr(current), current->comm);
39468 @@ -2192,7 +2500,7 @@ close_fail:
39469 filp_close(cprm.file, NULL);
39470 fail_dropcount:
39471 if (ispipe)
39472 - atomic_dec(&core_dump_count);
39473 + atomic_dec_unchecked(&core_dump_count);
39474 fail_unlock:
39475 kfree(cn.corename);
39476 fail_corename:
39477 diff -urNp linux-3.0.4/fs/ext2/balloc.c linux-3.0.4/fs/ext2/balloc.c
39478 --- linux-3.0.4/fs/ext2/balloc.c 2011-07-21 22:17:23.000000000 -0400
39479 +++ linux-3.0.4/fs/ext2/balloc.c 2011-08-23 21:48:14.000000000 -0400
39480 @@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct e
39481
39482 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
39483 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
39484 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
39485 + if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
39486 sbi->s_resuid != current_fsuid() &&
39487 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
39488 return 0;
39489 diff -urNp linux-3.0.4/fs/ext3/balloc.c linux-3.0.4/fs/ext3/balloc.c
39490 --- linux-3.0.4/fs/ext3/balloc.c 2011-07-21 22:17:23.000000000 -0400
39491 +++ linux-3.0.4/fs/ext3/balloc.c 2011-08-23 21:48:14.000000000 -0400
39492 @@ -1441,7 +1441,7 @@ static int ext3_has_free_blocks(struct e
39493
39494 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
39495 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
39496 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
39497 + if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
39498 sbi->s_resuid != current_fsuid() &&
39499 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
39500 return 0;
39501 diff -urNp linux-3.0.4/fs/ext4/balloc.c linux-3.0.4/fs/ext4/balloc.c
39502 --- linux-3.0.4/fs/ext4/balloc.c 2011-07-21 22:17:23.000000000 -0400
39503 +++ linux-3.0.4/fs/ext4/balloc.c 2011-08-23 21:48:14.000000000 -0400
39504 @@ -394,8 +394,8 @@ static int ext4_has_free_blocks(struct e
39505 /* Hm, nope. Are (enough) root reserved blocks available? */
39506 if (sbi->s_resuid == current_fsuid() ||
39507 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
39508 - capable(CAP_SYS_RESOURCE) ||
39509 - (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
39510 + (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
39511 + capable_nolog(CAP_SYS_RESOURCE)) {
39512
39513 if (free_blocks >= (nblocks + dirty_blocks))
39514 return 1;
39515 diff -urNp linux-3.0.4/fs/ext4/ext4.h linux-3.0.4/fs/ext4/ext4.h
39516 --- linux-3.0.4/fs/ext4/ext4.h 2011-09-02 18:11:21.000000000 -0400
39517 +++ linux-3.0.4/fs/ext4/ext4.h 2011-08-23 21:47:56.000000000 -0400
39518 @@ -1177,19 +1177,19 @@ struct ext4_sb_info {
39519 unsigned long s_mb_last_start;
39520
39521 /* stats for buddy allocator */
39522 - atomic_t s_bal_reqs; /* number of reqs with len > 1 */
39523 - atomic_t s_bal_success; /* we found long enough chunks */
39524 - atomic_t s_bal_allocated; /* in blocks */
39525 - atomic_t s_bal_ex_scanned; /* total extents scanned */
39526 - atomic_t s_bal_goals; /* goal hits */
39527 - atomic_t s_bal_breaks; /* too long searches */
39528 - atomic_t s_bal_2orders; /* 2^order hits */
39529 + atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
39530 + atomic_unchecked_t s_bal_success; /* we found long enough chunks */
39531 + atomic_unchecked_t s_bal_allocated; /* in blocks */
39532 + atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
39533 + atomic_unchecked_t s_bal_goals; /* goal hits */
39534 + atomic_unchecked_t s_bal_breaks; /* too long searches */
39535 + atomic_unchecked_t s_bal_2orders; /* 2^order hits */
39536 spinlock_t s_bal_lock;
39537 unsigned long s_mb_buddies_generated;
39538 unsigned long long s_mb_generation_time;
39539 - atomic_t s_mb_lost_chunks;
39540 - atomic_t s_mb_preallocated;
39541 - atomic_t s_mb_discarded;
39542 + atomic_unchecked_t s_mb_lost_chunks;
39543 + atomic_unchecked_t s_mb_preallocated;
39544 + atomic_unchecked_t s_mb_discarded;
39545 atomic_t s_lock_busy;
39546
39547 /* locality groups */
39548 diff -urNp linux-3.0.4/fs/ext4/mballoc.c linux-3.0.4/fs/ext4/mballoc.c
39549 --- linux-3.0.4/fs/ext4/mballoc.c 2011-09-02 18:11:21.000000000 -0400
39550 +++ linux-3.0.4/fs/ext4/mballoc.c 2011-08-23 21:48:14.000000000 -0400
39551 @@ -1793,7 +1793,7 @@ void ext4_mb_simple_scan_group(struct ex
39552 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
39553
39554 if (EXT4_SB(sb)->s_mb_stats)
39555 - atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
39556 + atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
39557
39558 break;
39559 }
39560 @@ -2087,7 +2087,7 @@ repeat:
39561 ac->ac_status = AC_STATUS_CONTINUE;
39562 ac->ac_flags |= EXT4_MB_HINT_FIRST;
39563 cr = 3;
39564 - atomic_inc(&sbi->s_mb_lost_chunks);
39565 + atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
39566 goto repeat;
39567 }
39568 }
39569 @@ -2130,6 +2130,8 @@ static int ext4_mb_seq_groups_show(struc
39570 ext4_grpblk_t counters[16];
39571 } sg;
39572
39573 + pax_track_stack();
39574 +
39575 group--;
39576 if (group == 0)
39577 seq_printf(seq, "#%-5s: %-5s %-5s %-5s "
39578 @@ -2553,25 +2555,25 @@ int ext4_mb_release(struct super_block *
39579 if (sbi->s_mb_stats) {
39580 printk(KERN_INFO
39581 "EXT4-fs: mballoc: %u blocks %u reqs (%u success)\n",
39582 - atomic_read(&sbi->s_bal_allocated),
39583 - atomic_read(&sbi->s_bal_reqs),
39584 - atomic_read(&sbi->s_bal_success));
39585 + atomic_read_unchecked(&sbi->s_bal_allocated),
39586 + atomic_read_unchecked(&sbi->s_bal_reqs),
39587 + atomic_read_unchecked(&sbi->s_bal_success));
39588 printk(KERN_INFO
39589 "EXT4-fs: mballoc: %u extents scanned, %u goal hits, "
39590 "%u 2^N hits, %u breaks, %u lost\n",
39591 - atomic_read(&sbi->s_bal_ex_scanned),
39592 - atomic_read(&sbi->s_bal_goals),
39593 - atomic_read(&sbi->s_bal_2orders),
39594 - atomic_read(&sbi->s_bal_breaks),
39595 - atomic_read(&sbi->s_mb_lost_chunks));
39596 + atomic_read_unchecked(&sbi->s_bal_ex_scanned),
39597 + atomic_read_unchecked(&sbi->s_bal_goals),
39598 + atomic_read_unchecked(&sbi->s_bal_2orders),
39599 + atomic_read_unchecked(&sbi->s_bal_breaks),
39600 + atomic_read_unchecked(&sbi->s_mb_lost_chunks));
39601 printk(KERN_INFO
39602 "EXT4-fs: mballoc: %lu generated and it took %Lu\n",
39603 sbi->s_mb_buddies_generated++,
39604 sbi->s_mb_generation_time);
39605 printk(KERN_INFO
39606 "EXT4-fs: mballoc: %u preallocated, %u discarded\n",
39607 - atomic_read(&sbi->s_mb_preallocated),
39608 - atomic_read(&sbi->s_mb_discarded));
39609 + atomic_read_unchecked(&sbi->s_mb_preallocated),
39610 + atomic_read_unchecked(&sbi->s_mb_discarded));
39611 }
39612
39613 free_percpu(sbi->s_locality_groups);
39614 @@ -3041,16 +3043,16 @@ static void ext4_mb_collect_stats(struct
39615 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
39616
39617 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
39618 - atomic_inc(&sbi->s_bal_reqs);
39619 - atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
39620 + atomic_inc_unchecked(&sbi->s_bal_reqs);
39621 + atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
39622 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
39623 - atomic_inc(&sbi->s_bal_success);
39624 - atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
39625 + atomic_inc_unchecked(&sbi->s_bal_success);
39626 + atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
39627 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
39628 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
39629 - atomic_inc(&sbi->s_bal_goals);
39630 + atomic_inc_unchecked(&sbi->s_bal_goals);
39631 if (ac->ac_found > sbi->s_mb_max_to_scan)
39632 - atomic_inc(&sbi->s_bal_breaks);
39633 + atomic_inc_unchecked(&sbi->s_bal_breaks);
39634 }
39635
39636 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
39637 @@ -3448,7 +3450,7 @@ ext4_mb_new_inode_pa(struct ext4_allocat
39638 trace_ext4_mb_new_inode_pa(ac, pa);
39639
39640 ext4_mb_use_inode_pa(ac, pa);
39641 - atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
39642 + atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
39643
39644 ei = EXT4_I(ac->ac_inode);
39645 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
39646 @@ -3508,7 +3510,7 @@ ext4_mb_new_group_pa(struct ext4_allocat
39647 trace_ext4_mb_new_group_pa(ac, pa);
39648
39649 ext4_mb_use_group_pa(ac, pa);
39650 - atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
39651 + atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
39652
39653 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
39654 lg = ac->ac_lg;
39655 @@ -3595,7 +3597,7 @@ ext4_mb_release_inode_pa(struct ext4_bud
39656 * from the bitmap and continue.
39657 */
39658 }
39659 - atomic_add(free, &sbi->s_mb_discarded);
39660 + atomic_add_unchecked(free, &sbi->s_mb_discarded);
39661
39662 return err;
39663 }
39664 @@ -3613,7 +3615,7 @@ ext4_mb_release_group_pa(struct ext4_bud
39665 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
39666 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
39667 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
39668 - atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
39669 + atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
39670 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
39671
39672 return 0;
39673 diff -urNp linux-3.0.4/fs/fcntl.c linux-3.0.4/fs/fcntl.c
39674 --- linux-3.0.4/fs/fcntl.c 2011-07-21 22:17:23.000000000 -0400
39675 +++ linux-3.0.4/fs/fcntl.c 2011-08-23 21:48:14.000000000 -0400
39676 @@ -224,6 +224,11 @@ int __f_setown(struct file *filp, struct
39677 if (err)
39678 return err;
39679
39680 + if (gr_handle_chroot_fowner(pid, type))
39681 + return -ENOENT;
39682 + if (gr_check_protected_task_fowner(pid, type))
39683 + return -EACCES;
39684 +
39685 f_modown(filp, pid, type, force);
39686 return 0;
39687 }
39688 @@ -348,6 +353,7 @@ static long do_fcntl(int fd, unsigned in
39689 switch (cmd) {
39690 case F_DUPFD:
39691 case F_DUPFD_CLOEXEC:
39692 + gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
39693 if (arg >= rlimit(RLIMIT_NOFILE))
39694 break;
39695 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
39696 @@ -835,14 +841,14 @@ static int __init fcntl_init(void)
39697 * Exceptions: O_NONBLOCK is a two bit define on parisc; O_NDELAY
39698 * is defined as O_NONBLOCK on some platforms and not on others.
39699 */
39700 - BUILD_BUG_ON(19 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
39701 + BUILD_BUG_ON(20 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
39702 O_RDONLY | O_WRONLY | O_RDWR |
39703 O_CREAT | O_EXCL | O_NOCTTY |
39704 O_TRUNC | O_APPEND | /* O_NONBLOCK | */
39705 __O_SYNC | O_DSYNC | FASYNC |
39706 O_DIRECT | O_LARGEFILE | O_DIRECTORY |
39707 O_NOFOLLOW | O_NOATIME | O_CLOEXEC |
39708 - __FMODE_EXEC | O_PATH
39709 + __FMODE_EXEC | O_PATH | FMODE_GREXEC
39710 ));
39711
39712 fasync_cache = kmem_cache_create("fasync_cache",
39713 diff -urNp linux-3.0.4/fs/fifo.c linux-3.0.4/fs/fifo.c
39714 --- linux-3.0.4/fs/fifo.c 2011-07-21 22:17:23.000000000 -0400
39715 +++ linux-3.0.4/fs/fifo.c 2011-08-23 21:47:56.000000000 -0400
39716 @@ -58,10 +58,10 @@ static int fifo_open(struct inode *inode
39717 */
39718 filp->f_op = &read_pipefifo_fops;
39719 pipe->r_counter++;
39720 - if (pipe->readers++ == 0)
39721 + if (atomic_inc_return(&pipe->readers) == 1)
39722 wake_up_partner(inode);
39723
39724 - if (!pipe->writers) {
39725 + if (!atomic_read(&pipe->writers)) {
39726 if ((filp->f_flags & O_NONBLOCK)) {
39727 /* suppress POLLHUP until we have
39728 * seen a writer */
39729 @@ -81,15 +81,15 @@ static int fifo_open(struct inode *inode
39730 * errno=ENXIO when there is no process reading the FIFO.
39731 */
39732 ret = -ENXIO;
39733 - if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
39734 + if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
39735 goto err;
39736
39737 filp->f_op = &write_pipefifo_fops;
39738 pipe->w_counter++;
39739 - if (!pipe->writers++)
39740 + if (atomic_inc_return(&pipe->writers) == 1)
39741 wake_up_partner(inode);
39742
39743 - if (!pipe->readers) {
39744 + if (!atomic_read(&pipe->readers)) {
39745 wait_for_partner(inode, &pipe->r_counter);
39746 if (signal_pending(current))
39747 goto err_wr;
39748 @@ -105,11 +105,11 @@ static int fifo_open(struct inode *inode
39749 */
39750 filp->f_op = &rdwr_pipefifo_fops;
39751
39752 - pipe->readers++;
39753 - pipe->writers++;
39754 + atomic_inc(&pipe->readers);
39755 + atomic_inc(&pipe->writers);
39756 pipe->r_counter++;
39757 pipe->w_counter++;
39758 - if (pipe->readers == 1 || pipe->writers == 1)
39759 + if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
39760 wake_up_partner(inode);
39761 break;
39762
39763 @@ -123,19 +123,19 @@ static int fifo_open(struct inode *inode
39764 return 0;
39765
39766 err_rd:
39767 - if (!--pipe->readers)
39768 + if (atomic_dec_and_test(&pipe->readers))
39769 wake_up_interruptible(&pipe->wait);
39770 ret = -ERESTARTSYS;
39771 goto err;
39772
39773 err_wr:
39774 - if (!--pipe->writers)
39775 + if (atomic_dec_and_test(&pipe->writers))
39776 wake_up_interruptible(&pipe->wait);
39777 ret = -ERESTARTSYS;
39778 goto err;
39779
39780 err:
39781 - if (!pipe->readers && !pipe->writers)
39782 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
39783 free_pipe_info(inode);
39784
39785 err_nocleanup:
39786 diff -urNp linux-3.0.4/fs/file.c linux-3.0.4/fs/file.c
39787 --- linux-3.0.4/fs/file.c 2011-07-21 22:17:23.000000000 -0400
39788 +++ linux-3.0.4/fs/file.c 2011-08-23 21:48:14.000000000 -0400
39789 @@ -15,6 +15,7 @@
39790 #include <linux/slab.h>
39791 #include <linux/vmalloc.h>
39792 #include <linux/file.h>
39793 +#include <linux/security.h>
39794 #include <linux/fdtable.h>
39795 #include <linux/bitops.h>
39796 #include <linux/interrupt.h>
39797 @@ -254,6 +255,7 @@ int expand_files(struct files_struct *fi
39798 * N.B. For clone tasks sharing a files structure, this test
39799 * will limit the total number of files that can be opened.
39800 */
39801 + gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
39802 if (nr >= rlimit(RLIMIT_NOFILE))
39803 return -EMFILE;
39804
39805 diff -urNp linux-3.0.4/fs/filesystems.c linux-3.0.4/fs/filesystems.c
39806 --- linux-3.0.4/fs/filesystems.c 2011-07-21 22:17:23.000000000 -0400
39807 +++ linux-3.0.4/fs/filesystems.c 2011-08-23 21:48:14.000000000 -0400
39808 @@ -274,7 +274,12 @@ struct file_system_type *get_fs_type(con
39809 int len = dot ? dot - name : strlen(name);
39810
39811 fs = __get_fs_type(name, len);
39812 +
39813 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
39814 + if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
39815 +#else
39816 if (!fs && (request_module("%.*s", len, name) == 0))
39817 +#endif
39818 fs = __get_fs_type(name, len);
39819
39820 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
39821 diff -urNp linux-3.0.4/fs/fscache/cookie.c linux-3.0.4/fs/fscache/cookie.c
39822 --- linux-3.0.4/fs/fscache/cookie.c 2011-07-21 22:17:23.000000000 -0400
39823 +++ linux-3.0.4/fs/fscache/cookie.c 2011-08-23 21:47:56.000000000 -0400
39824 @@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire
39825 parent ? (char *) parent->def->name : "<no-parent>",
39826 def->name, netfs_data);
39827
39828 - fscache_stat(&fscache_n_acquires);
39829 + fscache_stat_unchecked(&fscache_n_acquires);
39830
39831 /* if there's no parent cookie, then we don't create one here either */
39832 if (!parent) {
39833 - fscache_stat(&fscache_n_acquires_null);
39834 + fscache_stat_unchecked(&fscache_n_acquires_null);
39835 _leave(" [no parent]");
39836 return NULL;
39837 }
39838 @@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire
39839 /* allocate and initialise a cookie */
39840 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
39841 if (!cookie) {
39842 - fscache_stat(&fscache_n_acquires_oom);
39843 + fscache_stat_unchecked(&fscache_n_acquires_oom);
39844 _leave(" [ENOMEM]");
39845 return NULL;
39846 }
39847 @@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire
39848
39849 switch (cookie->def->type) {
39850 case FSCACHE_COOKIE_TYPE_INDEX:
39851 - fscache_stat(&fscache_n_cookie_index);
39852 + fscache_stat_unchecked(&fscache_n_cookie_index);
39853 break;
39854 case FSCACHE_COOKIE_TYPE_DATAFILE:
39855 - fscache_stat(&fscache_n_cookie_data);
39856 + fscache_stat_unchecked(&fscache_n_cookie_data);
39857 break;
39858 default:
39859 - fscache_stat(&fscache_n_cookie_special);
39860 + fscache_stat_unchecked(&fscache_n_cookie_special);
39861 break;
39862 }
39863
39864 @@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire
39865 if (fscache_acquire_non_index_cookie(cookie) < 0) {
39866 atomic_dec(&parent->n_children);
39867 __fscache_cookie_put(cookie);
39868 - fscache_stat(&fscache_n_acquires_nobufs);
39869 + fscache_stat_unchecked(&fscache_n_acquires_nobufs);
39870 _leave(" = NULL");
39871 return NULL;
39872 }
39873 }
39874
39875 - fscache_stat(&fscache_n_acquires_ok);
39876 + fscache_stat_unchecked(&fscache_n_acquires_ok);
39877 _leave(" = %p", cookie);
39878 return cookie;
39879 }
39880 @@ -168,7 +168,7 @@ static int fscache_acquire_non_index_coo
39881 cache = fscache_select_cache_for_object(cookie->parent);
39882 if (!cache) {
39883 up_read(&fscache_addremove_sem);
39884 - fscache_stat(&fscache_n_acquires_no_cache);
39885 + fscache_stat_unchecked(&fscache_n_acquires_no_cache);
39886 _leave(" = -ENOMEDIUM [no cache]");
39887 return -ENOMEDIUM;
39888 }
39889 @@ -256,12 +256,12 @@ static int fscache_alloc_object(struct f
39890 object = cache->ops->alloc_object(cache, cookie);
39891 fscache_stat_d(&fscache_n_cop_alloc_object);
39892 if (IS_ERR(object)) {
39893 - fscache_stat(&fscache_n_object_no_alloc);
39894 + fscache_stat_unchecked(&fscache_n_object_no_alloc);
39895 ret = PTR_ERR(object);
39896 goto error;
39897 }
39898
39899 - fscache_stat(&fscache_n_object_alloc);
39900 + fscache_stat_unchecked(&fscache_n_object_alloc);
39901
39902 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
39903
39904 @@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fsca
39905 struct fscache_object *object;
39906 struct hlist_node *_p;
39907
39908 - fscache_stat(&fscache_n_updates);
39909 + fscache_stat_unchecked(&fscache_n_updates);
39910
39911 if (!cookie) {
39912 - fscache_stat(&fscache_n_updates_null);
39913 + fscache_stat_unchecked(&fscache_n_updates_null);
39914 _leave(" [no cookie]");
39915 return;
39916 }
39917 @@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct
39918 struct fscache_object *object;
39919 unsigned long event;
39920
39921 - fscache_stat(&fscache_n_relinquishes);
39922 + fscache_stat_unchecked(&fscache_n_relinquishes);
39923 if (retire)
39924 - fscache_stat(&fscache_n_relinquishes_retire);
39925 + fscache_stat_unchecked(&fscache_n_relinquishes_retire);
39926
39927 if (!cookie) {
39928 - fscache_stat(&fscache_n_relinquishes_null);
39929 + fscache_stat_unchecked(&fscache_n_relinquishes_null);
39930 _leave(" [no cookie]");
39931 return;
39932 }
39933 @@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct
39934
39935 /* wait for the cookie to finish being instantiated (or to fail) */
39936 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
39937 - fscache_stat(&fscache_n_relinquishes_waitcrt);
39938 + fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
39939 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
39940 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
39941 }
39942 diff -urNp linux-3.0.4/fs/fscache/internal.h linux-3.0.4/fs/fscache/internal.h
39943 --- linux-3.0.4/fs/fscache/internal.h 2011-07-21 22:17:23.000000000 -0400
39944 +++ linux-3.0.4/fs/fscache/internal.h 2011-08-23 21:47:56.000000000 -0400
39945 @@ -144,94 +144,94 @@ extern void fscache_proc_cleanup(void);
39946 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
39947 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
39948
39949 -extern atomic_t fscache_n_op_pend;
39950 -extern atomic_t fscache_n_op_run;
39951 -extern atomic_t fscache_n_op_enqueue;
39952 -extern atomic_t fscache_n_op_deferred_release;
39953 -extern atomic_t fscache_n_op_release;
39954 -extern atomic_t fscache_n_op_gc;
39955 -extern atomic_t fscache_n_op_cancelled;
39956 -extern atomic_t fscache_n_op_rejected;
39957 -
39958 -extern atomic_t fscache_n_attr_changed;
39959 -extern atomic_t fscache_n_attr_changed_ok;
39960 -extern atomic_t fscache_n_attr_changed_nobufs;
39961 -extern atomic_t fscache_n_attr_changed_nomem;
39962 -extern atomic_t fscache_n_attr_changed_calls;
39963 -
39964 -extern atomic_t fscache_n_allocs;
39965 -extern atomic_t fscache_n_allocs_ok;
39966 -extern atomic_t fscache_n_allocs_wait;
39967 -extern atomic_t fscache_n_allocs_nobufs;
39968 -extern atomic_t fscache_n_allocs_intr;
39969 -extern atomic_t fscache_n_allocs_object_dead;
39970 -extern atomic_t fscache_n_alloc_ops;
39971 -extern atomic_t fscache_n_alloc_op_waits;
39972 -
39973 -extern atomic_t fscache_n_retrievals;
39974 -extern atomic_t fscache_n_retrievals_ok;
39975 -extern atomic_t fscache_n_retrievals_wait;
39976 -extern atomic_t fscache_n_retrievals_nodata;
39977 -extern atomic_t fscache_n_retrievals_nobufs;
39978 -extern atomic_t fscache_n_retrievals_intr;
39979 -extern atomic_t fscache_n_retrievals_nomem;
39980 -extern atomic_t fscache_n_retrievals_object_dead;
39981 -extern atomic_t fscache_n_retrieval_ops;
39982 -extern atomic_t fscache_n_retrieval_op_waits;
39983 -
39984 -extern atomic_t fscache_n_stores;
39985 -extern atomic_t fscache_n_stores_ok;
39986 -extern atomic_t fscache_n_stores_again;
39987 -extern atomic_t fscache_n_stores_nobufs;
39988 -extern atomic_t fscache_n_stores_oom;
39989 -extern atomic_t fscache_n_store_ops;
39990 -extern atomic_t fscache_n_store_calls;
39991 -extern atomic_t fscache_n_store_pages;
39992 -extern atomic_t fscache_n_store_radix_deletes;
39993 -extern atomic_t fscache_n_store_pages_over_limit;
39994 -
39995 -extern atomic_t fscache_n_store_vmscan_not_storing;
39996 -extern atomic_t fscache_n_store_vmscan_gone;
39997 -extern atomic_t fscache_n_store_vmscan_busy;
39998 -extern atomic_t fscache_n_store_vmscan_cancelled;
39999 -
40000 -extern atomic_t fscache_n_marks;
40001 -extern atomic_t fscache_n_uncaches;
40002 -
40003 -extern atomic_t fscache_n_acquires;
40004 -extern atomic_t fscache_n_acquires_null;
40005 -extern atomic_t fscache_n_acquires_no_cache;
40006 -extern atomic_t fscache_n_acquires_ok;
40007 -extern atomic_t fscache_n_acquires_nobufs;
40008 -extern atomic_t fscache_n_acquires_oom;
40009 -
40010 -extern atomic_t fscache_n_updates;
40011 -extern atomic_t fscache_n_updates_null;
40012 -extern atomic_t fscache_n_updates_run;
40013 -
40014 -extern atomic_t fscache_n_relinquishes;
40015 -extern atomic_t fscache_n_relinquishes_null;
40016 -extern atomic_t fscache_n_relinquishes_waitcrt;
40017 -extern atomic_t fscache_n_relinquishes_retire;
40018 -
40019 -extern atomic_t fscache_n_cookie_index;
40020 -extern atomic_t fscache_n_cookie_data;
40021 -extern atomic_t fscache_n_cookie_special;
40022 -
40023 -extern atomic_t fscache_n_object_alloc;
40024 -extern atomic_t fscache_n_object_no_alloc;
40025 -extern atomic_t fscache_n_object_lookups;
40026 -extern atomic_t fscache_n_object_lookups_negative;
40027 -extern atomic_t fscache_n_object_lookups_positive;
40028 -extern atomic_t fscache_n_object_lookups_timed_out;
40029 -extern atomic_t fscache_n_object_created;
40030 -extern atomic_t fscache_n_object_avail;
40031 -extern atomic_t fscache_n_object_dead;
40032 -
40033 -extern atomic_t fscache_n_checkaux_none;
40034 -extern atomic_t fscache_n_checkaux_okay;
40035 -extern atomic_t fscache_n_checkaux_update;
40036 -extern atomic_t fscache_n_checkaux_obsolete;
40037 +extern atomic_unchecked_t fscache_n_op_pend;
40038 +extern atomic_unchecked_t fscache_n_op_run;
40039 +extern atomic_unchecked_t fscache_n_op_enqueue;
40040 +extern atomic_unchecked_t fscache_n_op_deferred_release;
40041 +extern atomic_unchecked_t fscache_n_op_release;
40042 +extern atomic_unchecked_t fscache_n_op_gc;
40043 +extern atomic_unchecked_t fscache_n_op_cancelled;
40044 +extern atomic_unchecked_t fscache_n_op_rejected;
40045 +
40046 +extern atomic_unchecked_t fscache_n_attr_changed;
40047 +extern atomic_unchecked_t fscache_n_attr_changed_ok;
40048 +extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
40049 +extern atomic_unchecked_t fscache_n_attr_changed_nomem;
40050 +extern atomic_unchecked_t fscache_n_attr_changed_calls;
40051 +
40052 +extern atomic_unchecked_t fscache_n_allocs;
40053 +extern atomic_unchecked_t fscache_n_allocs_ok;
40054 +extern atomic_unchecked_t fscache_n_allocs_wait;
40055 +extern atomic_unchecked_t fscache_n_allocs_nobufs;
40056 +extern atomic_unchecked_t fscache_n_allocs_intr;
40057 +extern atomic_unchecked_t fscache_n_allocs_object_dead;
40058 +extern atomic_unchecked_t fscache_n_alloc_ops;
40059 +extern atomic_unchecked_t fscache_n_alloc_op_waits;
40060 +
40061 +extern atomic_unchecked_t fscache_n_retrievals;
40062 +extern atomic_unchecked_t fscache_n_retrievals_ok;
40063 +extern atomic_unchecked_t fscache_n_retrievals_wait;
40064 +extern atomic_unchecked_t fscache_n_retrievals_nodata;
40065 +extern atomic_unchecked_t fscache_n_retrievals_nobufs;
40066 +extern atomic_unchecked_t fscache_n_retrievals_intr;
40067 +extern atomic_unchecked_t fscache_n_retrievals_nomem;
40068 +extern atomic_unchecked_t fscache_n_retrievals_object_dead;
40069 +extern atomic_unchecked_t fscache_n_retrieval_ops;
40070 +extern atomic_unchecked_t fscache_n_retrieval_op_waits;
40071 +
40072 +extern atomic_unchecked_t fscache_n_stores;
40073 +extern atomic_unchecked_t fscache_n_stores_ok;
40074 +extern atomic_unchecked_t fscache_n_stores_again;
40075 +extern atomic_unchecked_t fscache_n_stores_nobufs;
40076 +extern atomic_unchecked_t fscache_n_stores_oom;
40077 +extern atomic_unchecked_t fscache_n_store_ops;
40078 +extern atomic_unchecked_t fscache_n_store_calls;
40079 +extern atomic_unchecked_t fscache_n_store_pages;
40080 +extern atomic_unchecked_t fscache_n_store_radix_deletes;
40081 +extern atomic_unchecked_t fscache_n_store_pages_over_limit;
40082 +
40083 +extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
40084 +extern atomic_unchecked_t fscache_n_store_vmscan_gone;
40085 +extern atomic_unchecked_t fscache_n_store_vmscan_busy;
40086 +extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
40087 +
40088 +extern atomic_unchecked_t fscache_n_marks;
40089 +extern atomic_unchecked_t fscache_n_uncaches;
40090 +
40091 +extern atomic_unchecked_t fscache_n_acquires;
40092 +extern atomic_unchecked_t fscache_n_acquires_null;
40093 +extern atomic_unchecked_t fscache_n_acquires_no_cache;
40094 +extern atomic_unchecked_t fscache_n_acquires_ok;
40095 +extern atomic_unchecked_t fscache_n_acquires_nobufs;
40096 +extern atomic_unchecked_t fscache_n_acquires_oom;
40097 +
40098 +extern atomic_unchecked_t fscache_n_updates;
40099 +extern atomic_unchecked_t fscache_n_updates_null;
40100 +extern atomic_unchecked_t fscache_n_updates_run;
40101 +
40102 +extern atomic_unchecked_t fscache_n_relinquishes;
40103 +extern atomic_unchecked_t fscache_n_relinquishes_null;
40104 +extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
40105 +extern atomic_unchecked_t fscache_n_relinquishes_retire;
40106 +
40107 +extern atomic_unchecked_t fscache_n_cookie_index;
40108 +extern atomic_unchecked_t fscache_n_cookie_data;
40109 +extern atomic_unchecked_t fscache_n_cookie_special;
40110 +
40111 +extern atomic_unchecked_t fscache_n_object_alloc;
40112 +extern atomic_unchecked_t fscache_n_object_no_alloc;
40113 +extern atomic_unchecked_t fscache_n_object_lookups;
40114 +extern atomic_unchecked_t fscache_n_object_lookups_negative;
40115 +extern atomic_unchecked_t fscache_n_object_lookups_positive;
40116 +extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
40117 +extern atomic_unchecked_t fscache_n_object_created;
40118 +extern atomic_unchecked_t fscache_n_object_avail;
40119 +extern atomic_unchecked_t fscache_n_object_dead;
40120 +
40121 +extern atomic_unchecked_t fscache_n_checkaux_none;
40122 +extern atomic_unchecked_t fscache_n_checkaux_okay;
40123 +extern atomic_unchecked_t fscache_n_checkaux_update;
40124 +extern atomic_unchecked_t fscache_n_checkaux_obsolete;
40125
40126 extern atomic_t fscache_n_cop_alloc_object;
40127 extern atomic_t fscache_n_cop_lookup_object;
40128 @@ -255,6 +255,11 @@ static inline void fscache_stat(atomic_t
40129 atomic_inc(stat);
40130 }
40131
40132 +static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
40133 +{
40134 + atomic_inc_unchecked(stat);
40135 +}
40136 +
40137 static inline void fscache_stat_d(atomic_t *stat)
40138 {
40139 atomic_dec(stat);
40140 @@ -267,6 +272,7 @@ extern const struct file_operations fsca
40141
40142 #define __fscache_stat(stat) (NULL)
40143 #define fscache_stat(stat) do {} while (0)
40144 +#define fscache_stat_unchecked(stat) do {} while (0)
40145 #define fscache_stat_d(stat) do {} while (0)
40146 #endif
40147
40148 diff -urNp linux-3.0.4/fs/fscache/object.c linux-3.0.4/fs/fscache/object.c
40149 --- linux-3.0.4/fs/fscache/object.c 2011-07-21 22:17:23.000000000 -0400
40150 +++ linux-3.0.4/fs/fscache/object.c 2011-08-23 21:47:56.000000000 -0400
40151 @@ -128,7 +128,7 @@ static void fscache_object_state_machine
40152 /* update the object metadata on disk */
40153 case FSCACHE_OBJECT_UPDATING:
40154 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
40155 - fscache_stat(&fscache_n_updates_run);
40156 + fscache_stat_unchecked(&fscache_n_updates_run);
40157 fscache_stat(&fscache_n_cop_update_object);
40158 object->cache->ops->update_object(object);
40159 fscache_stat_d(&fscache_n_cop_update_object);
40160 @@ -217,7 +217,7 @@ static void fscache_object_state_machine
40161 spin_lock(&object->lock);
40162 object->state = FSCACHE_OBJECT_DEAD;
40163 spin_unlock(&object->lock);
40164 - fscache_stat(&fscache_n_object_dead);
40165 + fscache_stat_unchecked(&fscache_n_object_dead);
40166 goto terminal_transit;
40167
40168 /* handle the parent cache of this object being withdrawn from
40169 @@ -232,7 +232,7 @@ static void fscache_object_state_machine
40170 spin_lock(&object->lock);
40171 object->state = FSCACHE_OBJECT_DEAD;
40172 spin_unlock(&object->lock);
40173 - fscache_stat(&fscache_n_object_dead);
40174 + fscache_stat_unchecked(&fscache_n_object_dead);
40175 goto terminal_transit;
40176
40177 /* complain about the object being woken up once it is
40178 @@ -461,7 +461,7 @@ static void fscache_lookup_object(struct
40179 parent->cookie->def->name, cookie->def->name,
40180 object->cache->tag->name);
40181
40182 - fscache_stat(&fscache_n_object_lookups);
40183 + fscache_stat_unchecked(&fscache_n_object_lookups);
40184 fscache_stat(&fscache_n_cop_lookup_object);
40185 ret = object->cache->ops->lookup_object(object);
40186 fscache_stat_d(&fscache_n_cop_lookup_object);
40187 @@ -472,7 +472,7 @@ static void fscache_lookup_object(struct
40188 if (ret == -ETIMEDOUT) {
40189 /* probably stuck behind another object, so move this one to
40190 * the back of the queue */
40191 - fscache_stat(&fscache_n_object_lookups_timed_out);
40192 + fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
40193 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
40194 }
40195
40196 @@ -495,7 +495,7 @@ void fscache_object_lookup_negative(stru
40197
40198 spin_lock(&object->lock);
40199 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
40200 - fscache_stat(&fscache_n_object_lookups_negative);
40201 + fscache_stat_unchecked(&fscache_n_object_lookups_negative);
40202
40203 /* transit here to allow write requests to begin stacking up
40204 * and read requests to begin returning ENODATA */
40205 @@ -541,7 +541,7 @@ void fscache_obtained_object(struct fsca
40206 * result, in which case there may be data available */
40207 spin_lock(&object->lock);
40208 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
40209 - fscache_stat(&fscache_n_object_lookups_positive);
40210 + fscache_stat_unchecked(&fscache_n_object_lookups_positive);
40211
40212 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
40213
40214 @@ -555,7 +555,7 @@ void fscache_obtained_object(struct fsca
40215 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
40216 } else {
40217 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
40218 - fscache_stat(&fscache_n_object_created);
40219 + fscache_stat_unchecked(&fscache_n_object_created);
40220
40221 object->state = FSCACHE_OBJECT_AVAILABLE;
40222 spin_unlock(&object->lock);
40223 @@ -602,7 +602,7 @@ static void fscache_object_available(str
40224 fscache_enqueue_dependents(object);
40225
40226 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
40227 - fscache_stat(&fscache_n_object_avail);
40228 + fscache_stat_unchecked(&fscache_n_object_avail);
40229
40230 _leave("");
40231 }
40232 @@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(
40233 enum fscache_checkaux result;
40234
40235 if (!object->cookie->def->check_aux) {
40236 - fscache_stat(&fscache_n_checkaux_none);
40237 + fscache_stat_unchecked(&fscache_n_checkaux_none);
40238 return FSCACHE_CHECKAUX_OKAY;
40239 }
40240
40241 @@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(
40242 switch (result) {
40243 /* entry okay as is */
40244 case FSCACHE_CHECKAUX_OKAY:
40245 - fscache_stat(&fscache_n_checkaux_okay);
40246 + fscache_stat_unchecked(&fscache_n_checkaux_okay);
40247 break;
40248
40249 /* entry requires update */
40250 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
40251 - fscache_stat(&fscache_n_checkaux_update);
40252 + fscache_stat_unchecked(&fscache_n_checkaux_update);
40253 break;
40254
40255 /* entry requires deletion */
40256 case FSCACHE_CHECKAUX_OBSOLETE:
40257 - fscache_stat(&fscache_n_checkaux_obsolete);
40258 + fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
40259 break;
40260
40261 default:
40262 diff -urNp linux-3.0.4/fs/fscache/operation.c linux-3.0.4/fs/fscache/operation.c
40263 --- linux-3.0.4/fs/fscache/operation.c 2011-07-21 22:17:23.000000000 -0400
40264 +++ linux-3.0.4/fs/fscache/operation.c 2011-08-23 21:47:56.000000000 -0400
40265 @@ -17,7 +17,7 @@
40266 #include <linux/slab.h>
40267 #include "internal.h"
40268
40269 -atomic_t fscache_op_debug_id;
40270 +atomic_unchecked_t fscache_op_debug_id;
40271 EXPORT_SYMBOL(fscache_op_debug_id);
40272
40273 /**
40274 @@ -38,7 +38,7 @@ void fscache_enqueue_operation(struct fs
40275 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
40276 ASSERTCMP(atomic_read(&op->usage), >, 0);
40277
40278 - fscache_stat(&fscache_n_op_enqueue);
40279 + fscache_stat_unchecked(&fscache_n_op_enqueue);
40280 switch (op->flags & FSCACHE_OP_TYPE) {
40281 case FSCACHE_OP_ASYNC:
40282 _debug("queue async");
40283 @@ -69,7 +69,7 @@ static void fscache_run_op(struct fscach
40284 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
40285 if (op->processor)
40286 fscache_enqueue_operation(op);
40287 - fscache_stat(&fscache_n_op_run);
40288 + fscache_stat_unchecked(&fscache_n_op_run);
40289 }
40290
40291 /*
40292 @@ -98,11 +98,11 @@ int fscache_submit_exclusive_op(struct f
40293 if (object->n_ops > 1) {
40294 atomic_inc(&op->usage);
40295 list_add_tail(&op->pend_link, &object->pending_ops);
40296 - fscache_stat(&fscache_n_op_pend);
40297 + fscache_stat_unchecked(&fscache_n_op_pend);
40298 } else if (!list_empty(&object->pending_ops)) {
40299 atomic_inc(&op->usage);
40300 list_add_tail(&op->pend_link, &object->pending_ops);
40301 - fscache_stat(&fscache_n_op_pend);
40302 + fscache_stat_unchecked(&fscache_n_op_pend);
40303 fscache_start_operations(object);
40304 } else {
40305 ASSERTCMP(object->n_in_progress, ==, 0);
40306 @@ -118,7 +118,7 @@ int fscache_submit_exclusive_op(struct f
40307 object->n_exclusive++; /* reads and writes must wait */
40308 atomic_inc(&op->usage);
40309 list_add_tail(&op->pend_link, &object->pending_ops);
40310 - fscache_stat(&fscache_n_op_pend);
40311 + fscache_stat_unchecked(&fscache_n_op_pend);
40312 ret = 0;
40313 } else {
40314 /* not allowed to submit ops in any other state */
40315 @@ -203,11 +203,11 @@ int fscache_submit_op(struct fscache_obj
40316 if (object->n_exclusive > 0) {
40317 atomic_inc(&op->usage);
40318 list_add_tail(&op->pend_link, &object->pending_ops);
40319 - fscache_stat(&fscache_n_op_pend);
40320 + fscache_stat_unchecked(&fscache_n_op_pend);
40321 } else if (!list_empty(&object->pending_ops)) {
40322 atomic_inc(&op->usage);
40323 list_add_tail(&op->pend_link, &object->pending_ops);
40324 - fscache_stat(&fscache_n_op_pend);
40325 + fscache_stat_unchecked(&fscache_n_op_pend);
40326 fscache_start_operations(object);
40327 } else {
40328 ASSERTCMP(object->n_exclusive, ==, 0);
40329 @@ -219,12 +219,12 @@ int fscache_submit_op(struct fscache_obj
40330 object->n_ops++;
40331 atomic_inc(&op->usage);
40332 list_add_tail(&op->pend_link, &object->pending_ops);
40333 - fscache_stat(&fscache_n_op_pend);
40334 + fscache_stat_unchecked(&fscache_n_op_pend);
40335 ret = 0;
40336 } else if (object->state == FSCACHE_OBJECT_DYING ||
40337 object->state == FSCACHE_OBJECT_LC_DYING ||
40338 object->state == FSCACHE_OBJECT_WITHDRAWING) {
40339 - fscache_stat(&fscache_n_op_rejected);
40340 + fscache_stat_unchecked(&fscache_n_op_rejected);
40341 ret = -ENOBUFS;
40342 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
40343 fscache_report_unexpected_submission(object, op, ostate);
40344 @@ -294,7 +294,7 @@ int fscache_cancel_op(struct fscache_ope
40345
40346 ret = -EBUSY;
40347 if (!list_empty(&op->pend_link)) {
40348 - fscache_stat(&fscache_n_op_cancelled);
40349 + fscache_stat_unchecked(&fscache_n_op_cancelled);
40350 list_del_init(&op->pend_link);
40351 object->n_ops--;
40352 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
40353 @@ -331,7 +331,7 @@ void fscache_put_operation(struct fscach
40354 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
40355 BUG();
40356
40357 - fscache_stat(&fscache_n_op_release);
40358 + fscache_stat_unchecked(&fscache_n_op_release);
40359
40360 if (op->release) {
40361 op->release(op);
40362 @@ -348,7 +348,7 @@ void fscache_put_operation(struct fscach
40363 * lock, and defer it otherwise */
40364 if (!spin_trylock(&object->lock)) {
40365 _debug("defer put");
40366 - fscache_stat(&fscache_n_op_deferred_release);
40367 + fscache_stat_unchecked(&fscache_n_op_deferred_release);
40368
40369 cache = object->cache;
40370 spin_lock(&cache->op_gc_list_lock);
40371 @@ -410,7 +410,7 @@ void fscache_operation_gc(struct work_st
40372
40373 _debug("GC DEFERRED REL OBJ%x OP%x",
40374 object->debug_id, op->debug_id);
40375 - fscache_stat(&fscache_n_op_gc);
40376 + fscache_stat_unchecked(&fscache_n_op_gc);
40377
40378 ASSERTCMP(atomic_read(&op->usage), ==, 0);
40379
40380 diff -urNp linux-3.0.4/fs/fscache/page.c linux-3.0.4/fs/fscache/page.c
40381 --- linux-3.0.4/fs/fscache/page.c 2011-07-21 22:17:23.000000000 -0400
40382 +++ linux-3.0.4/fs/fscache/page.c 2011-08-23 21:47:56.000000000 -0400
40383 @@ -60,7 +60,7 @@ bool __fscache_maybe_release_page(struct
40384 val = radix_tree_lookup(&cookie->stores, page->index);
40385 if (!val) {
40386 rcu_read_unlock();
40387 - fscache_stat(&fscache_n_store_vmscan_not_storing);
40388 + fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
40389 __fscache_uncache_page(cookie, page);
40390 return true;
40391 }
40392 @@ -90,11 +90,11 @@ bool __fscache_maybe_release_page(struct
40393 spin_unlock(&cookie->stores_lock);
40394
40395 if (xpage) {
40396 - fscache_stat(&fscache_n_store_vmscan_cancelled);
40397 - fscache_stat(&fscache_n_store_radix_deletes);
40398 + fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
40399 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
40400 ASSERTCMP(xpage, ==, page);
40401 } else {
40402 - fscache_stat(&fscache_n_store_vmscan_gone);
40403 + fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
40404 }
40405
40406 wake_up_bit(&cookie->flags, 0);
40407 @@ -107,7 +107,7 @@ page_busy:
40408 /* we might want to wait here, but that could deadlock the allocator as
40409 * the work threads writing to the cache may all end up sleeping
40410 * on memory allocation */
40411 - fscache_stat(&fscache_n_store_vmscan_busy);
40412 + fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
40413 return false;
40414 }
40415 EXPORT_SYMBOL(__fscache_maybe_release_page);
40416 @@ -131,7 +131,7 @@ static void fscache_end_page_write(struc
40417 FSCACHE_COOKIE_STORING_TAG);
40418 if (!radix_tree_tag_get(&cookie->stores, page->index,
40419 FSCACHE_COOKIE_PENDING_TAG)) {
40420 - fscache_stat(&fscache_n_store_radix_deletes);
40421 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
40422 xpage = radix_tree_delete(&cookie->stores, page->index);
40423 }
40424 spin_unlock(&cookie->stores_lock);
40425 @@ -152,7 +152,7 @@ static void fscache_attr_changed_op(stru
40426
40427 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
40428
40429 - fscache_stat(&fscache_n_attr_changed_calls);
40430 + fscache_stat_unchecked(&fscache_n_attr_changed_calls);
40431
40432 if (fscache_object_is_active(object)) {
40433 fscache_stat(&fscache_n_cop_attr_changed);
40434 @@ -177,11 +177,11 @@ int __fscache_attr_changed(struct fscach
40435
40436 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
40437
40438 - fscache_stat(&fscache_n_attr_changed);
40439 + fscache_stat_unchecked(&fscache_n_attr_changed);
40440
40441 op = kzalloc(sizeof(*op), GFP_KERNEL);
40442 if (!op) {
40443 - fscache_stat(&fscache_n_attr_changed_nomem);
40444 + fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
40445 _leave(" = -ENOMEM");
40446 return -ENOMEM;
40447 }
40448 @@ -199,7 +199,7 @@ int __fscache_attr_changed(struct fscach
40449 if (fscache_submit_exclusive_op(object, op) < 0)
40450 goto nobufs;
40451 spin_unlock(&cookie->lock);
40452 - fscache_stat(&fscache_n_attr_changed_ok);
40453 + fscache_stat_unchecked(&fscache_n_attr_changed_ok);
40454 fscache_put_operation(op);
40455 _leave(" = 0");
40456 return 0;
40457 @@ -207,7 +207,7 @@ int __fscache_attr_changed(struct fscach
40458 nobufs:
40459 spin_unlock(&cookie->lock);
40460 kfree(op);
40461 - fscache_stat(&fscache_n_attr_changed_nobufs);
40462 + fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
40463 _leave(" = %d", -ENOBUFS);
40464 return -ENOBUFS;
40465 }
40466 @@ -243,7 +243,7 @@ static struct fscache_retrieval *fscache
40467 /* allocate a retrieval operation and attempt to submit it */
40468 op = kzalloc(sizeof(*op), GFP_NOIO);
40469 if (!op) {
40470 - fscache_stat(&fscache_n_retrievals_nomem);
40471 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
40472 return NULL;
40473 }
40474
40475 @@ -271,13 +271,13 @@ static int fscache_wait_for_deferred_loo
40476 return 0;
40477 }
40478
40479 - fscache_stat(&fscache_n_retrievals_wait);
40480 + fscache_stat_unchecked(&fscache_n_retrievals_wait);
40481
40482 jif = jiffies;
40483 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
40484 fscache_wait_bit_interruptible,
40485 TASK_INTERRUPTIBLE) != 0) {
40486 - fscache_stat(&fscache_n_retrievals_intr);
40487 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
40488 _leave(" = -ERESTARTSYS");
40489 return -ERESTARTSYS;
40490 }
40491 @@ -295,8 +295,8 @@ static int fscache_wait_for_deferred_loo
40492 */
40493 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
40494 struct fscache_retrieval *op,
40495 - atomic_t *stat_op_waits,
40496 - atomic_t *stat_object_dead)
40497 + atomic_unchecked_t *stat_op_waits,
40498 + atomic_unchecked_t *stat_object_dead)
40499 {
40500 int ret;
40501
40502 @@ -304,7 +304,7 @@ static int fscache_wait_for_retrieval_ac
40503 goto check_if_dead;
40504
40505 _debug(">>> WT");
40506 - fscache_stat(stat_op_waits);
40507 + fscache_stat_unchecked(stat_op_waits);
40508 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
40509 fscache_wait_bit_interruptible,
40510 TASK_INTERRUPTIBLE) < 0) {
40511 @@ -321,7 +321,7 @@ static int fscache_wait_for_retrieval_ac
40512
40513 check_if_dead:
40514 if (unlikely(fscache_object_is_dead(object))) {
40515 - fscache_stat(stat_object_dead);
40516 + fscache_stat_unchecked(stat_object_dead);
40517 return -ENOBUFS;
40518 }
40519 return 0;
40520 @@ -348,7 +348,7 @@ int __fscache_read_or_alloc_page(struct
40521
40522 _enter("%p,%p,,,", cookie, page);
40523
40524 - fscache_stat(&fscache_n_retrievals);
40525 + fscache_stat_unchecked(&fscache_n_retrievals);
40526
40527 if (hlist_empty(&cookie->backing_objects))
40528 goto nobufs;
40529 @@ -381,7 +381,7 @@ int __fscache_read_or_alloc_page(struct
40530 goto nobufs_unlock;
40531 spin_unlock(&cookie->lock);
40532
40533 - fscache_stat(&fscache_n_retrieval_ops);
40534 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
40535
40536 /* pin the netfs read context in case we need to do the actual netfs
40537 * read because we've encountered a cache read failure */
40538 @@ -411,15 +411,15 @@ int __fscache_read_or_alloc_page(struct
40539
40540 error:
40541 if (ret == -ENOMEM)
40542 - fscache_stat(&fscache_n_retrievals_nomem);
40543 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
40544 else if (ret == -ERESTARTSYS)
40545 - fscache_stat(&fscache_n_retrievals_intr);
40546 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
40547 else if (ret == -ENODATA)
40548 - fscache_stat(&fscache_n_retrievals_nodata);
40549 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
40550 else if (ret < 0)
40551 - fscache_stat(&fscache_n_retrievals_nobufs);
40552 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
40553 else
40554 - fscache_stat(&fscache_n_retrievals_ok);
40555 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
40556
40557 fscache_put_retrieval(op);
40558 _leave(" = %d", ret);
40559 @@ -429,7 +429,7 @@ nobufs_unlock:
40560 spin_unlock(&cookie->lock);
40561 kfree(op);
40562 nobufs:
40563 - fscache_stat(&fscache_n_retrievals_nobufs);
40564 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
40565 _leave(" = -ENOBUFS");
40566 return -ENOBUFS;
40567 }
40568 @@ -467,7 +467,7 @@ int __fscache_read_or_alloc_pages(struct
40569
40570 _enter("%p,,%d,,,", cookie, *nr_pages);
40571
40572 - fscache_stat(&fscache_n_retrievals);
40573 + fscache_stat_unchecked(&fscache_n_retrievals);
40574
40575 if (hlist_empty(&cookie->backing_objects))
40576 goto nobufs;
40577 @@ -497,7 +497,7 @@ int __fscache_read_or_alloc_pages(struct
40578 goto nobufs_unlock;
40579 spin_unlock(&cookie->lock);
40580
40581 - fscache_stat(&fscache_n_retrieval_ops);
40582 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
40583
40584 /* pin the netfs read context in case we need to do the actual netfs
40585 * read because we've encountered a cache read failure */
40586 @@ -527,15 +527,15 @@ int __fscache_read_or_alloc_pages(struct
40587
40588 error:
40589 if (ret == -ENOMEM)
40590 - fscache_stat(&fscache_n_retrievals_nomem);
40591 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
40592 else if (ret == -ERESTARTSYS)
40593 - fscache_stat(&fscache_n_retrievals_intr);
40594 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
40595 else if (ret == -ENODATA)
40596 - fscache_stat(&fscache_n_retrievals_nodata);
40597 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
40598 else if (ret < 0)
40599 - fscache_stat(&fscache_n_retrievals_nobufs);
40600 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
40601 else
40602 - fscache_stat(&fscache_n_retrievals_ok);
40603 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
40604
40605 fscache_put_retrieval(op);
40606 _leave(" = %d", ret);
40607 @@ -545,7 +545,7 @@ nobufs_unlock:
40608 spin_unlock(&cookie->lock);
40609 kfree(op);
40610 nobufs:
40611 - fscache_stat(&fscache_n_retrievals_nobufs);
40612 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
40613 _leave(" = -ENOBUFS");
40614 return -ENOBUFS;
40615 }
40616 @@ -569,7 +569,7 @@ int __fscache_alloc_page(struct fscache_
40617
40618 _enter("%p,%p,,,", cookie, page);
40619
40620 - fscache_stat(&fscache_n_allocs);
40621 + fscache_stat_unchecked(&fscache_n_allocs);
40622
40623 if (hlist_empty(&cookie->backing_objects))
40624 goto nobufs;
40625 @@ -595,7 +595,7 @@ int __fscache_alloc_page(struct fscache_
40626 goto nobufs_unlock;
40627 spin_unlock(&cookie->lock);
40628
40629 - fscache_stat(&fscache_n_alloc_ops);
40630 + fscache_stat_unchecked(&fscache_n_alloc_ops);
40631
40632 ret = fscache_wait_for_retrieval_activation(
40633 object, op,
40634 @@ -611,11 +611,11 @@ int __fscache_alloc_page(struct fscache_
40635
40636 error:
40637 if (ret == -ERESTARTSYS)
40638 - fscache_stat(&fscache_n_allocs_intr);
40639 + fscache_stat_unchecked(&fscache_n_allocs_intr);
40640 else if (ret < 0)
40641 - fscache_stat(&fscache_n_allocs_nobufs);
40642 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
40643 else
40644 - fscache_stat(&fscache_n_allocs_ok);
40645 + fscache_stat_unchecked(&fscache_n_allocs_ok);
40646
40647 fscache_put_retrieval(op);
40648 _leave(" = %d", ret);
40649 @@ -625,7 +625,7 @@ nobufs_unlock:
40650 spin_unlock(&cookie->lock);
40651 kfree(op);
40652 nobufs:
40653 - fscache_stat(&fscache_n_allocs_nobufs);
40654 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
40655 _leave(" = -ENOBUFS");
40656 return -ENOBUFS;
40657 }
40658 @@ -666,7 +666,7 @@ static void fscache_write_op(struct fsca
40659
40660 spin_lock(&cookie->stores_lock);
40661
40662 - fscache_stat(&fscache_n_store_calls);
40663 + fscache_stat_unchecked(&fscache_n_store_calls);
40664
40665 /* find a page to store */
40666 page = NULL;
40667 @@ -677,7 +677,7 @@ static void fscache_write_op(struct fsca
40668 page = results[0];
40669 _debug("gang %d [%lx]", n, page->index);
40670 if (page->index > op->store_limit) {
40671 - fscache_stat(&fscache_n_store_pages_over_limit);
40672 + fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
40673 goto superseded;
40674 }
40675
40676 @@ -689,7 +689,7 @@ static void fscache_write_op(struct fsca
40677 spin_unlock(&cookie->stores_lock);
40678 spin_unlock(&object->lock);
40679
40680 - fscache_stat(&fscache_n_store_pages);
40681 + fscache_stat_unchecked(&fscache_n_store_pages);
40682 fscache_stat(&fscache_n_cop_write_page);
40683 ret = object->cache->ops->write_page(op, page);
40684 fscache_stat_d(&fscache_n_cop_write_page);
40685 @@ -757,7 +757,7 @@ int __fscache_write_page(struct fscache_
40686 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
40687 ASSERT(PageFsCache(page));
40688
40689 - fscache_stat(&fscache_n_stores);
40690 + fscache_stat_unchecked(&fscache_n_stores);
40691
40692 op = kzalloc(sizeof(*op), GFP_NOIO);
40693 if (!op)
40694 @@ -808,7 +808,7 @@ int __fscache_write_page(struct fscache_
40695 spin_unlock(&cookie->stores_lock);
40696 spin_unlock(&object->lock);
40697
40698 - op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
40699 + op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
40700 op->store_limit = object->store_limit;
40701
40702 if (fscache_submit_op(object, &op->op) < 0)
40703 @@ -816,8 +816,8 @@ int __fscache_write_page(struct fscache_
40704
40705 spin_unlock(&cookie->lock);
40706 radix_tree_preload_end();
40707 - fscache_stat(&fscache_n_store_ops);
40708 - fscache_stat(&fscache_n_stores_ok);
40709 + fscache_stat_unchecked(&fscache_n_store_ops);
40710 + fscache_stat_unchecked(&fscache_n_stores_ok);
40711
40712 /* the work queue now carries its own ref on the object */
40713 fscache_put_operation(&op->op);
40714 @@ -825,14 +825,14 @@ int __fscache_write_page(struct fscache_
40715 return 0;
40716
40717 already_queued:
40718 - fscache_stat(&fscache_n_stores_again);
40719 + fscache_stat_unchecked(&fscache_n_stores_again);
40720 already_pending:
40721 spin_unlock(&cookie->stores_lock);
40722 spin_unlock(&object->lock);
40723 spin_unlock(&cookie->lock);
40724 radix_tree_preload_end();
40725 kfree(op);
40726 - fscache_stat(&fscache_n_stores_ok);
40727 + fscache_stat_unchecked(&fscache_n_stores_ok);
40728 _leave(" = 0");
40729 return 0;
40730
40731 @@ -851,14 +851,14 @@ nobufs:
40732 spin_unlock(&cookie->lock);
40733 radix_tree_preload_end();
40734 kfree(op);
40735 - fscache_stat(&fscache_n_stores_nobufs);
40736 + fscache_stat_unchecked(&fscache_n_stores_nobufs);
40737 _leave(" = -ENOBUFS");
40738 return -ENOBUFS;
40739
40740 nomem_free:
40741 kfree(op);
40742 nomem:
40743 - fscache_stat(&fscache_n_stores_oom);
40744 + fscache_stat_unchecked(&fscache_n_stores_oom);
40745 _leave(" = -ENOMEM");
40746 return -ENOMEM;
40747 }
40748 @@ -876,7 +876,7 @@ void __fscache_uncache_page(struct fscac
40749 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
40750 ASSERTCMP(page, !=, NULL);
40751
40752 - fscache_stat(&fscache_n_uncaches);
40753 + fscache_stat_unchecked(&fscache_n_uncaches);
40754
40755 /* cache withdrawal may beat us to it */
40756 if (!PageFsCache(page))
40757 @@ -929,7 +929,7 @@ void fscache_mark_pages_cached(struct fs
40758 unsigned long loop;
40759
40760 #ifdef CONFIG_FSCACHE_STATS
40761 - atomic_add(pagevec->nr, &fscache_n_marks);
40762 + atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
40763 #endif
40764
40765 for (loop = 0; loop < pagevec->nr; loop++) {
40766 diff -urNp linux-3.0.4/fs/fscache/stats.c linux-3.0.4/fs/fscache/stats.c
40767 --- linux-3.0.4/fs/fscache/stats.c 2011-07-21 22:17:23.000000000 -0400
40768 +++ linux-3.0.4/fs/fscache/stats.c 2011-08-23 21:47:56.000000000 -0400
40769 @@ -18,95 +18,95 @@
40770 /*
40771 * operation counters
40772 */
40773 -atomic_t fscache_n_op_pend;
40774 -atomic_t fscache_n_op_run;
40775 -atomic_t fscache_n_op_enqueue;
40776 -atomic_t fscache_n_op_requeue;
40777 -atomic_t fscache_n_op_deferred_release;
40778 -atomic_t fscache_n_op_release;
40779 -atomic_t fscache_n_op_gc;
40780 -atomic_t fscache_n_op_cancelled;
40781 -atomic_t fscache_n_op_rejected;
40782 -
40783 -atomic_t fscache_n_attr_changed;
40784 -atomic_t fscache_n_attr_changed_ok;
40785 -atomic_t fscache_n_attr_changed_nobufs;
40786 -atomic_t fscache_n_attr_changed_nomem;
40787 -atomic_t fscache_n_attr_changed_calls;
40788 -
40789 -atomic_t fscache_n_allocs;
40790 -atomic_t fscache_n_allocs_ok;
40791 -atomic_t fscache_n_allocs_wait;
40792 -atomic_t fscache_n_allocs_nobufs;
40793 -atomic_t fscache_n_allocs_intr;
40794 -atomic_t fscache_n_allocs_object_dead;
40795 -atomic_t fscache_n_alloc_ops;
40796 -atomic_t fscache_n_alloc_op_waits;
40797 -
40798 -atomic_t fscache_n_retrievals;
40799 -atomic_t fscache_n_retrievals_ok;
40800 -atomic_t fscache_n_retrievals_wait;
40801 -atomic_t fscache_n_retrievals_nodata;
40802 -atomic_t fscache_n_retrievals_nobufs;
40803 -atomic_t fscache_n_retrievals_intr;
40804 -atomic_t fscache_n_retrievals_nomem;
40805 -atomic_t fscache_n_retrievals_object_dead;
40806 -atomic_t fscache_n_retrieval_ops;
40807 -atomic_t fscache_n_retrieval_op_waits;
40808 -
40809 -atomic_t fscache_n_stores;
40810 -atomic_t fscache_n_stores_ok;
40811 -atomic_t fscache_n_stores_again;
40812 -atomic_t fscache_n_stores_nobufs;
40813 -atomic_t fscache_n_stores_oom;
40814 -atomic_t fscache_n_store_ops;
40815 -atomic_t fscache_n_store_calls;
40816 -atomic_t fscache_n_store_pages;
40817 -atomic_t fscache_n_store_radix_deletes;
40818 -atomic_t fscache_n_store_pages_over_limit;
40819 -
40820 -atomic_t fscache_n_store_vmscan_not_storing;
40821 -atomic_t fscache_n_store_vmscan_gone;
40822 -atomic_t fscache_n_store_vmscan_busy;
40823 -atomic_t fscache_n_store_vmscan_cancelled;
40824 -
40825 -atomic_t fscache_n_marks;
40826 -atomic_t fscache_n_uncaches;
40827 -
40828 -atomic_t fscache_n_acquires;
40829 -atomic_t fscache_n_acquires_null;
40830 -atomic_t fscache_n_acquires_no_cache;
40831 -atomic_t fscache_n_acquires_ok;
40832 -atomic_t fscache_n_acquires_nobufs;
40833 -atomic_t fscache_n_acquires_oom;
40834 -
40835 -atomic_t fscache_n_updates;
40836 -atomic_t fscache_n_updates_null;
40837 -atomic_t fscache_n_updates_run;
40838 -
40839 -atomic_t fscache_n_relinquishes;
40840 -atomic_t fscache_n_relinquishes_null;
40841 -atomic_t fscache_n_relinquishes_waitcrt;
40842 -atomic_t fscache_n_relinquishes_retire;
40843 -
40844 -atomic_t fscache_n_cookie_index;
40845 -atomic_t fscache_n_cookie_data;
40846 -atomic_t fscache_n_cookie_special;
40847 -
40848 -atomic_t fscache_n_object_alloc;
40849 -atomic_t fscache_n_object_no_alloc;
40850 -atomic_t fscache_n_object_lookups;
40851 -atomic_t fscache_n_object_lookups_negative;
40852 -atomic_t fscache_n_object_lookups_positive;
40853 -atomic_t fscache_n_object_lookups_timed_out;
40854 -atomic_t fscache_n_object_created;
40855 -atomic_t fscache_n_object_avail;
40856 -atomic_t fscache_n_object_dead;
40857 -
40858 -atomic_t fscache_n_checkaux_none;
40859 -atomic_t fscache_n_checkaux_okay;
40860 -atomic_t fscache_n_checkaux_update;
40861 -atomic_t fscache_n_checkaux_obsolete;
40862 +atomic_unchecked_t fscache_n_op_pend;
40863 +atomic_unchecked_t fscache_n_op_run;
40864 +atomic_unchecked_t fscache_n_op_enqueue;
40865 +atomic_unchecked_t fscache_n_op_requeue;
40866 +atomic_unchecked_t fscache_n_op_deferred_release;
40867 +atomic_unchecked_t fscache_n_op_release;
40868 +atomic_unchecked_t fscache_n_op_gc;
40869 +atomic_unchecked_t fscache_n_op_cancelled;
40870 +atomic_unchecked_t fscache_n_op_rejected;
40871 +
40872 +atomic_unchecked_t fscache_n_attr_changed;
40873 +atomic_unchecked_t fscache_n_attr_changed_ok;
40874 +atomic_unchecked_t fscache_n_attr_changed_nobufs;
40875 +atomic_unchecked_t fscache_n_attr_changed_nomem;
40876 +atomic_unchecked_t fscache_n_attr_changed_calls;
40877 +
40878 +atomic_unchecked_t fscache_n_allocs;
40879 +atomic_unchecked_t fscache_n_allocs_ok;
40880 +atomic_unchecked_t fscache_n_allocs_wait;
40881 +atomic_unchecked_t fscache_n_allocs_nobufs;
40882 +atomic_unchecked_t fscache_n_allocs_intr;
40883 +atomic_unchecked_t fscache_n_allocs_object_dead;
40884 +atomic_unchecked_t fscache_n_alloc_ops;
40885 +atomic_unchecked_t fscache_n_alloc_op_waits;
40886 +
40887 +atomic_unchecked_t fscache_n_retrievals;
40888 +atomic_unchecked_t fscache_n_retrievals_ok;
40889 +atomic_unchecked_t fscache_n_retrievals_wait;
40890 +atomic_unchecked_t fscache_n_retrievals_nodata;
40891 +atomic_unchecked_t fscache_n_retrievals_nobufs;
40892 +atomic_unchecked_t fscache_n_retrievals_intr;
40893 +atomic_unchecked_t fscache_n_retrievals_nomem;
40894 +atomic_unchecked_t fscache_n_retrievals_object_dead;
40895 +atomic_unchecked_t fscache_n_retrieval_ops;
40896 +atomic_unchecked_t fscache_n_retrieval_op_waits;
40897 +
40898 +atomic_unchecked_t fscache_n_stores;
40899 +atomic_unchecked_t fscache_n_stores_ok;
40900 +atomic_unchecked_t fscache_n_stores_again;
40901 +atomic_unchecked_t fscache_n_stores_nobufs;
40902 +atomic_unchecked_t fscache_n_stores_oom;
40903 +atomic_unchecked_t fscache_n_store_ops;
40904 +atomic_unchecked_t fscache_n_store_calls;
40905 +atomic_unchecked_t fscache_n_store_pages;
40906 +atomic_unchecked_t fscache_n_store_radix_deletes;
40907 +atomic_unchecked_t fscache_n_store_pages_over_limit;
40908 +
40909 +atomic_unchecked_t fscache_n_store_vmscan_not_storing;
40910 +atomic_unchecked_t fscache_n_store_vmscan_gone;
40911 +atomic_unchecked_t fscache_n_store_vmscan_busy;
40912 +atomic_unchecked_t fscache_n_store_vmscan_cancelled;
40913 +
40914 +atomic_unchecked_t fscache_n_marks;
40915 +atomic_unchecked_t fscache_n_uncaches;
40916 +
40917 +atomic_unchecked_t fscache_n_acquires;
40918 +atomic_unchecked_t fscache_n_acquires_null;
40919 +atomic_unchecked_t fscache_n_acquires_no_cache;
40920 +atomic_unchecked_t fscache_n_acquires_ok;
40921 +atomic_unchecked_t fscache_n_acquires_nobufs;
40922 +atomic_unchecked_t fscache_n_acquires_oom;
40923 +
40924 +atomic_unchecked_t fscache_n_updates;
40925 +atomic_unchecked_t fscache_n_updates_null;
40926 +atomic_unchecked_t fscache_n_updates_run;
40927 +
40928 +atomic_unchecked_t fscache_n_relinquishes;
40929 +atomic_unchecked_t fscache_n_relinquishes_null;
40930 +atomic_unchecked_t fscache_n_relinquishes_waitcrt;
40931 +atomic_unchecked_t fscache_n_relinquishes_retire;
40932 +
40933 +atomic_unchecked_t fscache_n_cookie_index;
40934 +atomic_unchecked_t fscache_n_cookie_data;
40935 +atomic_unchecked_t fscache_n_cookie_special;
40936 +
40937 +atomic_unchecked_t fscache_n_object_alloc;
40938 +atomic_unchecked_t fscache_n_object_no_alloc;
40939 +atomic_unchecked_t fscache_n_object_lookups;
40940 +atomic_unchecked_t fscache_n_object_lookups_negative;
40941 +atomic_unchecked_t fscache_n_object_lookups_positive;
40942 +atomic_unchecked_t fscache_n_object_lookups_timed_out;
40943 +atomic_unchecked_t fscache_n_object_created;
40944 +atomic_unchecked_t fscache_n_object_avail;
40945 +atomic_unchecked_t fscache_n_object_dead;
40946 +
40947 +atomic_unchecked_t fscache_n_checkaux_none;
40948 +atomic_unchecked_t fscache_n_checkaux_okay;
40949 +atomic_unchecked_t fscache_n_checkaux_update;
40950 +atomic_unchecked_t fscache_n_checkaux_obsolete;
40951
40952 atomic_t fscache_n_cop_alloc_object;
40953 atomic_t fscache_n_cop_lookup_object;
40954 @@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq
40955 seq_puts(m, "FS-Cache statistics\n");
40956
40957 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
40958 - atomic_read(&fscache_n_cookie_index),
40959 - atomic_read(&fscache_n_cookie_data),
40960 - atomic_read(&fscache_n_cookie_special));
40961 + atomic_read_unchecked(&fscache_n_cookie_index),
40962 + atomic_read_unchecked(&fscache_n_cookie_data),
40963 + atomic_read_unchecked(&fscache_n_cookie_special));
40964
40965 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
40966 - atomic_read(&fscache_n_object_alloc),
40967 - atomic_read(&fscache_n_object_no_alloc),
40968 - atomic_read(&fscache_n_object_avail),
40969 - atomic_read(&fscache_n_object_dead));
40970 + atomic_read_unchecked(&fscache_n_object_alloc),
40971 + atomic_read_unchecked(&fscache_n_object_no_alloc),
40972 + atomic_read_unchecked(&fscache_n_object_avail),
40973 + atomic_read_unchecked(&fscache_n_object_dead));
40974 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
40975 - atomic_read(&fscache_n_checkaux_none),
40976 - atomic_read(&fscache_n_checkaux_okay),
40977 - atomic_read(&fscache_n_checkaux_update),
40978 - atomic_read(&fscache_n_checkaux_obsolete));
40979 + atomic_read_unchecked(&fscache_n_checkaux_none),
40980 + atomic_read_unchecked(&fscache_n_checkaux_okay),
40981 + atomic_read_unchecked(&fscache_n_checkaux_update),
40982 + atomic_read_unchecked(&fscache_n_checkaux_obsolete));
40983
40984 seq_printf(m, "Pages : mrk=%u unc=%u\n",
40985 - atomic_read(&fscache_n_marks),
40986 - atomic_read(&fscache_n_uncaches));
40987 + atomic_read_unchecked(&fscache_n_marks),
40988 + atomic_read_unchecked(&fscache_n_uncaches));
40989
40990 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
40991 " oom=%u\n",
40992 - atomic_read(&fscache_n_acquires),
40993 - atomic_read(&fscache_n_acquires_null),
40994 - atomic_read(&fscache_n_acquires_no_cache),
40995 - atomic_read(&fscache_n_acquires_ok),
40996 - atomic_read(&fscache_n_acquires_nobufs),
40997 - atomic_read(&fscache_n_acquires_oom));
40998 + atomic_read_unchecked(&fscache_n_acquires),
40999 + atomic_read_unchecked(&fscache_n_acquires_null),
41000 + atomic_read_unchecked(&fscache_n_acquires_no_cache),
41001 + atomic_read_unchecked(&fscache_n_acquires_ok),
41002 + atomic_read_unchecked(&fscache_n_acquires_nobufs),
41003 + atomic_read_unchecked(&fscache_n_acquires_oom));
41004
41005 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
41006 - atomic_read(&fscache_n_object_lookups),
41007 - atomic_read(&fscache_n_object_lookups_negative),
41008 - atomic_read(&fscache_n_object_lookups_positive),
41009 - atomic_read(&fscache_n_object_created),
41010 - atomic_read(&fscache_n_object_lookups_timed_out));
41011 + atomic_read_unchecked(&fscache_n_object_lookups),
41012 + atomic_read_unchecked(&fscache_n_object_lookups_negative),
41013 + atomic_read_unchecked(&fscache_n_object_lookups_positive),
41014 + atomic_read_unchecked(&fscache_n_object_created),
41015 + atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
41016
41017 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
41018 - atomic_read(&fscache_n_updates),
41019 - atomic_read(&fscache_n_updates_null),
41020 - atomic_read(&fscache_n_updates_run));
41021 + atomic_read_unchecked(&fscache_n_updates),
41022 + atomic_read_unchecked(&fscache_n_updates_null),
41023 + atomic_read_unchecked(&fscache_n_updates_run));
41024
41025 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
41026 - atomic_read(&fscache_n_relinquishes),
41027 - atomic_read(&fscache_n_relinquishes_null),
41028 - atomic_read(&fscache_n_relinquishes_waitcrt),
41029 - atomic_read(&fscache_n_relinquishes_retire));
41030 + atomic_read_unchecked(&fscache_n_relinquishes),
41031 + atomic_read_unchecked(&fscache_n_relinquishes_null),
41032 + atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
41033 + atomic_read_unchecked(&fscache_n_relinquishes_retire));
41034
41035 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
41036 - atomic_read(&fscache_n_attr_changed),
41037 - atomic_read(&fscache_n_attr_changed_ok),
41038 - atomic_read(&fscache_n_attr_changed_nobufs),
41039 - atomic_read(&fscache_n_attr_changed_nomem),
41040 - atomic_read(&fscache_n_attr_changed_calls));
41041 + atomic_read_unchecked(&fscache_n_attr_changed),
41042 + atomic_read_unchecked(&fscache_n_attr_changed_ok),
41043 + atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
41044 + atomic_read_unchecked(&fscache_n_attr_changed_nomem),
41045 + atomic_read_unchecked(&fscache_n_attr_changed_calls));
41046
41047 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
41048 - atomic_read(&fscache_n_allocs),
41049 - atomic_read(&fscache_n_allocs_ok),
41050 - atomic_read(&fscache_n_allocs_wait),
41051 - atomic_read(&fscache_n_allocs_nobufs),
41052 - atomic_read(&fscache_n_allocs_intr));
41053 + atomic_read_unchecked(&fscache_n_allocs),
41054 + atomic_read_unchecked(&fscache_n_allocs_ok),
41055 + atomic_read_unchecked(&fscache_n_allocs_wait),
41056 + atomic_read_unchecked(&fscache_n_allocs_nobufs),
41057 + atomic_read_unchecked(&fscache_n_allocs_intr));
41058 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
41059 - atomic_read(&fscache_n_alloc_ops),
41060 - atomic_read(&fscache_n_alloc_op_waits),
41061 - atomic_read(&fscache_n_allocs_object_dead));
41062 + atomic_read_unchecked(&fscache_n_alloc_ops),
41063 + atomic_read_unchecked(&fscache_n_alloc_op_waits),
41064 + atomic_read_unchecked(&fscache_n_allocs_object_dead));
41065
41066 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
41067 " int=%u oom=%u\n",
41068 - atomic_read(&fscache_n_retrievals),
41069 - atomic_read(&fscache_n_retrievals_ok),
41070 - atomic_read(&fscache_n_retrievals_wait),
41071 - atomic_read(&fscache_n_retrievals_nodata),
41072 - atomic_read(&fscache_n_retrievals_nobufs),
41073 - atomic_read(&fscache_n_retrievals_intr),
41074 - atomic_read(&fscache_n_retrievals_nomem));
41075 + atomic_read_unchecked(&fscache_n_retrievals),
41076 + atomic_read_unchecked(&fscache_n_retrievals_ok),
41077 + atomic_read_unchecked(&fscache_n_retrievals_wait),
41078 + atomic_read_unchecked(&fscache_n_retrievals_nodata),
41079 + atomic_read_unchecked(&fscache_n_retrievals_nobufs),
41080 + atomic_read_unchecked(&fscache_n_retrievals_intr),
41081 + atomic_read_unchecked(&fscache_n_retrievals_nomem));
41082 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
41083 - atomic_read(&fscache_n_retrieval_ops),
41084 - atomic_read(&fscache_n_retrieval_op_waits),
41085 - atomic_read(&fscache_n_retrievals_object_dead));
41086 + atomic_read_unchecked(&fscache_n_retrieval_ops),
41087 + atomic_read_unchecked(&fscache_n_retrieval_op_waits),
41088 + atomic_read_unchecked(&fscache_n_retrievals_object_dead));
41089
41090 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
41091 - atomic_read(&fscache_n_stores),
41092 - atomic_read(&fscache_n_stores_ok),
41093 - atomic_read(&fscache_n_stores_again),
41094 - atomic_read(&fscache_n_stores_nobufs),
41095 - atomic_read(&fscache_n_stores_oom));
41096 + atomic_read_unchecked(&fscache_n_stores),
41097 + atomic_read_unchecked(&fscache_n_stores_ok),
41098 + atomic_read_unchecked(&fscache_n_stores_again),
41099 + atomic_read_unchecked(&fscache_n_stores_nobufs),
41100 + atomic_read_unchecked(&fscache_n_stores_oom));
41101 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
41102 - atomic_read(&fscache_n_store_ops),
41103 - atomic_read(&fscache_n_store_calls),
41104 - atomic_read(&fscache_n_store_pages),
41105 - atomic_read(&fscache_n_store_radix_deletes),
41106 - atomic_read(&fscache_n_store_pages_over_limit));
41107 + atomic_read_unchecked(&fscache_n_store_ops),
41108 + atomic_read_unchecked(&fscache_n_store_calls),
41109 + atomic_read_unchecked(&fscache_n_store_pages),
41110 + atomic_read_unchecked(&fscache_n_store_radix_deletes),
41111 + atomic_read_unchecked(&fscache_n_store_pages_over_limit));
41112
41113 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
41114 - atomic_read(&fscache_n_store_vmscan_not_storing),
41115 - atomic_read(&fscache_n_store_vmscan_gone),
41116 - atomic_read(&fscache_n_store_vmscan_busy),
41117 - atomic_read(&fscache_n_store_vmscan_cancelled));
41118 + atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
41119 + atomic_read_unchecked(&fscache_n_store_vmscan_gone),
41120 + atomic_read_unchecked(&fscache_n_store_vmscan_busy),
41121 + atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
41122
41123 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
41124 - atomic_read(&fscache_n_op_pend),
41125 - atomic_read(&fscache_n_op_run),
41126 - atomic_read(&fscache_n_op_enqueue),
41127 - atomic_read(&fscache_n_op_cancelled),
41128 - atomic_read(&fscache_n_op_rejected));
41129 + atomic_read_unchecked(&fscache_n_op_pend),
41130 + atomic_read_unchecked(&fscache_n_op_run),
41131 + atomic_read_unchecked(&fscache_n_op_enqueue),
41132 + atomic_read_unchecked(&fscache_n_op_cancelled),
41133 + atomic_read_unchecked(&fscache_n_op_rejected));
41134 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
41135 - atomic_read(&fscache_n_op_deferred_release),
41136 - atomic_read(&fscache_n_op_release),
41137 - atomic_read(&fscache_n_op_gc));
41138 + atomic_read_unchecked(&fscache_n_op_deferred_release),
41139 + atomic_read_unchecked(&fscache_n_op_release),
41140 + atomic_read_unchecked(&fscache_n_op_gc));
41141
41142 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
41143 atomic_read(&fscache_n_cop_alloc_object),
41144 diff -urNp linux-3.0.4/fs/fs_struct.c linux-3.0.4/fs/fs_struct.c
41145 --- linux-3.0.4/fs/fs_struct.c 2011-07-21 22:17:23.000000000 -0400
41146 +++ linux-3.0.4/fs/fs_struct.c 2011-08-23 21:48:14.000000000 -0400
41147 @@ -4,6 +4,7 @@
41148 #include <linux/path.h>
41149 #include <linux/slab.h>
41150 #include <linux/fs_struct.h>
41151 +#include <linux/grsecurity.h>
41152 #include "internal.h"
41153
41154 static inline void path_get_longterm(struct path *path)
41155 @@ -31,6 +32,7 @@ void set_fs_root(struct fs_struct *fs, s
41156 old_root = fs->root;
41157 fs->root = *path;
41158 path_get_longterm(path);
41159 + gr_set_chroot_entries(current, path);
41160 write_seqcount_end(&fs->seq);
41161 spin_unlock(&fs->lock);
41162 if (old_root.dentry)
41163 @@ -74,6 +76,7 @@ void chroot_fs_refs(struct path *old_roo
41164 && fs->root.mnt == old_root->mnt) {
41165 path_get_longterm(new_root);
41166 fs->root = *new_root;
41167 + gr_set_chroot_entries(p, new_root);
41168 count++;
41169 }
41170 if (fs->pwd.dentry == old_root->dentry
41171 @@ -109,7 +112,8 @@ void exit_fs(struct task_struct *tsk)
41172 spin_lock(&fs->lock);
41173 write_seqcount_begin(&fs->seq);
41174 tsk->fs = NULL;
41175 - kill = !--fs->users;
41176 + gr_clear_chroot_entries(tsk);
41177 + kill = !atomic_dec_return(&fs->users);
41178 write_seqcount_end(&fs->seq);
41179 spin_unlock(&fs->lock);
41180 task_unlock(tsk);
41181 @@ -123,7 +127,7 @@ struct fs_struct *copy_fs_struct(struct
41182 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
41183 /* We don't need to lock fs - think why ;-) */
41184 if (fs) {
41185 - fs->users = 1;
41186 + atomic_set(&fs->users, 1);
41187 fs->in_exec = 0;
41188 spin_lock_init(&fs->lock);
41189 seqcount_init(&fs->seq);
41190 @@ -132,6 +136,9 @@ struct fs_struct *copy_fs_struct(struct
41191 spin_lock(&old->lock);
41192 fs->root = old->root;
41193 path_get_longterm(&fs->root);
41194 + /* instead of calling gr_set_chroot_entries here,
41195 + we call it from every caller of this function
41196 + */
41197 fs->pwd = old->pwd;
41198 path_get_longterm(&fs->pwd);
41199 spin_unlock(&old->lock);
41200 @@ -150,8 +157,9 @@ int unshare_fs_struct(void)
41201
41202 task_lock(current);
41203 spin_lock(&fs->lock);
41204 - kill = !--fs->users;
41205 + kill = !atomic_dec_return(&fs->users);
41206 current->fs = new_fs;
41207 + gr_set_chroot_entries(current, &new_fs->root);
41208 spin_unlock(&fs->lock);
41209 task_unlock(current);
41210
41211 @@ -170,7 +178,7 @@ EXPORT_SYMBOL(current_umask);
41212
41213 /* to be mentioned only in INIT_TASK */
41214 struct fs_struct init_fs = {
41215 - .users = 1,
41216 + .users = ATOMIC_INIT(1),
41217 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
41218 .seq = SEQCNT_ZERO,
41219 .umask = 0022,
41220 @@ -186,12 +194,13 @@ void daemonize_fs_struct(void)
41221 task_lock(current);
41222
41223 spin_lock(&init_fs.lock);
41224 - init_fs.users++;
41225 + atomic_inc(&init_fs.users);
41226 spin_unlock(&init_fs.lock);
41227
41228 spin_lock(&fs->lock);
41229 current->fs = &init_fs;
41230 - kill = !--fs->users;
41231 + gr_set_chroot_entries(current, &current->fs->root);
41232 + kill = !atomic_dec_return(&fs->users);
41233 spin_unlock(&fs->lock);
41234
41235 task_unlock(current);
41236 diff -urNp linux-3.0.4/fs/fuse/cuse.c linux-3.0.4/fs/fuse/cuse.c
41237 --- linux-3.0.4/fs/fuse/cuse.c 2011-07-21 22:17:23.000000000 -0400
41238 +++ linux-3.0.4/fs/fuse/cuse.c 2011-08-23 21:47:56.000000000 -0400
41239 @@ -586,10 +586,12 @@ static int __init cuse_init(void)
41240 INIT_LIST_HEAD(&cuse_conntbl[i]);
41241
41242 /* inherit and extend fuse_dev_operations */
41243 - cuse_channel_fops = fuse_dev_operations;
41244 - cuse_channel_fops.owner = THIS_MODULE;
41245 - cuse_channel_fops.open = cuse_channel_open;
41246 - cuse_channel_fops.release = cuse_channel_release;
41247 + pax_open_kernel();
41248 + memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
41249 + *(void **)&cuse_channel_fops.owner = THIS_MODULE;
41250 + *(void **)&cuse_channel_fops.open = cuse_channel_open;
41251 + *(void **)&cuse_channel_fops.release = cuse_channel_release;
41252 + pax_close_kernel();
41253
41254 cuse_class = class_create(THIS_MODULE, "cuse");
41255 if (IS_ERR(cuse_class))
41256 diff -urNp linux-3.0.4/fs/fuse/dev.c linux-3.0.4/fs/fuse/dev.c
41257 --- linux-3.0.4/fs/fuse/dev.c 2011-09-02 18:11:26.000000000 -0400
41258 +++ linux-3.0.4/fs/fuse/dev.c 2011-08-29 23:26:27.000000000 -0400
41259 @@ -1238,7 +1238,7 @@ static ssize_t fuse_dev_splice_read(stru
41260 ret = 0;
41261 pipe_lock(pipe);
41262
41263 - if (!pipe->readers) {
41264 + if (!atomic_read(&pipe->readers)) {
41265 send_sig(SIGPIPE, current, 0);
41266 if (!ret)
41267 ret = -EPIPE;
41268 diff -urNp linux-3.0.4/fs/fuse/dir.c linux-3.0.4/fs/fuse/dir.c
41269 --- linux-3.0.4/fs/fuse/dir.c 2011-07-21 22:17:23.000000000 -0400
41270 +++ linux-3.0.4/fs/fuse/dir.c 2011-08-23 21:47:56.000000000 -0400
41271 @@ -1148,7 +1148,7 @@ static char *read_link(struct dentry *de
41272 return link;
41273 }
41274
41275 -static void free_link(char *link)
41276 +static void free_link(const char *link)
41277 {
41278 if (!IS_ERR(link))
41279 free_page((unsigned long) link);
41280 diff -urNp linux-3.0.4/fs/gfs2/inode.c linux-3.0.4/fs/gfs2/inode.c
41281 --- linux-3.0.4/fs/gfs2/inode.c 2011-07-21 22:17:23.000000000 -0400
41282 +++ linux-3.0.4/fs/gfs2/inode.c 2011-08-23 21:47:56.000000000 -0400
41283 @@ -1525,7 +1525,7 @@ out:
41284
41285 static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
41286 {
41287 - char *s = nd_get_link(nd);
41288 + const char *s = nd_get_link(nd);
41289 if (!IS_ERR(s))
41290 kfree(s);
41291 }
41292 diff -urNp linux-3.0.4/fs/hfsplus/catalog.c linux-3.0.4/fs/hfsplus/catalog.c
41293 --- linux-3.0.4/fs/hfsplus/catalog.c 2011-07-21 22:17:23.000000000 -0400
41294 +++ linux-3.0.4/fs/hfsplus/catalog.c 2011-08-23 21:48:14.000000000 -0400
41295 @@ -179,6 +179,8 @@ int hfsplus_find_cat(struct super_block
41296 int err;
41297 u16 type;
41298
41299 + pax_track_stack();
41300 +
41301 hfsplus_cat_build_key(sb, fd->search_key, cnid, NULL);
41302 err = hfs_brec_read(fd, &tmp, sizeof(hfsplus_cat_entry));
41303 if (err)
41304 @@ -210,6 +212,8 @@ int hfsplus_create_cat(u32 cnid, struct
41305 int entry_size;
41306 int err;
41307
41308 + pax_track_stack();
41309 +
41310 dprint(DBG_CAT_MOD, "create_cat: %s,%u(%d)\n",
41311 str->name, cnid, inode->i_nlink);
41312 hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd);
41313 @@ -349,6 +353,8 @@ int hfsplus_rename_cat(u32 cnid,
41314 int entry_size, type;
41315 int err = 0;
41316
41317 + pax_track_stack();
41318 +
41319 dprint(DBG_CAT_MOD, "rename_cat: %u - %lu,%s - %lu,%s\n",
41320 cnid, src_dir->i_ino, src_name->name,
41321 dst_dir->i_ino, dst_name->name);
41322 diff -urNp linux-3.0.4/fs/hfsplus/dir.c linux-3.0.4/fs/hfsplus/dir.c
41323 --- linux-3.0.4/fs/hfsplus/dir.c 2011-07-21 22:17:23.000000000 -0400
41324 +++ linux-3.0.4/fs/hfsplus/dir.c 2011-08-23 21:48:14.000000000 -0400
41325 @@ -129,6 +129,8 @@ static int hfsplus_readdir(struct file *
41326 struct hfsplus_readdir_data *rd;
41327 u16 type;
41328
41329 + pax_track_stack();
41330 +
41331 if (filp->f_pos >= inode->i_size)
41332 return 0;
41333
41334 diff -urNp linux-3.0.4/fs/hfsplus/inode.c linux-3.0.4/fs/hfsplus/inode.c
41335 --- linux-3.0.4/fs/hfsplus/inode.c 2011-07-21 22:17:23.000000000 -0400
41336 +++ linux-3.0.4/fs/hfsplus/inode.c 2011-08-23 21:48:14.000000000 -0400
41337 @@ -489,6 +489,8 @@ int hfsplus_cat_read_inode(struct inode
41338 int res = 0;
41339 u16 type;
41340
41341 + pax_track_stack();
41342 +
41343 type = hfs_bnode_read_u16(fd->bnode, fd->entryoffset);
41344
41345 HFSPLUS_I(inode)->linkid = 0;
41346 @@ -552,6 +554,8 @@ int hfsplus_cat_write_inode(struct inode
41347 struct hfs_find_data fd;
41348 hfsplus_cat_entry entry;
41349
41350 + pax_track_stack();
41351 +
41352 if (HFSPLUS_IS_RSRC(inode))
41353 main_inode = HFSPLUS_I(inode)->rsrc_inode;
41354
41355 diff -urNp linux-3.0.4/fs/hfsplus/ioctl.c linux-3.0.4/fs/hfsplus/ioctl.c
41356 --- linux-3.0.4/fs/hfsplus/ioctl.c 2011-07-21 22:17:23.000000000 -0400
41357 +++ linux-3.0.4/fs/hfsplus/ioctl.c 2011-08-23 21:48:14.000000000 -0400
41358 @@ -122,6 +122,8 @@ int hfsplus_setxattr(struct dentry *dent
41359 struct hfsplus_cat_file *file;
41360 int res;
41361
41362 + pax_track_stack();
41363 +
41364 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
41365 return -EOPNOTSUPP;
41366
41367 @@ -166,6 +168,8 @@ ssize_t hfsplus_getxattr(struct dentry *
41368 struct hfsplus_cat_file *file;
41369 ssize_t res = 0;
41370
41371 + pax_track_stack();
41372 +
41373 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
41374 return -EOPNOTSUPP;
41375
41376 diff -urNp linux-3.0.4/fs/hfsplus/super.c linux-3.0.4/fs/hfsplus/super.c
41377 --- linux-3.0.4/fs/hfsplus/super.c 2011-07-21 22:17:23.000000000 -0400
41378 +++ linux-3.0.4/fs/hfsplus/super.c 2011-08-23 21:48:14.000000000 -0400
41379 @@ -340,6 +340,8 @@ static int hfsplus_fill_super(struct sup
41380 struct nls_table *nls = NULL;
41381 int err;
41382
41383 + pax_track_stack();
41384 +
41385 err = -EINVAL;
41386 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
41387 if (!sbi)
41388 diff -urNp linux-3.0.4/fs/hugetlbfs/inode.c linux-3.0.4/fs/hugetlbfs/inode.c
41389 --- linux-3.0.4/fs/hugetlbfs/inode.c 2011-07-21 22:17:23.000000000 -0400
41390 +++ linux-3.0.4/fs/hugetlbfs/inode.c 2011-08-23 21:48:14.000000000 -0400
41391 @@ -914,7 +914,7 @@ static struct file_system_type hugetlbfs
41392 .kill_sb = kill_litter_super,
41393 };
41394
41395 -static struct vfsmount *hugetlbfs_vfsmount;
41396 +struct vfsmount *hugetlbfs_vfsmount;
41397
41398 static int can_do_hugetlb_shm(void)
41399 {
41400 diff -urNp linux-3.0.4/fs/inode.c linux-3.0.4/fs/inode.c
41401 --- linux-3.0.4/fs/inode.c 2011-07-21 22:17:23.000000000 -0400
41402 +++ linux-3.0.4/fs/inode.c 2011-08-23 21:47:56.000000000 -0400
41403 @@ -829,8 +829,8 @@ unsigned int get_next_ino(void)
41404
41405 #ifdef CONFIG_SMP
41406 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
41407 - static atomic_t shared_last_ino;
41408 - int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
41409 + static atomic_unchecked_t shared_last_ino;
41410 + int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
41411
41412 res = next - LAST_INO_BATCH;
41413 }
41414 diff -urNp linux-3.0.4/fs/jbd/checkpoint.c linux-3.0.4/fs/jbd/checkpoint.c
41415 --- linux-3.0.4/fs/jbd/checkpoint.c 2011-07-21 22:17:23.000000000 -0400
41416 +++ linux-3.0.4/fs/jbd/checkpoint.c 2011-08-23 21:48:14.000000000 -0400
41417 @@ -350,6 +350,8 @@ int log_do_checkpoint(journal_t *journal
41418 tid_t this_tid;
41419 int result;
41420
41421 + pax_track_stack();
41422 +
41423 jbd_debug(1, "Start checkpoint\n");
41424
41425 /*
41426 diff -urNp linux-3.0.4/fs/jffs2/compr_rtime.c linux-3.0.4/fs/jffs2/compr_rtime.c
41427 --- linux-3.0.4/fs/jffs2/compr_rtime.c 2011-07-21 22:17:23.000000000 -0400
41428 +++ linux-3.0.4/fs/jffs2/compr_rtime.c 2011-08-23 21:48:14.000000000 -0400
41429 @@ -37,6 +37,8 @@ static int jffs2_rtime_compress(unsigned
41430 int outpos = 0;
41431 int pos=0;
41432
41433 + pax_track_stack();
41434 +
41435 memset(positions,0,sizeof(positions));
41436
41437 while (pos < (*sourcelen) && outpos <= (*dstlen)-2) {
41438 @@ -78,6 +80,8 @@ static int jffs2_rtime_decompress(unsign
41439 int outpos = 0;
41440 int pos=0;
41441
41442 + pax_track_stack();
41443 +
41444 memset(positions,0,sizeof(positions));
41445
41446 while (outpos<destlen) {
41447 diff -urNp linux-3.0.4/fs/jffs2/compr_rubin.c linux-3.0.4/fs/jffs2/compr_rubin.c
41448 --- linux-3.0.4/fs/jffs2/compr_rubin.c 2011-07-21 22:17:23.000000000 -0400
41449 +++ linux-3.0.4/fs/jffs2/compr_rubin.c 2011-08-23 21:48:14.000000000 -0400
41450 @@ -314,6 +314,8 @@ static int jffs2_dynrubin_compress(unsig
41451 int ret;
41452 uint32_t mysrclen, mydstlen;
41453
41454 + pax_track_stack();
41455 +
41456 mysrclen = *sourcelen;
41457 mydstlen = *dstlen - 8;
41458
41459 diff -urNp linux-3.0.4/fs/jffs2/erase.c linux-3.0.4/fs/jffs2/erase.c
41460 --- linux-3.0.4/fs/jffs2/erase.c 2011-07-21 22:17:23.000000000 -0400
41461 +++ linux-3.0.4/fs/jffs2/erase.c 2011-08-23 21:47:56.000000000 -0400
41462 @@ -439,7 +439,8 @@ static void jffs2_mark_erased_block(stru
41463 struct jffs2_unknown_node marker = {
41464 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
41465 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
41466 - .totlen = cpu_to_je32(c->cleanmarker_size)
41467 + .totlen = cpu_to_je32(c->cleanmarker_size),
41468 + .hdr_crc = cpu_to_je32(0)
41469 };
41470
41471 jffs2_prealloc_raw_node_refs(c, jeb, 1);
41472 diff -urNp linux-3.0.4/fs/jffs2/wbuf.c linux-3.0.4/fs/jffs2/wbuf.c
41473 --- linux-3.0.4/fs/jffs2/wbuf.c 2011-07-21 22:17:23.000000000 -0400
41474 +++ linux-3.0.4/fs/jffs2/wbuf.c 2011-08-23 21:47:56.000000000 -0400
41475 @@ -1012,7 +1012,8 @@ static const struct jffs2_unknown_node o
41476 {
41477 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
41478 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
41479 - .totlen = constant_cpu_to_je32(8)
41480 + .totlen = constant_cpu_to_je32(8),
41481 + .hdr_crc = constant_cpu_to_je32(0)
41482 };
41483
41484 /*
41485 diff -urNp linux-3.0.4/fs/jffs2/xattr.c linux-3.0.4/fs/jffs2/xattr.c
41486 --- linux-3.0.4/fs/jffs2/xattr.c 2011-07-21 22:17:23.000000000 -0400
41487 +++ linux-3.0.4/fs/jffs2/xattr.c 2011-08-23 21:48:14.000000000 -0400
41488 @@ -773,6 +773,8 @@ void jffs2_build_xattr_subsystem(struct
41489
41490 BUG_ON(!(c->flags & JFFS2_SB_FLAG_BUILDING));
41491
41492 + pax_track_stack();
41493 +
41494 /* Phase.1 : Merge same xref */
41495 for (i=0; i < XREF_TMPHASH_SIZE; i++)
41496 xref_tmphash[i] = NULL;
41497 diff -urNp linux-3.0.4/fs/jfs/super.c linux-3.0.4/fs/jfs/super.c
41498 --- linux-3.0.4/fs/jfs/super.c 2011-07-21 22:17:23.000000000 -0400
41499 +++ linux-3.0.4/fs/jfs/super.c 2011-08-23 21:47:56.000000000 -0400
41500 @@ -803,7 +803,7 @@ static int __init init_jfs_fs(void)
41501
41502 jfs_inode_cachep =
41503 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
41504 - SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
41505 + SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
41506 init_once);
41507 if (jfs_inode_cachep == NULL)
41508 return -ENOMEM;
41509 diff -urNp linux-3.0.4/fs/Kconfig.binfmt linux-3.0.4/fs/Kconfig.binfmt
41510 --- linux-3.0.4/fs/Kconfig.binfmt 2011-07-21 22:17:23.000000000 -0400
41511 +++ linux-3.0.4/fs/Kconfig.binfmt 2011-08-23 21:47:56.000000000 -0400
41512 @@ -86,7 +86,7 @@ config HAVE_AOUT
41513
41514 config BINFMT_AOUT
41515 tristate "Kernel support for a.out and ECOFF binaries"
41516 - depends on HAVE_AOUT
41517 + depends on HAVE_AOUT && BROKEN
41518 ---help---
41519 A.out (Assembler.OUTput) is a set of formats for libraries and
41520 executables used in the earliest versions of UNIX. Linux used
41521 diff -urNp linux-3.0.4/fs/libfs.c linux-3.0.4/fs/libfs.c
41522 --- linux-3.0.4/fs/libfs.c 2011-07-21 22:17:23.000000000 -0400
41523 +++ linux-3.0.4/fs/libfs.c 2011-08-23 21:47:56.000000000 -0400
41524 @@ -163,6 +163,9 @@ int dcache_readdir(struct file * filp, v
41525
41526 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
41527 struct dentry *next;
41528 + char d_name[sizeof(next->d_iname)];
41529 + const unsigned char *name;
41530 +
41531 next = list_entry(p, struct dentry, d_u.d_child);
41532 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
41533 if (!simple_positive(next)) {
41534 @@ -172,7 +175,12 @@ int dcache_readdir(struct file * filp, v
41535
41536 spin_unlock(&next->d_lock);
41537 spin_unlock(&dentry->d_lock);
41538 - if (filldir(dirent, next->d_name.name,
41539 + name = next->d_name.name;
41540 + if (name == next->d_iname) {
41541 + memcpy(d_name, name, next->d_name.len);
41542 + name = d_name;
41543 + }
41544 + if (filldir(dirent, name,
41545 next->d_name.len, filp->f_pos,
41546 next->d_inode->i_ino,
41547 dt_type(next->d_inode)) < 0)
41548 diff -urNp linux-3.0.4/fs/lockd/clntproc.c linux-3.0.4/fs/lockd/clntproc.c
41549 --- linux-3.0.4/fs/lockd/clntproc.c 2011-07-21 22:17:23.000000000 -0400
41550 +++ linux-3.0.4/fs/lockd/clntproc.c 2011-08-23 21:48:14.000000000 -0400
41551 @@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt
41552 /*
41553 * Cookie counter for NLM requests
41554 */
41555 -static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
41556 +static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
41557
41558 void nlmclnt_next_cookie(struct nlm_cookie *c)
41559 {
41560 - u32 cookie = atomic_inc_return(&nlm_cookie);
41561 + u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
41562
41563 memcpy(c->data, &cookie, 4);
41564 c->len=4;
41565 @@ -620,6 +620,8 @@ nlmclnt_reclaim(struct nlm_host *host, s
41566 struct nlm_rqst reqst, *req;
41567 int status;
41568
41569 + pax_track_stack();
41570 +
41571 req = &reqst;
41572 memset(req, 0, sizeof(*req));
41573 locks_init_lock(&req->a_args.lock.fl);
41574 diff -urNp linux-3.0.4/fs/locks.c linux-3.0.4/fs/locks.c
41575 --- linux-3.0.4/fs/locks.c 2011-07-21 22:17:23.000000000 -0400
41576 +++ linux-3.0.4/fs/locks.c 2011-08-23 21:47:56.000000000 -0400
41577 @@ -2043,16 +2043,16 @@ void locks_remove_flock(struct file *fil
41578 return;
41579
41580 if (filp->f_op && filp->f_op->flock) {
41581 - struct file_lock fl = {
41582 + struct file_lock flock = {
41583 .fl_pid = current->tgid,
41584 .fl_file = filp,
41585 .fl_flags = FL_FLOCK,
41586 .fl_type = F_UNLCK,
41587 .fl_end = OFFSET_MAX,
41588 };
41589 - filp->f_op->flock(filp, F_SETLKW, &fl);
41590 - if (fl.fl_ops && fl.fl_ops->fl_release_private)
41591 - fl.fl_ops->fl_release_private(&fl);
41592 + filp->f_op->flock(filp, F_SETLKW, &flock);
41593 + if (flock.fl_ops && flock.fl_ops->fl_release_private)
41594 + flock.fl_ops->fl_release_private(&flock);
41595 }
41596
41597 lock_flocks();
41598 diff -urNp linux-3.0.4/fs/logfs/super.c linux-3.0.4/fs/logfs/super.c
41599 --- linux-3.0.4/fs/logfs/super.c 2011-07-21 22:17:23.000000000 -0400
41600 +++ linux-3.0.4/fs/logfs/super.c 2011-08-23 21:48:14.000000000 -0400
41601 @@ -266,6 +266,8 @@ static int logfs_recover_sb(struct super
41602 struct logfs_disk_super _ds1, *ds1 = &_ds1;
41603 int err, valid0, valid1;
41604
41605 + pax_track_stack();
41606 +
41607 /* read first superblock */
41608 err = wbuf_read(sb, super->s_sb_ofs[0], sizeof(*ds0), ds0);
41609 if (err)
41610 diff -urNp linux-3.0.4/fs/namei.c linux-3.0.4/fs/namei.c
41611 --- linux-3.0.4/fs/namei.c 2011-07-21 22:17:23.000000000 -0400
41612 +++ linux-3.0.4/fs/namei.c 2011-08-23 21:48:14.000000000 -0400
41613 @@ -237,21 +237,31 @@ int generic_permission(struct inode *ino
41614 return ret;
41615
41616 /*
41617 - * Read/write DACs are always overridable.
41618 - * Executable DACs are overridable for all directories and
41619 - * for non-directories that have least one exec bit set.
41620 + * Searching includes executable on directories, else just read.
41621 */
41622 - if (!(mask & MAY_EXEC) || execute_ok(inode))
41623 - if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
41624 + mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
41625 + if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE))) {
41626 +#ifdef CONFIG_GRKERNSEC
41627 + if (flags & IPERM_FLAG_RCU)
41628 + return -ECHILD;
41629 +#endif
41630 + if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
41631 return 0;
41632 + }
41633
41634 /*
41635 - * Searching includes executable on directories, else just read.
41636 + * Read/write DACs are always overridable.
41637 + * Executable DACs are overridable for all directories and
41638 + * for non-directories that have least one exec bit set.
41639 */
41640 - mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
41641 - if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE)))
41642 - if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
41643 + if (!(mask & MAY_EXEC) || execute_ok(inode)) {
41644 +#ifdef CONFIG_GRKERNSEC
41645 + if (flags & IPERM_FLAG_RCU)
41646 + return -ECHILD;
41647 +#endif
41648 + if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
41649 return 0;
41650 + }
41651
41652 return -EACCES;
41653 }
41654 @@ -547,6 +557,9 @@ static int complete_walk(struct nameidat
41655 br_read_unlock(vfsmount_lock);
41656 }
41657
41658 + if (!(nd->flags & LOOKUP_PARENT) && !gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt))
41659 + return -ENOENT;
41660 +
41661 if (likely(!(nd->flags & LOOKUP_JUMPED)))
41662 return 0;
41663
41664 @@ -593,9 +606,16 @@ static inline int exec_permission(struct
41665 if (ret == -ECHILD)
41666 return ret;
41667
41668 - if (ns_capable(ns, CAP_DAC_OVERRIDE) ||
41669 - ns_capable(ns, CAP_DAC_READ_SEARCH))
41670 + if (ns_capable_nolog(ns, CAP_DAC_OVERRIDE))
41671 goto ok;
41672 + else {
41673 +#ifdef CONFIG_GRKERNSEC
41674 + if (flags & IPERM_FLAG_RCU)
41675 + return -ECHILD;
41676 +#endif
41677 + if (ns_capable(ns, CAP_DAC_READ_SEARCH) || ns_capable(ns, CAP_DAC_OVERRIDE))
41678 + goto ok;
41679 + }
41680
41681 return ret;
41682 ok:
41683 @@ -703,11 +723,19 @@ follow_link(struct path *link, struct na
41684 return error;
41685 }
41686
41687 + if (gr_handle_follow_link(dentry->d_parent->d_inode,
41688 + dentry->d_inode, dentry, nd->path.mnt)) {
41689 + error = -EACCES;
41690 + *p = ERR_PTR(error); /* no ->put_link(), please */
41691 + path_put(&nd->path);
41692 + return error;
41693 + }
41694 +
41695 nd->last_type = LAST_BIND;
41696 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
41697 error = PTR_ERR(*p);
41698 if (!IS_ERR(*p)) {
41699 - char *s = nd_get_link(nd);
41700 + const char *s = nd_get_link(nd);
41701 error = 0;
41702 if (s)
41703 error = __vfs_follow_link(nd, s);
41704 @@ -1625,6 +1653,9 @@ static int do_path_lookup(int dfd, const
41705 retval = path_lookupat(dfd, name, flags | LOOKUP_REVAL, nd);
41706
41707 if (likely(!retval)) {
41708 + if (*name != '/' && nd->path.dentry && nd->inode && !gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
41709 + return -ENOENT;
41710 +
41711 if (unlikely(!audit_dummy_context())) {
41712 if (nd->path.dentry && nd->inode)
41713 audit_inode(name, nd->path.dentry);
41714 @@ -1935,6 +1966,30 @@ int vfs_create(struct inode *dir, struct
41715 return error;
41716 }
41717
41718 +/*
41719 + * Note that while the flag value (low two bits) for sys_open means:
41720 + * 00 - read-only
41721 + * 01 - write-only
41722 + * 10 - read-write
41723 + * 11 - special
41724 + * it is changed into
41725 + * 00 - no permissions needed
41726 + * 01 - read-permission
41727 + * 10 - write-permission
41728 + * 11 - read-write
41729 + * for the internal routines (ie open_namei()/follow_link() etc)
41730 + * This is more logical, and also allows the 00 "no perm needed"
41731 + * to be used for symlinks (where the permissions are checked
41732 + * later).
41733 + *
41734 +*/
41735 +static inline int open_to_namei_flags(int flag)
41736 +{
41737 + if ((flag+1) & O_ACCMODE)
41738 + flag++;
41739 + return flag;
41740 +}
41741 +
41742 static int may_open(struct path *path, int acc_mode, int flag)
41743 {
41744 struct dentry *dentry = path->dentry;
41745 @@ -1987,7 +2042,27 @@ static int may_open(struct path *path, i
41746 /*
41747 * Ensure there are no outstanding leases on the file.
41748 */
41749 - return break_lease(inode, flag);
41750 + error = break_lease(inode, flag);
41751 +
41752 + if (error)
41753 + return error;
41754 +
41755 + if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode)) {
41756 + error = -EPERM;
41757 + goto exit;
41758 + }
41759 +
41760 + if (gr_handle_rawio(inode)) {
41761 + error = -EPERM;
41762 + goto exit;
41763 + }
41764 +
41765 + if (!gr_acl_handle_open(dentry, path->mnt, open_to_namei_flags(flag))) {
41766 + error = -EACCES;
41767 + goto exit;
41768 + }
41769 +exit:
41770 + return error;
41771 }
41772
41773 static int handle_truncate(struct file *filp)
41774 @@ -2013,30 +2088,6 @@ static int handle_truncate(struct file *
41775 }
41776
41777 /*
41778 - * Note that while the flag value (low two bits) for sys_open means:
41779 - * 00 - read-only
41780 - * 01 - write-only
41781 - * 10 - read-write
41782 - * 11 - special
41783 - * it is changed into
41784 - * 00 - no permissions needed
41785 - * 01 - read-permission
41786 - * 10 - write-permission
41787 - * 11 - read-write
41788 - * for the internal routines (ie open_namei()/follow_link() etc)
41789 - * This is more logical, and also allows the 00 "no perm needed"
41790 - * to be used for symlinks (where the permissions are checked
41791 - * later).
41792 - *
41793 -*/
41794 -static inline int open_to_namei_flags(int flag)
41795 -{
41796 - if ((flag+1) & O_ACCMODE)
41797 - flag++;
41798 - return flag;
41799 -}
41800 -
41801 -/*
41802 * Handle the last step of open()
41803 */
41804 static struct file *do_last(struct nameidata *nd, struct path *path,
41805 @@ -2045,6 +2096,7 @@ static struct file *do_last(struct namei
41806 struct dentry *dir = nd->path.dentry;
41807 struct dentry *dentry;
41808 int open_flag = op->open_flag;
41809 + int flag = open_to_namei_flags(open_flag);
41810 int will_truncate = open_flag & O_TRUNC;
41811 int want_write = 0;
41812 int acc_mode = op->acc_mode;
41813 @@ -2132,6 +2184,12 @@ static struct file *do_last(struct namei
41814 /* Negative dentry, just create the file */
41815 if (!dentry->d_inode) {
41816 int mode = op->mode;
41817 +
41818 + if (!gr_acl_handle_creat(path->dentry, nd->path.dentry, path->mnt, flag, mode)) {
41819 + error = -EACCES;
41820 + goto exit_mutex_unlock;
41821 + }
41822 +
41823 if (!IS_POSIXACL(dir->d_inode))
41824 mode &= ~current_umask();
41825 /*
41826 @@ -2155,6 +2213,8 @@ static struct file *do_last(struct namei
41827 error = vfs_create(dir->d_inode, dentry, mode, nd);
41828 if (error)
41829 goto exit_mutex_unlock;
41830 + else
41831 + gr_handle_create(path->dentry, path->mnt);
41832 mutex_unlock(&dir->d_inode->i_mutex);
41833 dput(nd->path.dentry);
41834 nd->path.dentry = dentry;
41835 @@ -2164,6 +2224,14 @@ static struct file *do_last(struct namei
41836 /*
41837 * It already exists.
41838 */
41839 +
41840 + /* only check if O_CREAT is specified, all other checks need to go
41841 + into may_open */
41842 + if (gr_handle_fifo(path->dentry, path->mnt, dir, flag, acc_mode)) {
41843 + error = -EACCES;
41844 + goto exit_mutex_unlock;
41845 + }
41846 +
41847 mutex_unlock(&dir->d_inode->i_mutex);
41848 audit_inode(pathname, path->dentry);
41849
41850 @@ -2450,6 +2518,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
41851 error = may_mknod(mode);
41852 if (error)
41853 goto out_dput;
41854 +
41855 + if (gr_handle_chroot_mknod(dentry, nd.path.mnt, mode)) {
41856 + error = -EPERM;
41857 + goto out_dput;
41858 + }
41859 +
41860 + if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
41861 + error = -EACCES;
41862 + goto out_dput;
41863 + }
41864 +
41865 error = mnt_want_write(nd.path.mnt);
41866 if (error)
41867 goto out_dput;
41868 @@ -2470,6 +2549,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
41869 }
41870 out_drop_write:
41871 mnt_drop_write(nd.path.mnt);
41872 +
41873 + if (!error)
41874 + gr_handle_create(dentry, nd.path.mnt);
41875 out_dput:
41876 dput(dentry);
41877 out_unlock:
41878 @@ -2522,6 +2604,11 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
41879 if (IS_ERR(dentry))
41880 goto out_unlock;
41881
41882 + if (!gr_acl_handle_mkdir(dentry, nd.path.dentry, nd.path.mnt)) {
41883 + error = -EACCES;
41884 + goto out_dput;
41885 + }
41886 +
41887 if (!IS_POSIXACL(nd.path.dentry->d_inode))
41888 mode &= ~current_umask();
41889 error = mnt_want_write(nd.path.mnt);
41890 @@ -2533,6 +2620,10 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
41891 error = vfs_mkdir(nd.path.dentry->d_inode, dentry, mode);
41892 out_drop_write:
41893 mnt_drop_write(nd.path.mnt);
41894 +
41895 + if (!error)
41896 + gr_handle_create(dentry, nd.path.mnt);
41897 +
41898 out_dput:
41899 dput(dentry);
41900 out_unlock:
41901 @@ -2613,6 +2704,8 @@ static long do_rmdir(int dfd, const char
41902 char * name;
41903 struct dentry *dentry;
41904 struct nameidata nd;
41905 + ino_t saved_ino = 0;
41906 + dev_t saved_dev = 0;
41907
41908 error = user_path_parent(dfd, pathname, &nd, &name);
41909 if (error)
41910 @@ -2641,6 +2734,17 @@ static long do_rmdir(int dfd, const char
41911 error = -ENOENT;
41912 goto exit3;
41913 }
41914 +
41915 + if (dentry->d_inode->i_nlink <= 1) {
41916 + saved_ino = dentry->d_inode->i_ino;
41917 + saved_dev = gr_get_dev_from_dentry(dentry);
41918 + }
41919 +
41920 + if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
41921 + error = -EACCES;
41922 + goto exit3;
41923 + }
41924 +
41925 error = mnt_want_write(nd.path.mnt);
41926 if (error)
41927 goto exit3;
41928 @@ -2648,6 +2752,8 @@ static long do_rmdir(int dfd, const char
41929 if (error)
41930 goto exit4;
41931 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
41932 + if (!error && (saved_dev || saved_ino))
41933 + gr_handle_delete(saved_ino, saved_dev);
41934 exit4:
41935 mnt_drop_write(nd.path.mnt);
41936 exit3:
41937 @@ -2710,6 +2816,8 @@ static long do_unlinkat(int dfd, const c
41938 struct dentry *dentry;
41939 struct nameidata nd;
41940 struct inode *inode = NULL;
41941 + ino_t saved_ino = 0;
41942 + dev_t saved_dev = 0;
41943
41944 error = user_path_parent(dfd, pathname, &nd, &name);
41945 if (error)
41946 @@ -2732,6 +2840,16 @@ static long do_unlinkat(int dfd, const c
41947 if (!inode)
41948 goto slashes;
41949 ihold(inode);
41950 +
41951 + if (inode->i_nlink <= 1) {
41952 + saved_ino = inode->i_ino;
41953 + saved_dev = gr_get_dev_from_dentry(dentry);
41954 + }
41955 + if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
41956 + error = -EACCES;
41957 + goto exit2;
41958 + }
41959 +
41960 error = mnt_want_write(nd.path.mnt);
41961 if (error)
41962 goto exit2;
41963 @@ -2739,6 +2857,8 @@ static long do_unlinkat(int dfd, const c
41964 if (error)
41965 goto exit3;
41966 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
41967 + if (!error && (saved_ino || saved_dev))
41968 + gr_handle_delete(saved_ino, saved_dev);
41969 exit3:
41970 mnt_drop_write(nd.path.mnt);
41971 exit2:
41972 @@ -2816,6 +2936,11 @@ SYSCALL_DEFINE3(symlinkat, const char __
41973 if (IS_ERR(dentry))
41974 goto out_unlock;
41975
41976 + if (!gr_acl_handle_symlink(dentry, nd.path.dentry, nd.path.mnt, from)) {
41977 + error = -EACCES;
41978 + goto out_dput;
41979 + }
41980 +
41981 error = mnt_want_write(nd.path.mnt);
41982 if (error)
41983 goto out_dput;
41984 @@ -2823,6 +2948,8 @@ SYSCALL_DEFINE3(symlinkat, const char __
41985 if (error)
41986 goto out_drop_write;
41987 error = vfs_symlink(nd.path.dentry->d_inode, dentry, from);
41988 + if (!error)
41989 + gr_handle_create(dentry, nd.path.mnt);
41990 out_drop_write:
41991 mnt_drop_write(nd.path.mnt);
41992 out_dput:
41993 @@ -2931,6 +3058,20 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
41994 error = PTR_ERR(new_dentry);
41995 if (IS_ERR(new_dentry))
41996 goto out_unlock;
41997 +
41998 + if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
41999 + old_path.dentry->d_inode,
42000 + old_path.dentry->d_inode->i_mode, to)) {
42001 + error = -EACCES;
42002 + goto out_dput;
42003 + }
42004 +
42005 + if (!gr_acl_handle_link(new_dentry, nd.path.dentry, nd.path.mnt,
42006 + old_path.dentry, old_path.mnt, to)) {
42007 + error = -EACCES;
42008 + goto out_dput;
42009 + }
42010 +
42011 error = mnt_want_write(nd.path.mnt);
42012 if (error)
42013 goto out_dput;
42014 @@ -2938,6 +3079,8 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
42015 if (error)
42016 goto out_drop_write;
42017 error = vfs_link(old_path.dentry, nd.path.dentry->d_inode, new_dentry);
42018 + if (!error)
42019 + gr_handle_create(new_dentry, nd.path.mnt);
42020 out_drop_write:
42021 mnt_drop_write(nd.path.mnt);
42022 out_dput:
42023 @@ -3113,6 +3256,8 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
42024 char *to;
42025 int error;
42026
42027 + pax_track_stack();
42028 +
42029 error = user_path_parent(olddfd, oldname, &oldnd, &from);
42030 if (error)
42031 goto exit;
42032 @@ -3169,6 +3314,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
42033 if (new_dentry == trap)
42034 goto exit5;
42035
42036 + error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
42037 + old_dentry, old_dir->d_inode, oldnd.path.mnt,
42038 + to);
42039 + if (error)
42040 + goto exit5;
42041 +
42042 error = mnt_want_write(oldnd.path.mnt);
42043 if (error)
42044 goto exit5;
42045 @@ -3178,6 +3329,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
42046 goto exit6;
42047 error = vfs_rename(old_dir->d_inode, old_dentry,
42048 new_dir->d_inode, new_dentry);
42049 + if (!error)
42050 + gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
42051 + new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
42052 exit6:
42053 mnt_drop_write(oldnd.path.mnt);
42054 exit5:
42055 @@ -3203,6 +3357,8 @@ SYSCALL_DEFINE2(rename, const char __use
42056
42057 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
42058 {
42059 + char tmpbuf[64];
42060 + const char *newlink;
42061 int len;
42062
42063 len = PTR_ERR(link);
42064 @@ -3212,7 +3368,14 @@ int vfs_readlink(struct dentry *dentry,
42065 len = strlen(link);
42066 if (len > (unsigned) buflen)
42067 len = buflen;
42068 - if (copy_to_user(buffer, link, len))
42069 +
42070 + if (len < sizeof(tmpbuf)) {
42071 + memcpy(tmpbuf, link, len);
42072 + newlink = tmpbuf;
42073 + } else
42074 + newlink = link;
42075 +
42076 + if (copy_to_user(buffer, newlink, len))
42077 len = -EFAULT;
42078 out:
42079 return len;
42080 diff -urNp linux-3.0.4/fs/namespace.c linux-3.0.4/fs/namespace.c
42081 --- linux-3.0.4/fs/namespace.c 2011-07-21 22:17:23.000000000 -0400
42082 +++ linux-3.0.4/fs/namespace.c 2011-08-23 21:48:14.000000000 -0400
42083 @@ -1328,6 +1328,9 @@ static int do_umount(struct vfsmount *mn
42084 if (!(sb->s_flags & MS_RDONLY))
42085 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
42086 up_write(&sb->s_umount);
42087 +
42088 + gr_log_remount(mnt->mnt_devname, retval);
42089 +
42090 return retval;
42091 }
42092
42093 @@ -1347,6 +1350,9 @@ static int do_umount(struct vfsmount *mn
42094 br_write_unlock(vfsmount_lock);
42095 up_write(&namespace_sem);
42096 release_mounts(&umount_list);
42097 +
42098 + gr_log_unmount(mnt->mnt_devname, retval);
42099 +
42100 return retval;
42101 }
42102
42103 @@ -2338,6 +2344,16 @@ long do_mount(char *dev_name, char *dir_
42104 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
42105 MS_STRICTATIME);
42106
42107 + if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
42108 + retval = -EPERM;
42109 + goto dput_out;
42110 + }
42111 +
42112 + if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
42113 + retval = -EPERM;
42114 + goto dput_out;
42115 + }
42116 +
42117 if (flags & MS_REMOUNT)
42118 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
42119 data_page);
42120 @@ -2352,6 +2368,9 @@ long do_mount(char *dev_name, char *dir_
42121 dev_name, data_page);
42122 dput_out:
42123 path_put(&path);
42124 +
42125 + gr_log_mount(dev_name, dir_name, retval);
42126 +
42127 return retval;
42128 }
42129
42130 @@ -2575,6 +2594,11 @@ SYSCALL_DEFINE2(pivot_root, const char _
42131 if (error)
42132 goto out2;
42133
42134 + if (gr_handle_chroot_pivot()) {
42135 + error = -EPERM;
42136 + goto out2;
42137 + }
42138 +
42139 get_fs_root(current->fs, &root);
42140 error = lock_mount(&old);
42141 if (error)
42142 diff -urNp linux-3.0.4/fs/ncpfs/dir.c linux-3.0.4/fs/ncpfs/dir.c
42143 --- linux-3.0.4/fs/ncpfs/dir.c 2011-07-21 22:17:23.000000000 -0400
42144 +++ linux-3.0.4/fs/ncpfs/dir.c 2011-08-23 21:48:14.000000000 -0400
42145 @@ -299,6 +299,8 @@ ncp_lookup_validate(struct dentry *dentr
42146 int res, val = 0, len;
42147 __u8 __name[NCP_MAXPATHLEN + 1];
42148
42149 + pax_track_stack();
42150 +
42151 if (dentry == dentry->d_sb->s_root)
42152 return 1;
42153
42154 @@ -844,6 +846,8 @@ static struct dentry *ncp_lookup(struct
42155 int error, res, len;
42156 __u8 __name[NCP_MAXPATHLEN + 1];
42157
42158 + pax_track_stack();
42159 +
42160 error = -EIO;
42161 if (!ncp_conn_valid(server))
42162 goto finished;
42163 @@ -931,6 +935,8 @@ int ncp_create_new(struct inode *dir, st
42164 PPRINTK("ncp_create_new: creating %s/%s, mode=%x\n",
42165 dentry->d_parent->d_name.name, dentry->d_name.name, mode);
42166
42167 + pax_track_stack();
42168 +
42169 ncp_age_dentry(server, dentry);
42170 len = sizeof(__name);
42171 error = ncp_io2vol(server, __name, &len, dentry->d_name.name,
42172 @@ -992,6 +998,8 @@ static int ncp_mkdir(struct inode *dir,
42173 int error, len;
42174 __u8 __name[NCP_MAXPATHLEN + 1];
42175
42176 + pax_track_stack();
42177 +
42178 DPRINTK("ncp_mkdir: making %s/%s\n",
42179 dentry->d_parent->d_name.name, dentry->d_name.name);
42180
42181 @@ -1140,6 +1148,8 @@ static int ncp_rename(struct inode *old_
42182 int old_len, new_len;
42183 __u8 __old_name[NCP_MAXPATHLEN + 1], __new_name[NCP_MAXPATHLEN + 1];
42184
42185 + pax_track_stack();
42186 +
42187 DPRINTK("ncp_rename: %s/%s to %s/%s\n",
42188 old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
42189 new_dentry->d_parent->d_name.name, new_dentry->d_name.name);
42190 diff -urNp linux-3.0.4/fs/ncpfs/inode.c linux-3.0.4/fs/ncpfs/inode.c
42191 --- linux-3.0.4/fs/ncpfs/inode.c 2011-07-21 22:17:23.000000000 -0400
42192 +++ linux-3.0.4/fs/ncpfs/inode.c 2011-08-23 21:48:14.000000000 -0400
42193 @@ -461,6 +461,8 @@ static int ncp_fill_super(struct super_b
42194 #endif
42195 struct ncp_entry_info finfo;
42196
42197 + pax_track_stack();
42198 +
42199 memset(&data, 0, sizeof(data));
42200 server = kzalloc(sizeof(struct ncp_server), GFP_KERNEL);
42201 if (!server)
42202 diff -urNp linux-3.0.4/fs/nfs/inode.c linux-3.0.4/fs/nfs/inode.c
42203 --- linux-3.0.4/fs/nfs/inode.c 2011-07-21 22:17:23.000000000 -0400
42204 +++ linux-3.0.4/fs/nfs/inode.c 2011-08-23 21:47:56.000000000 -0400
42205 @@ -150,7 +150,7 @@ static void nfs_zap_caches_locked(struct
42206 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
42207 nfsi->attrtimeo_timestamp = jiffies;
42208
42209 - memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
42210 + memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf));
42211 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
42212 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
42213 else
42214 @@ -1000,16 +1000,16 @@ static int nfs_size_need_update(const st
42215 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
42216 }
42217
42218 -static atomic_long_t nfs_attr_generation_counter;
42219 +static atomic_long_unchecked_t nfs_attr_generation_counter;
42220
42221 static unsigned long nfs_read_attr_generation_counter(void)
42222 {
42223 - return atomic_long_read(&nfs_attr_generation_counter);
42224 + return atomic_long_read_unchecked(&nfs_attr_generation_counter);
42225 }
42226
42227 unsigned long nfs_inc_attr_generation_counter(void)
42228 {
42229 - return atomic_long_inc_return(&nfs_attr_generation_counter);
42230 + return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
42231 }
42232
42233 void nfs_fattr_init(struct nfs_fattr *fattr)
42234 diff -urNp linux-3.0.4/fs/nfsd/nfs4state.c linux-3.0.4/fs/nfsd/nfs4state.c
42235 --- linux-3.0.4/fs/nfsd/nfs4state.c 2011-09-02 18:11:21.000000000 -0400
42236 +++ linux-3.0.4/fs/nfsd/nfs4state.c 2011-08-23 21:48:14.000000000 -0400
42237 @@ -3794,6 +3794,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struc
42238 unsigned int strhashval;
42239 int err;
42240
42241 + pax_track_stack();
42242 +
42243 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
42244 (long long) lock->lk_offset,
42245 (long long) lock->lk_length);
42246 diff -urNp linux-3.0.4/fs/nfsd/nfs4xdr.c linux-3.0.4/fs/nfsd/nfs4xdr.c
42247 --- linux-3.0.4/fs/nfsd/nfs4xdr.c 2011-07-21 22:17:23.000000000 -0400
42248 +++ linux-3.0.4/fs/nfsd/nfs4xdr.c 2011-08-23 21:48:14.000000000 -0400
42249 @@ -1788,6 +1788,8 @@ nfsd4_encode_fattr(struct svc_fh *fhp, s
42250 .dentry = dentry,
42251 };
42252
42253 + pax_track_stack();
42254 +
42255 BUG_ON(bmval1 & NFSD_WRITEONLY_ATTRS_WORD1);
42256 BUG_ON(bmval0 & ~nfsd_suppattrs0(minorversion));
42257 BUG_ON(bmval1 & ~nfsd_suppattrs1(minorversion));
42258 diff -urNp linux-3.0.4/fs/nfsd/vfs.c linux-3.0.4/fs/nfsd/vfs.c
42259 --- linux-3.0.4/fs/nfsd/vfs.c 2011-07-21 22:17:23.000000000 -0400
42260 +++ linux-3.0.4/fs/nfsd/vfs.c 2011-08-23 21:47:56.000000000 -0400
42261 @@ -896,7 +896,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, st
42262 } else {
42263 oldfs = get_fs();
42264 set_fs(KERNEL_DS);
42265 - host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
42266 + host_err = vfs_readv(file, (__force struct iovec __user *)vec, vlen, &offset);
42267 set_fs(oldfs);
42268 }
42269
42270 @@ -1000,7 +1000,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, s
42271
42272 /* Write the data. */
42273 oldfs = get_fs(); set_fs(KERNEL_DS);
42274 - host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
42275 + host_err = vfs_writev(file, (__force struct iovec __user *)vec, vlen, &offset);
42276 set_fs(oldfs);
42277 if (host_err < 0)
42278 goto out_nfserr;
42279 @@ -1535,7 +1535,7 @@ nfsd_readlink(struct svc_rqst *rqstp, st
42280 */
42281
42282 oldfs = get_fs(); set_fs(KERNEL_DS);
42283 - host_err = inode->i_op->readlink(dentry, buf, *lenp);
42284 + host_err = inode->i_op->readlink(dentry, (__force char __user *)buf, *lenp);
42285 set_fs(oldfs);
42286
42287 if (host_err < 0)
42288 diff -urNp linux-3.0.4/fs/notify/fanotify/fanotify_user.c linux-3.0.4/fs/notify/fanotify/fanotify_user.c
42289 --- linux-3.0.4/fs/notify/fanotify/fanotify_user.c 2011-07-21 22:17:23.000000000 -0400
42290 +++ linux-3.0.4/fs/notify/fanotify/fanotify_user.c 2011-08-23 21:48:14.000000000 -0400
42291 @@ -276,7 +276,8 @@ static ssize_t copy_event_to_user(struct
42292 goto out_close_fd;
42293
42294 ret = -EFAULT;
42295 - if (copy_to_user(buf, &fanotify_event_metadata,
42296 + if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
42297 + copy_to_user(buf, &fanotify_event_metadata,
42298 fanotify_event_metadata.event_len))
42299 goto out_kill_access_response;
42300
42301 diff -urNp linux-3.0.4/fs/notify/notification.c linux-3.0.4/fs/notify/notification.c
42302 --- linux-3.0.4/fs/notify/notification.c 2011-07-21 22:17:23.000000000 -0400
42303 +++ linux-3.0.4/fs/notify/notification.c 2011-08-23 21:47:56.000000000 -0400
42304 @@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event
42305 * get set to 0 so it will never get 'freed'
42306 */
42307 static struct fsnotify_event *q_overflow_event;
42308 -static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
42309 +static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
42310
42311 /**
42312 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
42313 @@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = A
42314 */
42315 u32 fsnotify_get_cookie(void)
42316 {
42317 - return atomic_inc_return(&fsnotify_sync_cookie);
42318 + return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
42319 }
42320 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
42321
42322 diff -urNp linux-3.0.4/fs/ntfs/dir.c linux-3.0.4/fs/ntfs/dir.c
42323 --- linux-3.0.4/fs/ntfs/dir.c 2011-07-21 22:17:23.000000000 -0400
42324 +++ linux-3.0.4/fs/ntfs/dir.c 2011-08-23 21:47:56.000000000 -0400
42325 @@ -1329,7 +1329,7 @@ find_next_index_buffer:
42326 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
42327 ~(s64)(ndir->itype.index.block_size - 1)));
42328 /* Bounds checks. */
42329 - if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
42330 + if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
42331 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
42332 "inode 0x%lx or driver bug.", vdir->i_ino);
42333 goto err_out;
42334 diff -urNp linux-3.0.4/fs/ntfs/file.c linux-3.0.4/fs/ntfs/file.c
42335 --- linux-3.0.4/fs/ntfs/file.c 2011-07-21 22:17:23.000000000 -0400
42336 +++ linux-3.0.4/fs/ntfs/file.c 2011-08-23 21:47:56.000000000 -0400
42337 @@ -2222,6 +2222,6 @@ const struct inode_operations ntfs_file_
42338 #endif /* NTFS_RW */
42339 };
42340
42341 -const struct file_operations ntfs_empty_file_ops = {};
42342 +const struct file_operations ntfs_empty_file_ops __read_only;
42343
42344 -const struct inode_operations ntfs_empty_inode_ops = {};
42345 +const struct inode_operations ntfs_empty_inode_ops __read_only;
42346 diff -urNp linux-3.0.4/fs/ocfs2/localalloc.c linux-3.0.4/fs/ocfs2/localalloc.c
42347 --- linux-3.0.4/fs/ocfs2/localalloc.c 2011-07-21 22:17:23.000000000 -0400
42348 +++ linux-3.0.4/fs/ocfs2/localalloc.c 2011-08-23 21:47:56.000000000 -0400
42349 @@ -1283,7 +1283,7 @@ static int ocfs2_local_alloc_slide_windo
42350 goto bail;
42351 }
42352
42353 - atomic_inc(&osb->alloc_stats.moves);
42354 + atomic_inc_unchecked(&osb->alloc_stats.moves);
42355
42356 bail:
42357 if (handle)
42358 diff -urNp linux-3.0.4/fs/ocfs2/namei.c linux-3.0.4/fs/ocfs2/namei.c
42359 --- linux-3.0.4/fs/ocfs2/namei.c 2011-07-21 22:17:23.000000000 -0400
42360 +++ linux-3.0.4/fs/ocfs2/namei.c 2011-08-23 21:48:14.000000000 -0400
42361 @@ -1063,6 +1063,8 @@ static int ocfs2_rename(struct inode *ol
42362 struct ocfs2_dir_lookup_result orphan_insert = { NULL, };
42363 struct ocfs2_dir_lookup_result target_insert = { NULL, };
42364
42365 + pax_track_stack();
42366 +
42367 /* At some point it might be nice to break this function up a
42368 * bit. */
42369
42370 diff -urNp linux-3.0.4/fs/ocfs2/ocfs2.h linux-3.0.4/fs/ocfs2/ocfs2.h
42371 --- linux-3.0.4/fs/ocfs2/ocfs2.h 2011-07-21 22:17:23.000000000 -0400
42372 +++ linux-3.0.4/fs/ocfs2/ocfs2.h 2011-08-23 21:47:56.000000000 -0400
42373 @@ -235,11 +235,11 @@ enum ocfs2_vol_state
42374
42375 struct ocfs2_alloc_stats
42376 {
42377 - atomic_t moves;
42378 - atomic_t local_data;
42379 - atomic_t bitmap_data;
42380 - atomic_t bg_allocs;
42381 - atomic_t bg_extends;
42382 + atomic_unchecked_t moves;
42383 + atomic_unchecked_t local_data;
42384 + atomic_unchecked_t bitmap_data;
42385 + atomic_unchecked_t bg_allocs;
42386 + atomic_unchecked_t bg_extends;
42387 };
42388
42389 enum ocfs2_local_alloc_state
42390 diff -urNp linux-3.0.4/fs/ocfs2/suballoc.c linux-3.0.4/fs/ocfs2/suballoc.c
42391 --- linux-3.0.4/fs/ocfs2/suballoc.c 2011-07-21 22:17:23.000000000 -0400
42392 +++ linux-3.0.4/fs/ocfs2/suballoc.c 2011-08-23 21:47:56.000000000 -0400
42393 @@ -872,7 +872,7 @@ static int ocfs2_reserve_suballoc_bits(s
42394 mlog_errno(status);
42395 goto bail;
42396 }
42397 - atomic_inc(&osb->alloc_stats.bg_extends);
42398 + atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
42399
42400 /* You should never ask for this much metadata */
42401 BUG_ON(bits_wanted >
42402 @@ -2008,7 +2008,7 @@ int ocfs2_claim_metadata(handle_t *handl
42403 mlog_errno(status);
42404 goto bail;
42405 }
42406 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
42407 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
42408
42409 *suballoc_loc = res.sr_bg_blkno;
42410 *suballoc_bit_start = res.sr_bit_offset;
42411 @@ -2172,7 +2172,7 @@ int ocfs2_claim_new_inode_at_loc(handle_
42412 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
42413 res->sr_bits);
42414
42415 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
42416 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
42417
42418 BUG_ON(res->sr_bits != 1);
42419
42420 @@ -2214,7 +2214,7 @@ int ocfs2_claim_new_inode(handle_t *hand
42421 mlog_errno(status);
42422 goto bail;
42423 }
42424 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
42425 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
42426
42427 BUG_ON(res.sr_bits != 1);
42428
42429 @@ -2318,7 +2318,7 @@ int __ocfs2_claim_clusters(handle_t *han
42430 cluster_start,
42431 num_clusters);
42432 if (!status)
42433 - atomic_inc(&osb->alloc_stats.local_data);
42434 + atomic_inc_unchecked(&osb->alloc_stats.local_data);
42435 } else {
42436 if (min_clusters > (osb->bitmap_cpg - 1)) {
42437 /* The only paths asking for contiguousness
42438 @@ -2344,7 +2344,7 @@ int __ocfs2_claim_clusters(handle_t *han
42439 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
42440 res.sr_bg_blkno,
42441 res.sr_bit_offset);
42442 - atomic_inc(&osb->alloc_stats.bitmap_data);
42443 + atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
42444 *num_clusters = res.sr_bits;
42445 }
42446 }
42447 diff -urNp linux-3.0.4/fs/ocfs2/super.c linux-3.0.4/fs/ocfs2/super.c
42448 --- linux-3.0.4/fs/ocfs2/super.c 2011-07-21 22:17:23.000000000 -0400
42449 +++ linux-3.0.4/fs/ocfs2/super.c 2011-08-23 21:47:56.000000000 -0400
42450 @@ -300,11 +300,11 @@ static int ocfs2_osb_dump(struct ocfs2_s
42451 "%10s => GlobalAllocs: %d LocalAllocs: %d "
42452 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
42453 "Stats",
42454 - atomic_read(&osb->alloc_stats.bitmap_data),
42455 - atomic_read(&osb->alloc_stats.local_data),
42456 - atomic_read(&osb->alloc_stats.bg_allocs),
42457 - atomic_read(&osb->alloc_stats.moves),
42458 - atomic_read(&osb->alloc_stats.bg_extends));
42459 + atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
42460 + atomic_read_unchecked(&osb->alloc_stats.local_data),
42461 + atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
42462 + atomic_read_unchecked(&osb->alloc_stats.moves),
42463 + atomic_read_unchecked(&osb->alloc_stats.bg_extends));
42464
42465 out += snprintf(buf + out, len - out,
42466 "%10s => State: %u Descriptor: %llu Size: %u bits "
42467 @@ -2112,11 +2112,11 @@ static int ocfs2_initialize_super(struct
42468 spin_lock_init(&osb->osb_xattr_lock);
42469 ocfs2_init_steal_slots(osb);
42470
42471 - atomic_set(&osb->alloc_stats.moves, 0);
42472 - atomic_set(&osb->alloc_stats.local_data, 0);
42473 - atomic_set(&osb->alloc_stats.bitmap_data, 0);
42474 - atomic_set(&osb->alloc_stats.bg_allocs, 0);
42475 - atomic_set(&osb->alloc_stats.bg_extends, 0);
42476 + atomic_set_unchecked(&osb->alloc_stats.moves, 0);
42477 + atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
42478 + atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
42479 + atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
42480 + atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
42481
42482 /* Copy the blockcheck stats from the superblock probe */
42483 osb->osb_ecc_stats = *stats;
42484 diff -urNp linux-3.0.4/fs/ocfs2/symlink.c linux-3.0.4/fs/ocfs2/symlink.c
42485 --- linux-3.0.4/fs/ocfs2/symlink.c 2011-07-21 22:17:23.000000000 -0400
42486 +++ linux-3.0.4/fs/ocfs2/symlink.c 2011-08-23 21:47:56.000000000 -0400
42487 @@ -142,7 +142,7 @@ bail:
42488
42489 static void ocfs2_fast_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
42490 {
42491 - char *link = nd_get_link(nd);
42492 + const char *link = nd_get_link(nd);
42493 if (!IS_ERR(link))
42494 kfree(link);
42495 }
42496 diff -urNp linux-3.0.4/fs/open.c linux-3.0.4/fs/open.c
42497 --- linux-3.0.4/fs/open.c 2011-07-21 22:17:23.000000000 -0400
42498 +++ linux-3.0.4/fs/open.c 2011-09-14 09:16:46.000000000 -0400
42499 @@ -112,6 +112,10 @@ static long do_sys_truncate(const char _
42500 error = locks_verify_truncate(inode, NULL, length);
42501 if (!error)
42502 error = security_path_truncate(&path);
42503 +
42504 + if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
42505 + error = -EACCES;
42506 +
42507 if (!error)
42508 error = do_truncate(path.dentry, length, 0, NULL);
42509
42510 @@ -358,6 +362,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, con
42511 if (__mnt_is_readonly(path.mnt))
42512 res = -EROFS;
42513
42514 + if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
42515 + res = -EACCES;
42516 +
42517 out_path_release:
42518 path_put(&path);
42519 out:
42520 @@ -384,6 +391,8 @@ SYSCALL_DEFINE1(chdir, const char __user
42521 if (error)
42522 goto dput_and_out;
42523
42524 + gr_log_chdir(path.dentry, path.mnt);
42525 +
42526 set_fs_pwd(current->fs, &path);
42527
42528 dput_and_out:
42529 @@ -410,6 +419,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd
42530 goto out_putf;
42531
42532 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
42533 +
42534 + if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
42535 + error = -EPERM;
42536 +
42537 + if (!error)
42538 + gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
42539 +
42540 if (!error)
42541 set_fs_pwd(current->fs, &file->f_path);
42542 out_putf:
42543 @@ -438,7 +454,13 @@ SYSCALL_DEFINE1(chroot, const char __use
42544 if (error)
42545 goto dput_and_out;
42546
42547 + if (gr_handle_chroot_chroot(path.dentry, path.mnt))
42548 + goto dput_and_out;
42549 +
42550 set_fs_root(current->fs, &path);
42551 +
42552 + gr_handle_chroot_chdir(&path);
42553 +
42554 error = 0;
42555 dput_and_out:
42556 path_put(&path);
42557 @@ -466,12 +488,25 @@ SYSCALL_DEFINE2(fchmod, unsigned int, fd
42558 err = mnt_want_write_file(file);
42559 if (err)
42560 goto out_putf;
42561 +
42562 mutex_lock(&inode->i_mutex);
42563 +
42564 + if (!gr_acl_handle_fchmod(dentry, file->f_vfsmnt, mode)) {
42565 + err = -EACCES;
42566 + goto out_unlock;
42567 + }
42568 +
42569 err = security_path_chmod(dentry, file->f_vfsmnt, mode);
42570 if (err)
42571 goto out_unlock;
42572 if (mode == (mode_t) -1)
42573 mode = inode->i_mode;
42574 +
42575 + if (gr_handle_chroot_chmod(dentry, file->f_vfsmnt, mode)) {
42576 + err = -EACCES;
42577 + goto out_unlock;
42578 + }
42579 +
42580 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
42581 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
42582 err = notify_change(dentry, &newattrs);
42583 @@ -499,12 +534,25 @@ SYSCALL_DEFINE3(fchmodat, int, dfd, cons
42584 error = mnt_want_write(path.mnt);
42585 if (error)
42586 goto dput_and_out;
42587 +
42588 mutex_lock(&inode->i_mutex);
42589 +
42590 + if (!gr_acl_handle_chmod(path.dentry, path.mnt, mode)) {
42591 + error = -EACCES;
42592 + goto out_unlock;
42593 + }
42594 +
42595 error = security_path_chmod(path.dentry, path.mnt, mode);
42596 if (error)
42597 goto out_unlock;
42598 if (mode == (mode_t) -1)
42599 mode = inode->i_mode;
42600 +
42601 + if (gr_handle_chroot_chmod(path.dentry, path.mnt, mode)) {
42602 + error = -EACCES;
42603 + goto out_unlock;
42604 + }
42605 +
42606 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
42607 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
42608 error = notify_change(path.dentry, &newattrs);
42609 @@ -528,6 +576,9 @@ static int chown_common(struct path *pat
42610 int error;
42611 struct iattr newattrs;
42612
42613 + if (!gr_acl_handle_chown(path->dentry, path->mnt))
42614 + return -EACCES;
42615 +
42616 newattrs.ia_valid = ATTR_CTIME;
42617 if (user != (uid_t) -1) {
42618 newattrs.ia_valid |= ATTR_UID;
42619 @@ -998,7 +1049,10 @@ long do_sys_open(int dfd, const char __u
42620 if (!IS_ERR(tmp)) {
42621 fd = get_unused_fd_flags(flags);
42622 if (fd >= 0) {
42623 - struct file *f = do_filp_open(dfd, tmp, &op, lookup);
42624 + struct file *f;
42625 + /* don't allow to be set by userland */
42626 + flags &= ~FMODE_GREXEC;
42627 + f = do_filp_open(dfd, tmp, &op, lookup);
42628 if (IS_ERR(f)) {
42629 put_unused_fd(fd);
42630 fd = PTR_ERR(f);
42631 diff -urNp linux-3.0.4/fs/partitions/ldm.c linux-3.0.4/fs/partitions/ldm.c
42632 --- linux-3.0.4/fs/partitions/ldm.c 2011-07-21 22:17:23.000000000 -0400
42633 +++ linux-3.0.4/fs/partitions/ldm.c 2011-08-23 21:48:14.000000000 -0400
42634 @@ -1311,6 +1311,7 @@ static bool ldm_frag_add (const u8 *data
42635 ldm_error ("A VBLK claims to have %d parts.", num);
42636 return false;
42637 }
42638 +
42639 if (rec >= num) {
42640 ldm_error("REC value (%d) exceeds NUM value (%d)", rec, num);
42641 return false;
42642 @@ -1322,7 +1323,7 @@ static bool ldm_frag_add (const u8 *data
42643 goto found;
42644 }
42645
42646 - f = kmalloc (sizeof (*f) + size*num, GFP_KERNEL);
42647 + f = kmalloc (size*num + sizeof (*f), GFP_KERNEL);
42648 if (!f) {
42649 ldm_crit ("Out of memory.");
42650 return false;
42651 diff -urNp linux-3.0.4/fs/pipe.c linux-3.0.4/fs/pipe.c
42652 --- linux-3.0.4/fs/pipe.c 2011-07-21 22:17:23.000000000 -0400
42653 +++ linux-3.0.4/fs/pipe.c 2011-08-23 21:48:14.000000000 -0400
42654 @@ -420,9 +420,9 @@ redo:
42655 }
42656 if (bufs) /* More to do? */
42657 continue;
42658 - if (!pipe->writers)
42659 + if (!atomic_read(&pipe->writers))
42660 break;
42661 - if (!pipe->waiting_writers) {
42662 + if (!atomic_read(&pipe->waiting_writers)) {
42663 /* syscall merging: Usually we must not sleep
42664 * if O_NONBLOCK is set, or if we got some data.
42665 * But if a writer sleeps in kernel space, then
42666 @@ -481,7 +481,7 @@ pipe_write(struct kiocb *iocb, const str
42667 mutex_lock(&inode->i_mutex);
42668 pipe = inode->i_pipe;
42669
42670 - if (!pipe->readers) {
42671 + if (!atomic_read(&pipe->readers)) {
42672 send_sig(SIGPIPE, current, 0);
42673 ret = -EPIPE;
42674 goto out;
42675 @@ -530,7 +530,7 @@ redo1:
42676 for (;;) {
42677 int bufs;
42678
42679 - if (!pipe->readers) {
42680 + if (!atomic_read(&pipe->readers)) {
42681 send_sig(SIGPIPE, current, 0);
42682 if (!ret)
42683 ret = -EPIPE;
42684 @@ -616,9 +616,9 @@ redo2:
42685 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
42686 do_wakeup = 0;
42687 }
42688 - pipe->waiting_writers++;
42689 + atomic_inc(&pipe->waiting_writers);
42690 pipe_wait(pipe);
42691 - pipe->waiting_writers--;
42692 + atomic_dec(&pipe->waiting_writers);
42693 }
42694 out:
42695 mutex_unlock(&inode->i_mutex);
42696 @@ -685,7 +685,7 @@ pipe_poll(struct file *filp, poll_table
42697 mask = 0;
42698 if (filp->f_mode & FMODE_READ) {
42699 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
42700 - if (!pipe->writers && filp->f_version != pipe->w_counter)
42701 + if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
42702 mask |= POLLHUP;
42703 }
42704
42705 @@ -695,7 +695,7 @@ pipe_poll(struct file *filp, poll_table
42706 * Most Unices do not set POLLERR for FIFOs but on Linux they
42707 * behave exactly like pipes for poll().
42708 */
42709 - if (!pipe->readers)
42710 + if (!atomic_read(&pipe->readers))
42711 mask |= POLLERR;
42712 }
42713
42714 @@ -709,10 +709,10 @@ pipe_release(struct inode *inode, int de
42715
42716 mutex_lock(&inode->i_mutex);
42717 pipe = inode->i_pipe;
42718 - pipe->readers -= decr;
42719 - pipe->writers -= decw;
42720 + atomic_sub(decr, &pipe->readers);
42721 + atomic_sub(decw, &pipe->writers);
42722
42723 - if (!pipe->readers && !pipe->writers) {
42724 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
42725 free_pipe_info(inode);
42726 } else {
42727 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
42728 @@ -802,7 +802,7 @@ pipe_read_open(struct inode *inode, stru
42729
42730 if (inode->i_pipe) {
42731 ret = 0;
42732 - inode->i_pipe->readers++;
42733 + atomic_inc(&inode->i_pipe->readers);
42734 }
42735
42736 mutex_unlock(&inode->i_mutex);
42737 @@ -819,7 +819,7 @@ pipe_write_open(struct inode *inode, str
42738
42739 if (inode->i_pipe) {
42740 ret = 0;
42741 - inode->i_pipe->writers++;
42742 + atomic_inc(&inode->i_pipe->writers);
42743 }
42744
42745 mutex_unlock(&inode->i_mutex);
42746 @@ -837,9 +837,9 @@ pipe_rdwr_open(struct inode *inode, stru
42747 if (inode->i_pipe) {
42748 ret = 0;
42749 if (filp->f_mode & FMODE_READ)
42750 - inode->i_pipe->readers++;
42751 + atomic_inc(&inode->i_pipe->readers);
42752 if (filp->f_mode & FMODE_WRITE)
42753 - inode->i_pipe->writers++;
42754 + atomic_inc(&inode->i_pipe->writers);
42755 }
42756
42757 mutex_unlock(&inode->i_mutex);
42758 @@ -931,7 +931,7 @@ void free_pipe_info(struct inode *inode)
42759 inode->i_pipe = NULL;
42760 }
42761
42762 -static struct vfsmount *pipe_mnt __read_mostly;
42763 +struct vfsmount *pipe_mnt __read_mostly;
42764
42765 /*
42766 * pipefs_dname() is called from d_path().
42767 @@ -961,7 +961,8 @@ static struct inode * get_pipe_inode(voi
42768 goto fail_iput;
42769 inode->i_pipe = pipe;
42770
42771 - pipe->readers = pipe->writers = 1;
42772 + atomic_set(&pipe->readers, 1);
42773 + atomic_set(&pipe->writers, 1);
42774 inode->i_fop = &rdwr_pipefifo_fops;
42775
42776 /*
42777 diff -urNp linux-3.0.4/fs/proc/array.c linux-3.0.4/fs/proc/array.c
42778 --- linux-3.0.4/fs/proc/array.c 2011-07-21 22:17:23.000000000 -0400
42779 +++ linux-3.0.4/fs/proc/array.c 2011-08-23 21:48:14.000000000 -0400
42780 @@ -60,6 +60,7 @@
42781 #include <linux/tty.h>
42782 #include <linux/string.h>
42783 #include <linux/mman.h>
42784 +#include <linux/grsecurity.h>
42785 #include <linux/proc_fs.h>
42786 #include <linux/ioport.h>
42787 #include <linux/uaccess.h>
42788 @@ -337,6 +338,21 @@ static void task_cpus_allowed(struct seq
42789 seq_putc(m, '\n');
42790 }
42791
42792 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
42793 +static inline void task_pax(struct seq_file *m, struct task_struct *p)
42794 +{
42795 + if (p->mm)
42796 + seq_printf(m, "PaX:\t%c%c%c%c%c\n",
42797 + p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
42798 + p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
42799 + p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
42800 + p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
42801 + p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
42802 + else
42803 + seq_printf(m, "PaX:\t-----\n");
42804 +}
42805 +#endif
42806 +
42807 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
42808 struct pid *pid, struct task_struct *task)
42809 {
42810 @@ -354,9 +370,24 @@ int proc_pid_status(struct seq_file *m,
42811 task_cpus_allowed(m, task);
42812 cpuset_task_status_allowed(m, task);
42813 task_context_switch_counts(m, task);
42814 +
42815 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
42816 + task_pax(m, task);
42817 +#endif
42818 +
42819 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
42820 + task_grsec_rbac(m, task);
42821 +#endif
42822 +
42823 return 0;
42824 }
42825
42826 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
42827 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
42828 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
42829 + _mm->pax_flags & MF_PAX_SEGMEXEC))
42830 +#endif
42831 +
42832 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
42833 struct pid *pid, struct task_struct *task, int whole)
42834 {
42835 @@ -375,9 +406,11 @@ static int do_task_stat(struct seq_file
42836 cputime_t cutime, cstime, utime, stime;
42837 cputime_t cgtime, gtime;
42838 unsigned long rsslim = 0;
42839 - char tcomm[sizeof(task->comm)];
42840 + char tcomm[sizeof(task->comm)] = { 0 };
42841 unsigned long flags;
42842
42843 + pax_track_stack();
42844 +
42845 state = *get_task_state(task);
42846 vsize = eip = esp = 0;
42847 permitted = ptrace_may_access(task, PTRACE_MODE_READ);
42848 @@ -449,6 +482,19 @@ static int do_task_stat(struct seq_file
42849 gtime = task->gtime;
42850 }
42851
42852 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
42853 + if (PAX_RAND_FLAGS(mm)) {
42854 + eip = 0;
42855 + esp = 0;
42856 + wchan = 0;
42857 + }
42858 +#endif
42859 +#ifdef CONFIG_GRKERNSEC_HIDESYM
42860 + wchan = 0;
42861 + eip =0;
42862 + esp =0;
42863 +#endif
42864 +
42865 /* scale priority and nice values from timeslices to -20..20 */
42866 /* to make it look like a "normal" Unix priority/nice value */
42867 priority = task_prio(task);
42868 @@ -489,9 +535,15 @@ static int do_task_stat(struct seq_file
42869 vsize,
42870 mm ? get_mm_rss(mm) : 0,
42871 rsslim,
42872 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
42873 + PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0),
42874 + PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0),
42875 + PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0),
42876 +#else
42877 mm ? (permitted ? mm->start_code : 1) : 0,
42878 mm ? (permitted ? mm->end_code : 1) : 0,
42879 (permitted && mm) ? mm->start_stack : 0,
42880 +#endif
42881 esp,
42882 eip,
42883 /* The signal information here is obsolete.
42884 @@ -544,3 +596,18 @@ int proc_pid_statm(struct seq_file *m, s
42885
42886 return 0;
42887 }
42888 +
42889 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
42890 +int proc_pid_ipaddr(struct task_struct *task, char *buffer)
42891 +{
42892 + u32 curr_ip = 0;
42893 + unsigned long flags;
42894 +
42895 + if (lock_task_sighand(task, &flags)) {
42896 + curr_ip = task->signal->curr_ip;
42897 + unlock_task_sighand(task, &flags);
42898 + }
42899 +
42900 + return sprintf(buffer, "%pI4\n", &curr_ip);
42901 +}
42902 +#endif
42903 diff -urNp linux-3.0.4/fs/proc/base.c linux-3.0.4/fs/proc/base.c
42904 --- linux-3.0.4/fs/proc/base.c 2011-09-02 18:11:21.000000000 -0400
42905 +++ linux-3.0.4/fs/proc/base.c 2011-09-13 14:50:28.000000000 -0400
42906 @@ -107,6 +107,22 @@ struct pid_entry {
42907 union proc_op op;
42908 };
42909
42910 +struct getdents_callback {
42911 + struct linux_dirent __user * current_dir;
42912 + struct linux_dirent __user * previous;
42913 + struct file * file;
42914 + int count;
42915 + int error;
42916 +};
42917 +
42918 +static int gr_fake_filldir(void * __buf, const char *name, int namlen,
42919 + loff_t offset, u64 ino, unsigned int d_type)
42920 +{
42921 + struct getdents_callback * buf = (struct getdents_callback *) __buf;
42922 + buf->error = -EINVAL;
42923 + return 0;
42924 +}
42925 +
42926 #define NOD(NAME, MODE, IOP, FOP, OP) { \
42927 .name = (NAME), \
42928 .len = sizeof(NAME) - 1, \
42929 @@ -209,6 +225,9 @@ static struct mm_struct *__check_mem_per
42930 if (task == current)
42931 return mm;
42932
42933 + if (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))
42934 + return ERR_PTR(-EPERM);
42935 +
42936 /*
42937 * If current is actively ptrace'ing, and would also be
42938 * permitted to freshly attach with ptrace now, permit it.
42939 @@ -282,6 +301,9 @@ static int proc_pid_cmdline(struct task_
42940 if (!mm->arg_end)
42941 goto out_mm; /* Shh! No looking before we're done */
42942
42943 + if (gr_acl_handle_procpidmem(task))
42944 + goto out_mm;
42945 +
42946 len = mm->arg_end - mm->arg_start;
42947
42948 if (len > PAGE_SIZE)
42949 @@ -309,12 +331,28 @@ out:
42950 return res;
42951 }
42952
42953 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
42954 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
42955 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
42956 + _mm->pax_flags & MF_PAX_SEGMEXEC))
42957 +#endif
42958 +
42959 static int proc_pid_auxv(struct task_struct *task, char *buffer)
42960 {
42961 struct mm_struct *mm = mm_for_maps(task);
42962 int res = PTR_ERR(mm);
42963 if (mm && !IS_ERR(mm)) {
42964 unsigned int nwords = 0;
42965 +
42966 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
42967 + /* allow if we're currently ptracing this task */
42968 + if (PAX_RAND_FLAGS(mm) &&
42969 + (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
42970 + mmput(mm);
42971 + return 0;
42972 + }
42973 +#endif
42974 +
42975 do {
42976 nwords += 2;
42977 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
42978 @@ -328,7 +366,7 @@ static int proc_pid_auxv(struct task_str
42979 }
42980
42981
42982 -#ifdef CONFIG_KALLSYMS
42983 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
42984 /*
42985 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
42986 * Returns the resolved symbol. If that fails, simply return the address.
42987 @@ -367,7 +405,7 @@ static void unlock_trace(struct task_str
42988 mutex_unlock(&task->signal->cred_guard_mutex);
42989 }
42990
42991 -#ifdef CONFIG_STACKTRACE
42992 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
42993
42994 #define MAX_STACK_TRACE_DEPTH 64
42995
42996 @@ -558,7 +596,7 @@ static int proc_pid_limits(struct task_s
42997 return count;
42998 }
42999
43000 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
43001 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
43002 static int proc_pid_syscall(struct task_struct *task, char *buffer)
43003 {
43004 long nr;
43005 @@ -587,7 +625,7 @@ static int proc_pid_syscall(struct task_
43006 /************************************************************************/
43007
43008 /* permission checks */
43009 -static int proc_fd_access_allowed(struct inode *inode)
43010 +static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
43011 {
43012 struct task_struct *task;
43013 int allowed = 0;
43014 @@ -597,7 +635,10 @@ static int proc_fd_access_allowed(struct
43015 */
43016 task = get_proc_task(inode);
43017 if (task) {
43018 - allowed = ptrace_may_access(task, PTRACE_MODE_READ);
43019 + if (log)
43020 + allowed = ptrace_may_access_log(task, PTRACE_MODE_READ);
43021 + else
43022 + allowed = ptrace_may_access(task, PTRACE_MODE_READ);
43023 put_task_struct(task);
43024 }
43025 return allowed;
43026 @@ -978,6 +1019,9 @@ static ssize_t environ_read(struct file
43027 if (!task)
43028 goto out_no_task;
43029
43030 + if (gr_acl_handle_procpidmem(task))
43031 + goto out;
43032 +
43033 ret = -ENOMEM;
43034 page = (char *)__get_free_page(GFP_TEMPORARY);
43035 if (!page)
43036 @@ -1614,7 +1658,7 @@ static void *proc_pid_follow_link(struct
43037 path_put(&nd->path);
43038
43039 /* Are we allowed to snoop on the tasks file descriptors? */
43040 - if (!proc_fd_access_allowed(inode))
43041 + if (!proc_fd_access_allowed(inode,0))
43042 goto out;
43043
43044 error = PROC_I(inode)->op.proc_get_link(inode, &nd->path);
43045 @@ -1653,8 +1697,18 @@ static int proc_pid_readlink(struct dent
43046 struct path path;
43047
43048 /* Are we allowed to snoop on the tasks file descriptors? */
43049 - if (!proc_fd_access_allowed(inode))
43050 - goto out;
43051 + /* logging this is needed for learning on chromium to work properly,
43052 + but we don't want to flood the logs from 'ps' which does a readlink
43053 + on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
43054 + CAP_SYS_PTRACE as it's not necessary for its basic functionality
43055 + */
43056 + if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
43057 + if (!proc_fd_access_allowed(inode,0))
43058 + goto out;
43059 + } else {
43060 + if (!proc_fd_access_allowed(inode,1))
43061 + goto out;
43062 + }
43063
43064 error = PROC_I(inode)->op.proc_get_link(inode, &path);
43065 if (error)
43066 @@ -1719,7 +1773,11 @@ struct inode *proc_pid_make_inode(struct
43067 rcu_read_lock();
43068 cred = __task_cred(task);
43069 inode->i_uid = cred->euid;
43070 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
43071 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
43072 +#else
43073 inode->i_gid = cred->egid;
43074 +#endif
43075 rcu_read_unlock();
43076 }
43077 security_task_to_inode(task, inode);
43078 @@ -1737,6 +1795,9 @@ int pid_getattr(struct vfsmount *mnt, st
43079 struct inode *inode = dentry->d_inode;
43080 struct task_struct *task;
43081 const struct cred *cred;
43082 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
43083 + const struct cred *tmpcred = current_cred();
43084 +#endif
43085
43086 generic_fillattr(inode, stat);
43087
43088 @@ -1744,13 +1805,41 @@ int pid_getattr(struct vfsmount *mnt, st
43089 stat->uid = 0;
43090 stat->gid = 0;
43091 task = pid_task(proc_pid(inode), PIDTYPE_PID);
43092 +
43093 + if (task && (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))) {
43094 + rcu_read_unlock();
43095 + return -ENOENT;
43096 + }
43097 +
43098 if (task) {
43099 + cred = __task_cred(task);
43100 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
43101 + if (!tmpcred->uid || (tmpcred->uid == cred->uid)
43102 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
43103 + || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
43104 +#endif
43105 + ) {
43106 +#endif
43107 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
43108 +#ifdef CONFIG_GRKERNSEC_PROC_USER
43109 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
43110 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
43111 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
43112 +#endif
43113 task_dumpable(task)) {
43114 - cred = __task_cred(task);
43115 stat->uid = cred->euid;
43116 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
43117 + stat->gid = CONFIG_GRKERNSEC_PROC_GID;
43118 +#else
43119 stat->gid = cred->egid;
43120 +#endif
43121 }
43122 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
43123 + } else {
43124 + rcu_read_unlock();
43125 + return -ENOENT;
43126 + }
43127 +#endif
43128 }
43129 rcu_read_unlock();
43130 return 0;
43131 @@ -1787,11 +1876,20 @@ int pid_revalidate(struct dentry *dentry
43132
43133 if (task) {
43134 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
43135 +#ifdef CONFIG_GRKERNSEC_PROC_USER
43136 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
43137 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
43138 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
43139 +#endif
43140 task_dumpable(task)) {
43141 rcu_read_lock();
43142 cred = __task_cred(task);
43143 inode->i_uid = cred->euid;
43144 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
43145 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
43146 +#else
43147 inode->i_gid = cred->egid;
43148 +#endif
43149 rcu_read_unlock();
43150 } else {
43151 inode->i_uid = 0;
43152 @@ -1909,7 +2007,8 @@ static int proc_fd_info(struct inode *in
43153 int fd = proc_fd(inode);
43154
43155 if (task) {
43156 - files = get_files_struct(task);
43157 + if (!gr_acl_handle_procpidmem(task))
43158 + files = get_files_struct(task);
43159 put_task_struct(task);
43160 }
43161 if (files) {
43162 @@ -2169,11 +2268,21 @@ static const struct file_operations proc
43163 */
43164 static int proc_fd_permission(struct inode *inode, int mask, unsigned int flags)
43165 {
43166 + struct task_struct *task;
43167 int rv = generic_permission(inode, mask, flags, NULL);
43168 - if (rv == 0)
43169 - return 0;
43170 +
43171 if (task_pid(current) == proc_pid(inode))
43172 rv = 0;
43173 +
43174 + task = get_proc_task(inode);
43175 + if (task == NULL)
43176 + return rv;
43177 +
43178 + if (gr_acl_handle_procpidmem(task))
43179 + rv = -EACCES;
43180 +
43181 + put_task_struct(task);
43182 +
43183 return rv;
43184 }
43185
43186 @@ -2283,6 +2392,9 @@ static struct dentry *proc_pident_lookup
43187 if (!task)
43188 goto out_no_task;
43189
43190 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
43191 + goto out;
43192 +
43193 /*
43194 * Yes, it does not scale. And it should not. Don't add
43195 * new entries into /proc/<tgid>/ without very good reasons.
43196 @@ -2327,6 +2439,9 @@ static int proc_pident_readdir(struct fi
43197 if (!task)
43198 goto out_no_task;
43199
43200 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
43201 + goto out;
43202 +
43203 ret = 0;
43204 i = filp->f_pos;
43205 switch (i) {
43206 @@ -2597,7 +2712,7 @@ static void *proc_self_follow_link(struc
43207 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
43208 void *cookie)
43209 {
43210 - char *s = nd_get_link(nd);
43211 + const char *s = nd_get_link(nd);
43212 if (!IS_ERR(s))
43213 __putname(s);
43214 }
43215 @@ -2795,7 +2910,7 @@ static const struct pid_entry tgid_base_
43216 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
43217 #endif
43218 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
43219 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
43220 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
43221 INF("syscall", S_IRUGO, proc_pid_syscall),
43222 #endif
43223 INF("cmdline", S_IRUGO, proc_pid_cmdline),
43224 @@ -2820,10 +2935,10 @@ static const struct pid_entry tgid_base_
43225 #ifdef CONFIG_SECURITY
43226 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
43227 #endif
43228 -#ifdef CONFIG_KALLSYMS
43229 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
43230 INF("wchan", S_IRUGO, proc_pid_wchan),
43231 #endif
43232 -#ifdef CONFIG_STACKTRACE
43233 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
43234 ONE("stack", S_IRUGO, proc_pid_stack),
43235 #endif
43236 #ifdef CONFIG_SCHEDSTATS
43237 @@ -2857,6 +2972,9 @@ static const struct pid_entry tgid_base_
43238 #ifdef CONFIG_HARDWALL
43239 INF("hardwall", S_IRUGO, proc_pid_hardwall),
43240 #endif
43241 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
43242 + INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
43243 +#endif
43244 };
43245
43246 static int proc_tgid_base_readdir(struct file * filp,
43247 @@ -2982,7 +3100,14 @@ static struct dentry *proc_pid_instantia
43248 if (!inode)
43249 goto out;
43250
43251 +#ifdef CONFIG_GRKERNSEC_PROC_USER
43252 + inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
43253 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
43254 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
43255 + inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
43256 +#else
43257 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
43258 +#endif
43259 inode->i_op = &proc_tgid_base_inode_operations;
43260 inode->i_fop = &proc_tgid_base_operations;
43261 inode->i_flags|=S_IMMUTABLE;
43262 @@ -3024,7 +3149,11 @@ struct dentry *proc_pid_lookup(struct in
43263 if (!task)
43264 goto out;
43265
43266 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
43267 + goto out_put_task;
43268 +
43269 result = proc_pid_instantiate(dir, dentry, task, NULL);
43270 +out_put_task:
43271 put_task_struct(task);
43272 out:
43273 return result;
43274 @@ -3089,6 +3218,11 @@ int proc_pid_readdir(struct file * filp,
43275 {
43276 unsigned int nr;
43277 struct task_struct *reaper;
43278 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
43279 + const struct cred *tmpcred = current_cred();
43280 + const struct cred *itercred;
43281 +#endif
43282 + filldir_t __filldir = filldir;
43283 struct tgid_iter iter;
43284 struct pid_namespace *ns;
43285
43286 @@ -3112,8 +3246,27 @@ int proc_pid_readdir(struct file * filp,
43287 for (iter = next_tgid(ns, iter);
43288 iter.task;
43289 iter.tgid += 1, iter = next_tgid(ns, iter)) {
43290 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
43291 + rcu_read_lock();
43292 + itercred = __task_cred(iter.task);
43293 +#endif
43294 + if (gr_pid_is_chrooted(iter.task) || gr_check_hidden_task(iter.task)
43295 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
43296 + || (tmpcred->uid && (itercred->uid != tmpcred->uid)
43297 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
43298 + && !in_group_p(CONFIG_GRKERNSEC_PROC_GID)
43299 +#endif
43300 + )
43301 +#endif
43302 + )
43303 + __filldir = &gr_fake_filldir;
43304 + else
43305 + __filldir = filldir;
43306 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
43307 + rcu_read_unlock();
43308 +#endif
43309 filp->f_pos = iter.tgid + TGID_OFFSET;
43310 - if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) {
43311 + if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) {
43312 put_task_struct(iter.task);
43313 goto out;
43314 }
43315 @@ -3141,7 +3294,7 @@ static const struct pid_entry tid_base_s
43316 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
43317 #endif
43318 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
43319 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
43320 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
43321 INF("syscall", S_IRUGO, proc_pid_syscall),
43322 #endif
43323 INF("cmdline", S_IRUGO, proc_pid_cmdline),
43324 @@ -3165,10 +3318,10 @@ static const struct pid_entry tid_base_s
43325 #ifdef CONFIG_SECURITY
43326 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
43327 #endif
43328 -#ifdef CONFIG_KALLSYMS
43329 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
43330 INF("wchan", S_IRUGO, proc_pid_wchan),
43331 #endif
43332 -#ifdef CONFIG_STACKTRACE
43333 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
43334 ONE("stack", S_IRUGO, proc_pid_stack),
43335 #endif
43336 #ifdef CONFIG_SCHEDSTATS
43337 diff -urNp linux-3.0.4/fs/proc/cmdline.c linux-3.0.4/fs/proc/cmdline.c
43338 --- linux-3.0.4/fs/proc/cmdline.c 2011-07-21 22:17:23.000000000 -0400
43339 +++ linux-3.0.4/fs/proc/cmdline.c 2011-08-23 21:48:14.000000000 -0400
43340 @@ -23,7 +23,11 @@ static const struct file_operations cmdl
43341
43342 static int __init proc_cmdline_init(void)
43343 {
43344 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
43345 + proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
43346 +#else
43347 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
43348 +#endif
43349 return 0;
43350 }
43351 module_init(proc_cmdline_init);
43352 diff -urNp linux-3.0.4/fs/proc/devices.c linux-3.0.4/fs/proc/devices.c
43353 --- linux-3.0.4/fs/proc/devices.c 2011-07-21 22:17:23.000000000 -0400
43354 +++ linux-3.0.4/fs/proc/devices.c 2011-08-23 21:48:14.000000000 -0400
43355 @@ -64,7 +64,11 @@ static const struct file_operations proc
43356
43357 static int __init proc_devices_init(void)
43358 {
43359 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
43360 + proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
43361 +#else
43362 proc_create("devices", 0, NULL, &proc_devinfo_operations);
43363 +#endif
43364 return 0;
43365 }
43366 module_init(proc_devices_init);
43367 diff -urNp linux-3.0.4/fs/proc/inode.c linux-3.0.4/fs/proc/inode.c
43368 --- linux-3.0.4/fs/proc/inode.c 2011-07-21 22:17:23.000000000 -0400
43369 +++ linux-3.0.4/fs/proc/inode.c 2011-08-23 21:48:14.000000000 -0400
43370 @@ -440,7 +440,11 @@ struct inode *proc_get_inode(struct supe
43371 if (de->mode) {
43372 inode->i_mode = de->mode;
43373 inode->i_uid = de->uid;
43374 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
43375 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
43376 +#else
43377 inode->i_gid = de->gid;
43378 +#endif
43379 }
43380 if (de->size)
43381 inode->i_size = de->size;
43382 diff -urNp linux-3.0.4/fs/proc/internal.h linux-3.0.4/fs/proc/internal.h
43383 --- linux-3.0.4/fs/proc/internal.h 2011-07-21 22:17:23.000000000 -0400
43384 +++ linux-3.0.4/fs/proc/internal.h 2011-08-23 21:48:14.000000000 -0400
43385 @@ -51,6 +51,9 @@ extern int proc_pid_status(struct seq_fi
43386 struct pid *pid, struct task_struct *task);
43387 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
43388 struct pid *pid, struct task_struct *task);
43389 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
43390 +extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
43391 +#endif
43392 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
43393
43394 extern const struct file_operations proc_maps_operations;
43395 diff -urNp linux-3.0.4/fs/proc/Kconfig linux-3.0.4/fs/proc/Kconfig
43396 --- linux-3.0.4/fs/proc/Kconfig 2011-07-21 22:17:23.000000000 -0400
43397 +++ linux-3.0.4/fs/proc/Kconfig 2011-08-23 21:48:14.000000000 -0400
43398 @@ -30,12 +30,12 @@ config PROC_FS
43399
43400 config PROC_KCORE
43401 bool "/proc/kcore support" if !ARM
43402 - depends on PROC_FS && MMU
43403 + depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
43404
43405 config PROC_VMCORE
43406 bool "/proc/vmcore support"
43407 - depends on PROC_FS && CRASH_DUMP
43408 - default y
43409 + depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
43410 + default n
43411 help
43412 Exports the dump image of crashed kernel in ELF format.
43413
43414 @@ -59,8 +59,8 @@ config PROC_SYSCTL
43415 limited in memory.
43416
43417 config PROC_PAGE_MONITOR
43418 - default y
43419 - depends on PROC_FS && MMU
43420 + default n
43421 + depends on PROC_FS && MMU && !GRKERNSEC
43422 bool "Enable /proc page monitoring" if EXPERT
43423 help
43424 Various /proc files exist to monitor process memory utilization:
43425 diff -urNp linux-3.0.4/fs/proc/kcore.c linux-3.0.4/fs/proc/kcore.c
43426 --- linux-3.0.4/fs/proc/kcore.c 2011-07-21 22:17:23.000000000 -0400
43427 +++ linux-3.0.4/fs/proc/kcore.c 2011-08-23 21:48:14.000000000 -0400
43428 @@ -321,6 +321,8 @@ static void elf_kcore_store_hdr(char *bu
43429 off_t offset = 0;
43430 struct kcore_list *m;
43431
43432 + pax_track_stack();
43433 +
43434 /* setup ELF header */
43435 elf = (struct elfhdr *) bufp;
43436 bufp += sizeof(struct elfhdr);
43437 @@ -478,9 +480,10 @@ read_kcore(struct file *file, char __use
43438 * the addresses in the elf_phdr on our list.
43439 */
43440 start = kc_offset_to_vaddr(*fpos - elf_buflen);
43441 - if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
43442 + tsz = PAGE_SIZE - (start & ~PAGE_MASK);
43443 + if (tsz > buflen)
43444 tsz = buflen;
43445 -
43446 +
43447 while (buflen) {
43448 struct kcore_list *m;
43449
43450 @@ -509,20 +512,23 @@ read_kcore(struct file *file, char __use
43451 kfree(elf_buf);
43452 } else {
43453 if (kern_addr_valid(start)) {
43454 - unsigned long n;
43455 + char *elf_buf;
43456 + mm_segment_t oldfs;
43457
43458 - n = copy_to_user(buffer, (char *)start, tsz);
43459 - /*
43460 - * We cannot distingush between fault on source
43461 - * and fault on destination. When this happens
43462 - * we clear too and hope it will trigger the
43463 - * EFAULT again.
43464 - */
43465 - if (n) {
43466 - if (clear_user(buffer + tsz - n,
43467 - n))
43468 + elf_buf = kmalloc(tsz, GFP_KERNEL);
43469 + if (!elf_buf)
43470 + return -ENOMEM;
43471 + oldfs = get_fs();
43472 + set_fs(KERNEL_DS);
43473 + if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
43474 + set_fs(oldfs);
43475 + if (copy_to_user(buffer, elf_buf, tsz)) {
43476 + kfree(elf_buf);
43477 return -EFAULT;
43478 + }
43479 }
43480 + set_fs(oldfs);
43481 + kfree(elf_buf);
43482 } else {
43483 if (clear_user(buffer, tsz))
43484 return -EFAULT;
43485 @@ -542,6 +548,9 @@ read_kcore(struct file *file, char __use
43486
43487 static int open_kcore(struct inode *inode, struct file *filp)
43488 {
43489 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
43490 + return -EPERM;
43491 +#endif
43492 if (!capable(CAP_SYS_RAWIO))
43493 return -EPERM;
43494 if (kcore_need_update)
43495 diff -urNp linux-3.0.4/fs/proc/meminfo.c linux-3.0.4/fs/proc/meminfo.c
43496 --- linux-3.0.4/fs/proc/meminfo.c 2011-07-21 22:17:23.000000000 -0400
43497 +++ linux-3.0.4/fs/proc/meminfo.c 2011-08-23 21:48:14.000000000 -0400
43498 @@ -29,6 +29,8 @@ static int meminfo_proc_show(struct seq_
43499 unsigned long pages[NR_LRU_LISTS];
43500 int lru;
43501
43502 + pax_track_stack();
43503 +
43504 /*
43505 * display in kilobytes.
43506 */
43507 @@ -157,7 +159,7 @@ static int meminfo_proc_show(struct seq_
43508 vmi.used >> 10,
43509 vmi.largest_chunk >> 10
43510 #ifdef CONFIG_MEMORY_FAILURE
43511 - ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
43512 + ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
43513 #endif
43514 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
43515 ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
43516 diff -urNp linux-3.0.4/fs/proc/nommu.c linux-3.0.4/fs/proc/nommu.c
43517 --- linux-3.0.4/fs/proc/nommu.c 2011-07-21 22:17:23.000000000 -0400
43518 +++ linux-3.0.4/fs/proc/nommu.c 2011-08-23 21:47:56.000000000 -0400
43519 @@ -66,7 +66,7 @@ static int nommu_region_show(struct seq_
43520 if (len < 1)
43521 len = 1;
43522 seq_printf(m, "%*c", len, ' ');
43523 - seq_path(m, &file->f_path, "");
43524 + seq_path(m, &file->f_path, "\n\\");
43525 }
43526
43527 seq_putc(m, '\n');
43528 diff -urNp linux-3.0.4/fs/proc/proc_net.c linux-3.0.4/fs/proc/proc_net.c
43529 --- linux-3.0.4/fs/proc/proc_net.c 2011-07-21 22:17:23.000000000 -0400
43530 +++ linux-3.0.4/fs/proc/proc_net.c 2011-08-23 21:48:14.000000000 -0400
43531 @@ -105,6 +105,17 @@ static struct net *get_proc_task_net(str
43532 struct task_struct *task;
43533 struct nsproxy *ns;
43534 struct net *net = NULL;
43535 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
43536 + const struct cred *cred = current_cred();
43537 +#endif
43538 +
43539 +#ifdef CONFIG_GRKERNSEC_PROC_USER
43540 + if (cred->fsuid)
43541 + return net;
43542 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
43543 + if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
43544 + return net;
43545 +#endif
43546
43547 rcu_read_lock();
43548 task = pid_task(proc_pid(dir), PIDTYPE_PID);
43549 diff -urNp linux-3.0.4/fs/proc/proc_sysctl.c linux-3.0.4/fs/proc/proc_sysctl.c
43550 --- linux-3.0.4/fs/proc/proc_sysctl.c 2011-07-21 22:17:23.000000000 -0400
43551 +++ linux-3.0.4/fs/proc/proc_sysctl.c 2011-08-23 21:48:14.000000000 -0400
43552 @@ -8,6 +8,8 @@
43553 #include <linux/namei.h>
43554 #include "internal.h"
43555
43556 +extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op);
43557 +
43558 static const struct dentry_operations proc_sys_dentry_operations;
43559 static const struct file_operations proc_sys_file_operations;
43560 static const struct inode_operations proc_sys_inode_operations;
43561 @@ -111,6 +113,9 @@ static struct dentry *proc_sys_lookup(st
43562 if (!p)
43563 goto out;
43564
43565 + if (gr_handle_sysctl(p, MAY_EXEC))
43566 + goto out;
43567 +
43568 err = ERR_PTR(-ENOMEM);
43569 inode = proc_sys_make_inode(dir->i_sb, h ? h : head, p);
43570 if (h)
43571 @@ -230,6 +235,9 @@ static int scan(struct ctl_table_header
43572 if (*pos < file->f_pos)
43573 continue;
43574
43575 + if (gr_handle_sysctl(table, 0))
43576 + continue;
43577 +
43578 res = proc_sys_fill_cache(file, dirent, filldir, head, table);
43579 if (res)
43580 return res;
43581 @@ -355,6 +363,9 @@ static int proc_sys_getattr(struct vfsmo
43582 if (IS_ERR(head))
43583 return PTR_ERR(head);
43584
43585 + if (table && gr_handle_sysctl(table, MAY_EXEC))
43586 + return -ENOENT;
43587 +
43588 generic_fillattr(inode, stat);
43589 if (table)
43590 stat->mode = (stat->mode & S_IFMT) | table->mode;
43591 diff -urNp linux-3.0.4/fs/proc/root.c linux-3.0.4/fs/proc/root.c
43592 --- linux-3.0.4/fs/proc/root.c 2011-07-21 22:17:23.000000000 -0400
43593 +++ linux-3.0.4/fs/proc/root.c 2011-08-23 21:48:14.000000000 -0400
43594 @@ -123,7 +123,15 @@ void __init proc_root_init(void)
43595 #ifdef CONFIG_PROC_DEVICETREE
43596 proc_device_tree_init();
43597 #endif
43598 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
43599 +#ifdef CONFIG_GRKERNSEC_PROC_USER
43600 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
43601 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
43602 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
43603 +#endif
43604 +#else
43605 proc_mkdir("bus", NULL);
43606 +#endif
43607 proc_sys_init();
43608 }
43609
43610 diff -urNp linux-3.0.4/fs/proc/task_mmu.c linux-3.0.4/fs/proc/task_mmu.c
43611 --- linux-3.0.4/fs/proc/task_mmu.c 2011-07-21 22:17:23.000000000 -0400
43612 +++ linux-3.0.4/fs/proc/task_mmu.c 2011-08-23 21:48:14.000000000 -0400
43613 @@ -51,8 +51,13 @@ void task_mem(struct seq_file *m, struct
43614 "VmExe:\t%8lu kB\n"
43615 "VmLib:\t%8lu kB\n"
43616 "VmPTE:\t%8lu kB\n"
43617 - "VmSwap:\t%8lu kB\n",
43618 - hiwater_vm << (PAGE_SHIFT-10),
43619 + "VmSwap:\t%8lu kB\n"
43620 +
43621 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
43622 + "CsBase:\t%8lx\nCsLim:\t%8lx\n"
43623 +#endif
43624 +
43625 + ,hiwater_vm << (PAGE_SHIFT-10),
43626 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
43627 mm->locked_vm << (PAGE_SHIFT-10),
43628 hiwater_rss << (PAGE_SHIFT-10),
43629 @@ -60,7 +65,13 @@ void task_mem(struct seq_file *m, struct
43630 data << (PAGE_SHIFT-10),
43631 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
43632 (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
43633 - swap << (PAGE_SHIFT-10));
43634 + swap << (PAGE_SHIFT-10)
43635 +
43636 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
43637 + , mm->context.user_cs_base, mm->context.user_cs_limit
43638 +#endif
43639 +
43640 + );
43641 }
43642
43643 unsigned long task_vsize(struct mm_struct *mm)
43644 @@ -207,6 +218,12 @@ static int do_maps_open(struct inode *in
43645 return ret;
43646 }
43647
43648 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
43649 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
43650 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
43651 + _mm->pax_flags & MF_PAX_SEGMEXEC))
43652 +#endif
43653 +
43654 static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
43655 {
43656 struct mm_struct *mm = vma->vm_mm;
43657 @@ -225,13 +242,13 @@ static void show_map_vma(struct seq_file
43658 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
43659 }
43660
43661 - /* We don't show the stack guard page in /proc/maps */
43662 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
43663 + start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
43664 + end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
43665 +#else
43666 start = vma->vm_start;
43667 - if (stack_guard_page_start(vma, start))
43668 - start += PAGE_SIZE;
43669 end = vma->vm_end;
43670 - if (stack_guard_page_end(vma, end))
43671 - end -= PAGE_SIZE;
43672 +#endif
43673
43674 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
43675 start,
43676 @@ -240,7 +257,11 @@ static void show_map_vma(struct seq_file
43677 flags & VM_WRITE ? 'w' : '-',
43678 flags & VM_EXEC ? 'x' : '-',
43679 flags & VM_MAYSHARE ? 's' : 'p',
43680 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
43681 + PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
43682 +#else
43683 pgoff,
43684 +#endif
43685 MAJOR(dev), MINOR(dev), ino, &len);
43686
43687 /*
43688 @@ -249,7 +270,7 @@ static void show_map_vma(struct seq_file
43689 */
43690 if (file) {
43691 pad_len_spaces(m, len);
43692 - seq_path(m, &file->f_path, "\n");
43693 + seq_path(m, &file->f_path, "\n\\");
43694 } else {
43695 const char *name = arch_vma_name(vma);
43696 if (!name) {
43697 @@ -257,8 +278,9 @@ static void show_map_vma(struct seq_file
43698 if (vma->vm_start <= mm->brk &&
43699 vma->vm_end >= mm->start_brk) {
43700 name = "[heap]";
43701 - } else if (vma->vm_start <= mm->start_stack &&
43702 - vma->vm_end >= mm->start_stack) {
43703 + } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
43704 + (vma->vm_start <= mm->start_stack &&
43705 + vma->vm_end >= mm->start_stack)) {
43706 name = "[stack]";
43707 }
43708 } else {
43709 @@ -433,11 +455,16 @@ static int show_smap(struct seq_file *m,
43710 };
43711
43712 memset(&mss, 0, sizeof mss);
43713 - mss.vma = vma;
43714 - /* mmap_sem is held in m_start */
43715 - if (vma->vm_mm && !is_vm_hugetlb_page(vma))
43716 - walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
43717 -
43718 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
43719 + if (!PAX_RAND_FLAGS(vma->vm_mm)) {
43720 +#endif
43721 + mss.vma = vma;
43722 + /* mmap_sem is held in m_start */
43723 + if (vma->vm_mm && !is_vm_hugetlb_page(vma))
43724 + walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
43725 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
43726 + }
43727 +#endif
43728 show_map_vma(m, vma);
43729
43730 seq_printf(m,
43731 @@ -455,7 +482,11 @@ static int show_smap(struct seq_file *m,
43732 "KernelPageSize: %8lu kB\n"
43733 "MMUPageSize: %8lu kB\n"
43734 "Locked: %8lu kB\n",
43735 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
43736 + PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
43737 +#else
43738 (vma->vm_end - vma->vm_start) >> 10,
43739 +#endif
43740 mss.resident >> 10,
43741 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
43742 mss.shared_clean >> 10,
43743 @@ -1001,7 +1032,7 @@ static int show_numa_map(struct seq_file
43744
43745 if (file) {
43746 seq_printf(m, " file=");
43747 - seq_path(m, &file->f_path, "\n\t= ");
43748 + seq_path(m, &file->f_path, "\n\t\\= ");
43749 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
43750 seq_printf(m, " heap");
43751 } else if (vma->vm_start <= mm->start_stack &&
43752 diff -urNp linux-3.0.4/fs/proc/task_nommu.c linux-3.0.4/fs/proc/task_nommu.c
43753 --- linux-3.0.4/fs/proc/task_nommu.c 2011-07-21 22:17:23.000000000 -0400
43754 +++ linux-3.0.4/fs/proc/task_nommu.c 2011-08-23 21:47:56.000000000 -0400
43755 @@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct
43756 else
43757 bytes += kobjsize(mm);
43758
43759 - if (current->fs && current->fs->users > 1)
43760 + if (current->fs && atomic_read(&current->fs->users) > 1)
43761 sbytes += kobjsize(current->fs);
43762 else
43763 bytes += kobjsize(current->fs);
43764 @@ -166,7 +166,7 @@ static int nommu_vma_show(struct seq_fil
43765
43766 if (file) {
43767 pad_len_spaces(m, len);
43768 - seq_path(m, &file->f_path, "");
43769 + seq_path(m, &file->f_path, "\n\\");
43770 } else if (mm) {
43771 if (vma->vm_start <= mm->start_stack &&
43772 vma->vm_end >= mm->start_stack) {
43773 diff -urNp linux-3.0.4/fs/quota/netlink.c linux-3.0.4/fs/quota/netlink.c
43774 --- linux-3.0.4/fs/quota/netlink.c 2011-07-21 22:17:23.000000000 -0400
43775 +++ linux-3.0.4/fs/quota/netlink.c 2011-08-23 21:47:56.000000000 -0400
43776 @@ -33,7 +33,7 @@ static struct genl_family quota_genl_fam
43777 void quota_send_warning(short type, unsigned int id, dev_t dev,
43778 const char warntype)
43779 {
43780 - static atomic_t seq;
43781 + static atomic_unchecked_t seq;
43782 struct sk_buff *skb;
43783 void *msg_head;
43784 int ret;
43785 @@ -49,7 +49,7 @@ void quota_send_warning(short type, unsi
43786 "VFS: Not enough memory to send quota warning.\n");
43787 return;
43788 }
43789 - msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
43790 + msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
43791 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
43792 if (!msg_head) {
43793 printk(KERN_ERR
43794 diff -urNp linux-3.0.4/fs/readdir.c linux-3.0.4/fs/readdir.c
43795 --- linux-3.0.4/fs/readdir.c 2011-07-21 22:17:23.000000000 -0400
43796 +++ linux-3.0.4/fs/readdir.c 2011-08-23 21:48:14.000000000 -0400
43797 @@ -17,6 +17,7 @@
43798 #include <linux/security.h>
43799 #include <linux/syscalls.h>
43800 #include <linux/unistd.h>
43801 +#include <linux/namei.h>
43802
43803 #include <asm/uaccess.h>
43804
43805 @@ -67,6 +68,7 @@ struct old_linux_dirent {
43806
43807 struct readdir_callback {
43808 struct old_linux_dirent __user * dirent;
43809 + struct file * file;
43810 int result;
43811 };
43812
43813 @@ -84,6 +86,10 @@ static int fillonedir(void * __buf, cons
43814 buf->result = -EOVERFLOW;
43815 return -EOVERFLOW;
43816 }
43817 +
43818 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
43819 + return 0;
43820 +
43821 buf->result++;
43822 dirent = buf->dirent;
43823 if (!access_ok(VERIFY_WRITE, dirent,
43824 @@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned in
43825
43826 buf.result = 0;
43827 buf.dirent = dirent;
43828 + buf.file = file;
43829
43830 error = vfs_readdir(file, fillonedir, &buf);
43831 if (buf.result)
43832 @@ -142,6 +149,7 @@ struct linux_dirent {
43833 struct getdents_callback {
43834 struct linux_dirent __user * current_dir;
43835 struct linux_dirent __user * previous;
43836 + struct file * file;
43837 int count;
43838 int error;
43839 };
43840 @@ -163,6 +171,10 @@ static int filldir(void * __buf, const c
43841 buf->error = -EOVERFLOW;
43842 return -EOVERFLOW;
43843 }
43844 +
43845 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
43846 + return 0;
43847 +
43848 dirent = buf->previous;
43849 if (dirent) {
43850 if (__put_user(offset, &dirent->d_off))
43851 @@ -210,6 +222,7 @@ SYSCALL_DEFINE3(getdents, unsigned int,
43852 buf.previous = NULL;
43853 buf.count = count;
43854 buf.error = 0;
43855 + buf.file = file;
43856
43857 error = vfs_readdir(file, filldir, &buf);
43858 if (error >= 0)
43859 @@ -229,6 +242,7 @@ out:
43860 struct getdents_callback64 {
43861 struct linux_dirent64 __user * current_dir;
43862 struct linux_dirent64 __user * previous;
43863 + struct file *file;
43864 int count;
43865 int error;
43866 };
43867 @@ -244,6 +258,10 @@ static int filldir64(void * __buf, const
43868 buf->error = -EINVAL; /* only used if we fail.. */
43869 if (reclen > buf->count)
43870 return -EINVAL;
43871 +
43872 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
43873 + return 0;
43874 +
43875 dirent = buf->previous;
43876 if (dirent) {
43877 if (__put_user(offset, &dirent->d_off))
43878 @@ -291,6 +309,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int
43879
43880 buf.current_dir = dirent;
43881 buf.previous = NULL;
43882 + buf.file = file;
43883 buf.count = count;
43884 buf.error = 0;
43885
43886 diff -urNp linux-3.0.4/fs/reiserfs/dir.c linux-3.0.4/fs/reiserfs/dir.c
43887 --- linux-3.0.4/fs/reiserfs/dir.c 2011-07-21 22:17:23.000000000 -0400
43888 +++ linux-3.0.4/fs/reiserfs/dir.c 2011-08-23 21:48:14.000000000 -0400
43889 @@ -66,6 +66,8 @@ int reiserfs_readdir_dentry(struct dentr
43890 struct reiserfs_dir_entry de;
43891 int ret = 0;
43892
43893 + pax_track_stack();
43894 +
43895 reiserfs_write_lock(inode->i_sb);
43896
43897 reiserfs_check_lock_depth(inode->i_sb, "readdir");
43898 diff -urNp linux-3.0.4/fs/reiserfs/do_balan.c linux-3.0.4/fs/reiserfs/do_balan.c
43899 --- linux-3.0.4/fs/reiserfs/do_balan.c 2011-07-21 22:17:23.000000000 -0400
43900 +++ linux-3.0.4/fs/reiserfs/do_balan.c 2011-08-23 21:47:56.000000000 -0400
43901 @@ -2051,7 +2051,7 @@ void do_balance(struct tree_balance *tb,
43902 return;
43903 }
43904
43905 - atomic_inc(&(fs_generation(tb->tb_sb)));
43906 + atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
43907 do_balance_starts(tb);
43908
43909 /* balance leaf returns 0 except if combining L R and S into
43910 diff -urNp linux-3.0.4/fs/reiserfs/journal.c linux-3.0.4/fs/reiserfs/journal.c
43911 --- linux-3.0.4/fs/reiserfs/journal.c 2011-07-21 22:17:23.000000000 -0400
43912 +++ linux-3.0.4/fs/reiserfs/journal.c 2011-08-23 21:48:14.000000000 -0400
43913 @@ -2299,6 +2299,8 @@ static struct buffer_head *reiserfs_brea
43914 struct buffer_head *bh;
43915 int i, j;
43916
43917 + pax_track_stack();
43918 +
43919 bh = __getblk(dev, block, bufsize);
43920 if (buffer_uptodate(bh))
43921 return (bh);
43922 diff -urNp linux-3.0.4/fs/reiserfs/namei.c linux-3.0.4/fs/reiserfs/namei.c
43923 --- linux-3.0.4/fs/reiserfs/namei.c 2011-07-21 22:17:23.000000000 -0400
43924 +++ linux-3.0.4/fs/reiserfs/namei.c 2011-08-23 21:48:14.000000000 -0400
43925 @@ -1225,6 +1225,8 @@ static int reiserfs_rename(struct inode
43926 unsigned long savelink = 1;
43927 struct timespec ctime;
43928
43929 + pax_track_stack();
43930 +
43931 /* three balancings: (1) old name removal, (2) new name insertion
43932 and (3) maybe "save" link insertion
43933 stat data updates: (1) old directory,
43934 diff -urNp linux-3.0.4/fs/reiserfs/procfs.c linux-3.0.4/fs/reiserfs/procfs.c
43935 --- linux-3.0.4/fs/reiserfs/procfs.c 2011-07-21 22:17:23.000000000 -0400
43936 +++ linux-3.0.4/fs/reiserfs/procfs.c 2011-08-23 21:48:14.000000000 -0400
43937 @@ -113,7 +113,7 @@ static int show_super(struct seq_file *m
43938 "SMALL_TAILS " : "NO_TAILS ",
43939 replay_only(sb) ? "REPLAY_ONLY " : "",
43940 convert_reiserfs(sb) ? "CONV " : "",
43941 - atomic_read(&r->s_generation_counter),
43942 + atomic_read_unchecked(&r->s_generation_counter),
43943 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
43944 SF(s_do_balance), SF(s_unneeded_left_neighbor),
43945 SF(s_good_search_by_key_reada), SF(s_bmaps),
43946 @@ -299,6 +299,8 @@ static int show_journal(struct seq_file
43947 struct journal_params *jp = &rs->s_v1.s_journal;
43948 char b[BDEVNAME_SIZE];
43949
43950 + pax_track_stack();
43951 +
43952 seq_printf(m, /* on-disk fields */
43953 "jp_journal_1st_block: \t%i\n"
43954 "jp_journal_dev: \t%s[%x]\n"
43955 diff -urNp linux-3.0.4/fs/reiserfs/stree.c linux-3.0.4/fs/reiserfs/stree.c
43956 --- linux-3.0.4/fs/reiserfs/stree.c 2011-07-21 22:17:23.000000000 -0400
43957 +++ linux-3.0.4/fs/reiserfs/stree.c 2011-08-23 21:48:14.000000000 -0400
43958 @@ -1196,6 +1196,8 @@ int reiserfs_delete_item(struct reiserfs
43959 int iter = 0;
43960 #endif
43961
43962 + pax_track_stack();
43963 +
43964 BUG_ON(!th->t_trans_id);
43965
43966 init_tb_struct(th, &s_del_balance, sb, path,
43967 @@ -1333,6 +1335,8 @@ void reiserfs_delete_solid_item(struct r
43968 int retval;
43969 int quota_cut_bytes = 0;
43970
43971 + pax_track_stack();
43972 +
43973 BUG_ON(!th->t_trans_id);
43974
43975 le_key2cpu_key(&cpu_key, key);
43976 @@ -1562,6 +1566,8 @@ int reiserfs_cut_from_item(struct reiser
43977 int quota_cut_bytes;
43978 loff_t tail_pos = 0;
43979
43980 + pax_track_stack();
43981 +
43982 BUG_ON(!th->t_trans_id);
43983
43984 init_tb_struct(th, &s_cut_balance, inode->i_sb, path,
43985 @@ -1957,6 +1963,8 @@ int reiserfs_paste_into_item(struct reis
43986 int retval;
43987 int fs_gen;
43988
43989 + pax_track_stack();
43990 +
43991 BUG_ON(!th->t_trans_id);
43992
43993 fs_gen = get_generation(inode->i_sb);
43994 @@ -2045,6 +2053,8 @@ int reiserfs_insert_item(struct reiserfs
43995 int fs_gen = 0;
43996 int quota_bytes = 0;
43997
43998 + pax_track_stack();
43999 +
44000 BUG_ON(!th->t_trans_id);
44001
44002 if (inode) { /* Do we count quotas for item? */
44003 diff -urNp linux-3.0.4/fs/reiserfs/super.c linux-3.0.4/fs/reiserfs/super.c
44004 --- linux-3.0.4/fs/reiserfs/super.c 2011-07-21 22:17:23.000000000 -0400
44005 +++ linux-3.0.4/fs/reiserfs/super.c 2011-08-23 21:48:14.000000000 -0400
44006 @@ -927,6 +927,8 @@ static int reiserfs_parse_options(struct
44007 {.option_name = NULL}
44008 };
44009
44010 + pax_track_stack();
44011 +
44012 *blocks = 0;
44013 if (!options || !*options)
44014 /* use default configuration: create tails, journaling on, no
44015 diff -urNp linux-3.0.4/fs/select.c linux-3.0.4/fs/select.c
44016 --- linux-3.0.4/fs/select.c 2011-07-21 22:17:23.000000000 -0400
44017 +++ linux-3.0.4/fs/select.c 2011-08-23 21:48:14.000000000 -0400
44018 @@ -20,6 +20,7 @@
44019 #include <linux/module.h>
44020 #include <linux/slab.h>
44021 #include <linux/poll.h>
44022 +#include <linux/security.h>
44023 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
44024 #include <linux/file.h>
44025 #include <linux/fdtable.h>
44026 @@ -403,6 +404,8 @@ int do_select(int n, fd_set_bits *fds, s
44027 int retval, i, timed_out = 0;
44028 unsigned long slack = 0;
44029
44030 + pax_track_stack();
44031 +
44032 rcu_read_lock();
44033 retval = max_select_fd(n, fds);
44034 rcu_read_unlock();
44035 @@ -528,6 +531,8 @@ int core_sys_select(int n, fd_set __user
44036 /* Allocate small arguments on the stack to save memory and be faster */
44037 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
44038
44039 + pax_track_stack();
44040 +
44041 ret = -EINVAL;
44042 if (n < 0)
44043 goto out_nofds;
44044 @@ -837,6 +842,9 @@ int do_sys_poll(struct pollfd __user *uf
44045 struct poll_list *walk = head;
44046 unsigned long todo = nfds;
44047
44048 + pax_track_stack();
44049 +
44050 + gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
44051 if (nfds > rlimit(RLIMIT_NOFILE))
44052 return -EINVAL;
44053
44054 diff -urNp linux-3.0.4/fs/seq_file.c linux-3.0.4/fs/seq_file.c
44055 --- linux-3.0.4/fs/seq_file.c 2011-07-21 22:17:23.000000000 -0400
44056 +++ linux-3.0.4/fs/seq_file.c 2011-08-23 21:47:56.000000000 -0400
44057 @@ -76,7 +76,8 @@ static int traverse(struct seq_file *m,
44058 return 0;
44059 }
44060 if (!m->buf) {
44061 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
44062 + m->size = PAGE_SIZE;
44063 + m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
44064 if (!m->buf)
44065 return -ENOMEM;
44066 }
44067 @@ -116,7 +117,8 @@ static int traverse(struct seq_file *m,
44068 Eoverflow:
44069 m->op->stop(m, p);
44070 kfree(m->buf);
44071 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
44072 + m->size <<= 1;
44073 + m->buf = kmalloc(m->size, GFP_KERNEL);
44074 return !m->buf ? -ENOMEM : -EAGAIN;
44075 }
44076
44077 @@ -169,7 +171,8 @@ ssize_t seq_read(struct file *file, char
44078 m->version = file->f_version;
44079 /* grab buffer if we didn't have one */
44080 if (!m->buf) {
44081 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
44082 + m->size = PAGE_SIZE;
44083 + m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
44084 if (!m->buf)
44085 goto Enomem;
44086 }
44087 @@ -210,7 +213,8 @@ ssize_t seq_read(struct file *file, char
44088 goto Fill;
44089 m->op->stop(m, p);
44090 kfree(m->buf);
44091 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
44092 + m->size <<= 1;
44093 + m->buf = kmalloc(m->size, GFP_KERNEL);
44094 if (!m->buf)
44095 goto Enomem;
44096 m->count = 0;
44097 @@ -549,7 +553,7 @@ static void single_stop(struct seq_file
44098 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
44099 void *data)
44100 {
44101 - struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
44102 + seq_operations_no_const *op = kmalloc(sizeof(*op), GFP_KERNEL);
44103 int res = -ENOMEM;
44104
44105 if (op) {
44106 diff -urNp linux-3.0.4/fs/splice.c linux-3.0.4/fs/splice.c
44107 --- linux-3.0.4/fs/splice.c 2011-07-21 22:17:23.000000000 -0400
44108 +++ linux-3.0.4/fs/splice.c 2011-08-23 21:48:14.000000000 -0400
44109 @@ -194,7 +194,7 @@ ssize_t splice_to_pipe(struct pipe_inode
44110 pipe_lock(pipe);
44111
44112 for (;;) {
44113 - if (!pipe->readers) {
44114 + if (!atomic_read(&pipe->readers)) {
44115 send_sig(SIGPIPE, current, 0);
44116 if (!ret)
44117 ret = -EPIPE;
44118 @@ -248,9 +248,9 @@ ssize_t splice_to_pipe(struct pipe_inode
44119 do_wakeup = 0;
44120 }
44121
44122 - pipe->waiting_writers++;
44123 + atomic_inc(&pipe->waiting_writers);
44124 pipe_wait(pipe);
44125 - pipe->waiting_writers--;
44126 + atomic_dec(&pipe->waiting_writers);
44127 }
44128
44129 pipe_unlock(pipe);
44130 @@ -320,6 +320,8 @@ __generic_file_splice_read(struct file *
44131 .spd_release = spd_release_page,
44132 };
44133
44134 + pax_track_stack();
44135 +
44136 if (splice_grow_spd(pipe, &spd))
44137 return -ENOMEM;
44138
44139 @@ -560,7 +562,7 @@ static ssize_t kernel_readv(struct file
44140 old_fs = get_fs();
44141 set_fs(get_ds());
44142 /* The cast to a user pointer is valid due to the set_fs() */
44143 - res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
44144 + res = vfs_readv(file, (__force const struct iovec __user *)vec, vlen, &pos);
44145 set_fs(old_fs);
44146
44147 return res;
44148 @@ -575,7 +577,7 @@ static ssize_t kernel_write(struct file
44149 old_fs = get_fs();
44150 set_fs(get_ds());
44151 /* The cast to a user pointer is valid due to the set_fs() */
44152 - res = vfs_write(file, (const char __user *)buf, count, &pos);
44153 + res = vfs_write(file, (__force const char __user *)buf, count, &pos);
44154 set_fs(old_fs);
44155
44156 return res;
44157 @@ -603,6 +605,8 @@ ssize_t default_file_splice_read(struct
44158 .spd_release = spd_release_page,
44159 };
44160
44161 + pax_track_stack();
44162 +
44163 if (splice_grow_spd(pipe, &spd))
44164 return -ENOMEM;
44165
44166 @@ -626,7 +630,7 @@ ssize_t default_file_splice_read(struct
44167 goto err;
44168
44169 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
44170 - vec[i].iov_base = (void __user *) page_address(page);
44171 + vec[i].iov_base = (__force void __user *) page_address(page);
44172 vec[i].iov_len = this_len;
44173 spd.pages[i] = page;
44174 spd.nr_pages++;
44175 @@ -846,10 +850,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
44176 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
44177 {
44178 while (!pipe->nrbufs) {
44179 - if (!pipe->writers)
44180 + if (!atomic_read(&pipe->writers))
44181 return 0;
44182
44183 - if (!pipe->waiting_writers && sd->num_spliced)
44184 + if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
44185 return 0;
44186
44187 if (sd->flags & SPLICE_F_NONBLOCK)
44188 @@ -1182,7 +1186,7 @@ ssize_t splice_direct_to_actor(struct fi
44189 * out of the pipe right after the splice_to_pipe(). So set
44190 * PIPE_READERS appropriately.
44191 */
44192 - pipe->readers = 1;
44193 + atomic_set(&pipe->readers, 1);
44194
44195 current->splice_pipe = pipe;
44196 }
44197 @@ -1619,6 +1623,8 @@ static long vmsplice_to_pipe(struct file
44198 };
44199 long ret;
44200
44201 + pax_track_stack();
44202 +
44203 pipe = get_pipe_info(file);
44204 if (!pipe)
44205 return -EBADF;
44206 @@ -1734,9 +1740,9 @@ static int ipipe_prep(struct pipe_inode_
44207 ret = -ERESTARTSYS;
44208 break;
44209 }
44210 - if (!pipe->writers)
44211 + if (!atomic_read(&pipe->writers))
44212 break;
44213 - if (!pipe->waiting_writers) {
44214 + if (!atomic_read(&pipe->waiting_writers)) {
44215 if (flags & SPLICE_F_NONBLOCK) {
44216 ret = -EAGAIN;
44217 break;
44218 @@ -1768,7 +1774,7 @@ static int opipe_prep(struct pipe_inode_
44219 pipe_lock(pipe);
44220
44221 while (pipe->nrbufs >= pipe->buffers) {
44222 - if (!pipe->readers) {
44223 + if (!atomic_read(&pipe->readers)) {
44224 send_sig(SIGPIPE, current, 0);
44225 ret = -EPIPE;
44226 break;
44227 @@ -1781,9 +1787,9 @@ static int opipe_prep(struct pipe_inode_
44228 ret = -ERESTARTSYS;
44229 break;
44230 }
44231 - pipe->waiting_writers++;
44232 + atomic_inc(&pipe->waiting_writers);
44233 pipe_wait(pipe);
44234 - pipe->waiting_writers--;
44235 + atomic_dec(&pipe->waiting_writers);
44236 }
44237
44238 pipe_unlock(pipe);
44239 @@ -1819,14 +1825,14 @@ retry:
44240 pipe_double_lock(ipipe, opipe);
44241
44242 do {
44243 - if (!opipe->readers) {
44244 + if (!atomic_read(&opipe->readers)) {
44245 send_sig(SIGPIPE, current, 0);
44246 if (!ret)
44247 ret = -EPIPE;
44248 break;
44249 }
44250
44251 - if (!ipipe->nrbufs && !ipipe->writers)
44252 + if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
44253 break;
44254
44255 /*
44256 @@ -1923,7 +1929,7 @@ static int link_pipe(struct pipe_inode_i
44257 pipe_double_lock(ipipe, opipe);
44258
44259 do {
44260 - if (!opipe->readers) {
44261 + if (!atomic_read(&opipe->readers)) {
44262 send_sig(SIGPIPE, current, 0);
44263 if (!ret)
44264 ret = -EPIPE;
44265 @@ -1968,7 +1974,7 @@ static int link_pipe(struct pipe_inode_i
44266 * return EAGAIN if we have the potential of some data in the
44267 * future, otherwise just return 0
44268 */
44269 - if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
44270 + if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
44271 ret = -EAGAIN;
44272
44273 pipe_unlock(ipipe);
44274 diff -urNp linux-3.0.4/fs/sysfs/file.c linux-3.0.4/fs/sysfs/file.c
44275 --- linux-3.0.4/fs/sysfs/file.c 2011-07-21 22:17:23.000000000 -0400
44276 +++ linux-3.0.4/fs/sysfs/file.c 2011-08-23 21:47:56.000000000 -0400
44277 @@ -37,7 +37,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent
44278
44279 struct sysfs_open_dirent {
44280 atomic_t refcnt;
44281 - atomic_t event;
44282 + atomic_unchecked_t event;
44283 wait_queue_head_t poll;
44284 struct list_head buffers; /* goes through sysfs_buffer.list */
44285 };
44286 @@ -81,7 +81,7 @@ static int fill_read_buffer(struct dentr
44287 if (!sysfs_get_active(attr_sd))
44288 return -ENODEV;
44289
44290 - buffer->event = atomic_read(&attr_sd->s_attr.open->event);
44291 + buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
44292 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
44293
44294 sysfs_put_active(attr_sd);
44295 @@ -287,7 +287,7 @@ static int sysfs_get_open_dirent(struct
44296 return -ENOMEM;
44297
44298 atomic_set(&new_od->refcnt, 0);
44299 - atomic_set(&new_od->event, 1);
44300 + atomic_set_unchecked(&new_od->event, 1);
44301 init_waitqueue_head(&new_od->poll);
44302 INIT_LIST_HEAD(&new_od->buffers);
44303 goto retry;
44304 @@ -432,7 +432,7 @@ static unsigned int sysfs_poll(struct fi
44305
44306 sysfs_put_active(attr_sd);
44307
44308 - if (buffer->event != atomic_read(&od->event))
44309 + if (buffer->event != atomic_read_unchecked(&od->event))
44310 goto trigger;
44311
44312 return DEFAULT_POLLMASK;
44313 @@ -451,7 +451,7 @@ void sysfs_notify_dirent(struct sysfs_di
44314
44315 od = sd->s_attr.open;
44316 if (od) {
44317 - atomic_inc(&od->event);
44318 + atomic_inc_unchecked(&od->event);
44319 wake_up_interruptible(&od->poll);
44320 }
44321
44322 diff -urNp linux-3.0.4/fs/sysfs/mount.c linux-3.0.4/fs/sysfs/mount.c
44323 --- linux-3.0.4/fs/sysfs/mount.c 2011-07-21 22:17:23.000000000 -0400
44324 +++ linux-3.0.4/fs/sysfs/mount.c 2011-08-23 21:48:14.000000000 -0400
44325 @@ -36,7 +36,11 @@ struct sysfs_dirent sysfs_root = {
44326 .s_name = "",
44327 .s_count = ATOMIC_INIT(1),
44328 .s_flags = SYSFS_DIR | (KOBJ_NS_TYPE_NONE << SYSFS_NS_TYPE_SHIFT),
44329 +#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
44330 + .s_mode = S_IFDIR | S_IRWXU,
44331 +#else
44332 .s_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
44333 +#endif
44334 .s_ino = 1,
44335 };
44336
44337 diff -urNp linux-3.0.4/fs/sysfs/symlink.c linux-3.0.4/fs/sysfs/symlink.c
44338 --- linux-3.0.4/fs/sysfs/symlink.c 2011-07-21 22:17:23.000000000 -0400
44339 +++ linux-3.0.4/fs/sysfs/symlink.c 2011-08-23 21:47:56.000000000 -0400
44340 @@ -286,7 +286,7 @@ static void *sysfs_follow_link(struct de
44341
44342 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
44343 {
44344 - char *page = nd_get_link(nd);
44345 + const char *page = nd_get_link(nd);
44346 if (!IS_ERR(page))
44347 free_page((unsigned long)page);
44348 }
44349 diff -urNp linux-3.0.4/fs/udf/inode.c linux-3.0.4/fs/udf/inode.c
44350 --- linux-3.0.4/fs/udf/inode.c 2011-07-21 22:17:23.000000000 -0400
44351 +++ linux-3.0.4/fs/udf/inode.c 2011-08-23 21:48:14.000000000 -0400
44352 @@ -560,6 +560,8 @@ static struct buffer_head *inode_getblk(
44353 int goal = 0, pgoal = iinfo->i_location.logicalBlockNum;
44354 int lastblock = 0;
44355
44356 + pax_track_stack();
44357 +
44358 prev_epos.offset = udf_file_entry_alloc_offset(inode);
44359 prev_epos.block = iinfo->i_location;
44360 prev_epos.bh = NULL;
44361 diff -urNp linux-3.0.4/fs/udf/misc.c linux-3.0.4/fs/udf/misc.c
44362 --- linux-3.0.4/fs/udf/misc.c 2011-07-21 22:17:23.000000000 -0400
44363 +++ linux-3.0.4/fs/udf/misc.c 2011-08-23 21:47:56.000000000 -0400
44364 @@ -286,7 +286,7 @@ void udf_new_tag(char *data, uint16_t id
44365
44366 u8 udf_tag_checksum(const struct tag *t)
44367 {
44368 - u8 *data = (u8 *)t;
44369 + const u8 *data = (const u8 *)t;
44370 u8 checksum = 0;
44371 int i;
44372 for (i = 0; i < sizeof(struct tag); ++i)
44373 diff -urNp linux-3.0.4/fs/utimes.c linux-3.0.4/fs/utimes.c
44374 --- linux-3.0.4/fs/utimes.c 2011-07-21 22:17:23.000000000 -0400
44375 +++ linux-3.0.4/fs/utimes.c 2011-08-23 21:48:14.000000000 -0400
44376 @@ -1,6 +1,7 @@
44377 #include <linux/compiler.h>
44378 #include <linux/file.h>
44379 #include <linux/fs.h>
44380 +#include <linux/security.h>
44381 #include <linux/linkage.h>
44382 #include <linux/mount.h>
44383 #include <linux/namei.h>
44384 @@ -101,6 +102,12 @@ static int utimes_common(struct path *pa
44385 goto mnt_drop_write_and_out;
44386 }
44387 }
44388 +
44389 + if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
44390 + error = -EACCES;
44391 + goto mnt_drop_write_and_out;
44392 + }
44393 +
44394 mutex_lock(&inode->i_mutex);
44395 error = notify_change(path->dentry, &newattrs);
44396 mutex_unlock(&inode->i_mutex);
44397 diff -urNp linux-3.0.4/fs/xattr_acl.c linux-3.0.4/fs/xattr_acl.c
44398 --- linux-3.0.4/fs/xattr_acl.c 2011-07-21 22:17:23.000000000 -0400
44399 +++ linux-3.0.4/fs/xattr_acl.c 2011-08-23 21:47:56.000000000 -0400
44400 @@ -17,8 +17,8 @@
44401 struct posix_acl *
44402 posix_acl_from_xattr(const void *value, size_t size)
44403 {
44404 - posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
44405 - posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
44406 + const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
44407 + const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
44408 int count;
44409 struct posix_acl *acl;
44410 struct posix_acl_entry *acl_e;
44411 diff -urNp linux-3.0.4/fs/xattr.c linux-3.0.4/fs/xattr.c
44412 --- linux-3.0.4/fs/xattr.c 2011-07-21 22:17:23.000000000 -0400
44413 +++ linux-3.0.4/fs/xattr.c 2011-08-23 21:48:14.000000000 -0400
44414 @@ -254,7 +254,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
44415 * Extended attribute SET operations
44416 */
44417 static long
44418 -setxattr(struct dentry *d, const char __user *name, const void __user *value,
44419 +setxattr(struct path *path, const char __user *name, const void __user *value,
44420 size_t size, int flags)
44421 {
44422 int error;
44423 @@ -278,7 +278,13 @@ setxattr(struct dentry *d, const char __
44424 return PTR_ERR(kvalue);
44425 }
44426
44427 - error = vfs_setxattr(d, kname, kvalue, size, flags);
44428 + if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
44429 + error = -EACCES;
44430 + goto out;
44431 + }
44432 +
44433 + error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
44434 +out:
44435 kfree(kvalue);
44436 return error;
44437 }
44438 @@ -295,7 +301,7 @@ SYSCALL_DEFINE5(setxattr, const char __u
44439 return error;
44440 error = mnt_want_write(path.mnt);
44441 if (!error) {
44442 - error = setxattr(path.dentry, name, value, size, flags);
44443 + error = setxattr(&path, name, value, size, flags);
44444 mnt_drop_write(path.mnt);
44445 }
44446 path_put(&path);
44447 @@ -314,7 +320,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __
44448 return error;
44449 error = mnt_want_write(path.mnt);
44450 if (!error) {
44451 - error = setxattr(path.dentry, name, value, size, flags);
44452 + error = setxattr(&path, name, value, size, flags);
44453 mnt_drop_write(path.mnt);
44454 }
44455 path_put(&path);
44456 @@ -325,17 +331,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, cons
44457 const void __user *,value, size_t, size, int, flags)
44458 {
44459 struct file *f;
44460 - struct dentry *dentry;
44461 int error = -EBADF;
44462
44463 f = fget(fd);
44464 if (!f)
44465 return error;
44466 - dentry = f->f_path.dentry;
44467 - audit_inode(NULL, dentry);
44468 + audit_inode(NULL, f->f_path.dentry);
44469 error = mnt_want_write_file(f);
44470 if (!error) {
44471 - error = setxattr(dentry, name, value, size, flags);
44472 + error = setxattr(&f->f_path, name, value, size, flags);
44473 mnt_drop_write(f->f_path.mnt);
44474 }
44475 fput(f);
44476 diff -urNp linux-3.0.4/fs/xfs/linux-2.6/xfs_ioctl32.c linux-3.0.4/fs/xfs/linux-2.6/xfs_ioctl32.c
44477 --- linux-3.0.4/fs/xfs/linux-2.6/xfs_ioctl32.c 2011-07-21 22:17:23.000000000 -0400
44478 +++ linux-3.0.4/fs/xfs/linux-2.6/xfs_ioctl32.c 2011-08-23 21:48:14.000000000 -0400
44479 @@ -73,6 +73,7 @@ xfs_compat_ioc_fsgeometry_v1(
44480 xfs_fsop_geom_t fsgeo;
44481 int error;
44482
44483 + memset(&fsgeo, 0, sizeof(fsgeo));
44484 error = xfs_fs_geometry(mp, &fsgeo, 3);
44485 if (error)
44486 return -error;
44487 diff -urNp linux-3.0.4/fs/xfs/linux-2.6/xfs_ioctl.c linux-3.0.4/fs/xfs/linux-2.6/xfs_ioctl.c
44488 --- linux-3.0.4/fs/xfs/linux-2.6/xfs_ioctl.c 2011-07-21 22:17:23.000000000 -0400
44489 +++ linux-3.0.4/fs/xfs/linux-2.6/xfs_ioctl.c 2011-08-23 21:47:56.000000000 -0400
44490 @@ -128,7 +128,7 @@ xfs_find_handle(
44491 }
44492
44493 error = -EFAULT;
44494 - if (copy_to_user(hreq->ohandle, &handle, hsize) ||
44495 + if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
44496 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
44497 goto out_put;
44498
44499 diff -urNp linux-3.0.4/fs/xfs/linux-2.6/xfs_iops.c linux-3.0.4/fs/xfs/linux-2.6/xfs_iops.c
44500 --- linux-3.0.4/fs/xfs/linux-2.6/xfs_iops.c 2011-07-21 22:17:23.000000000 -0400
44501 +++ linux-3.0.4/fs/xfs/linux-2.6/xfs_iops.c 2011-08-23 21:47:56.000000000 -0400
44502 @@ -437,7 +437,7 @@ xfs_vn_put_link(
44503 struct nameidata *nd,
44504 void *p)
44505 {
44506 - char *s = nd_get_link(nd);
44507 + const char *s = nd_get_link(nd);
44508
44509 if (!IS_ERR(s))
44510 kfree(s);
44511 diff -urNp linux-3.0.4/fs/xfs/xfs_bmap.c linux-3.0.4/fs/xfs/xfs_bmap.c
44512 --- linux-3.0.4/fs/xfs/xfs_bmap.c 2011-07-21 22:17:23.000000000 -0400
44513 +++ linux-3.0.4/fs/xfs/xfs_bmap.c 2011-08-23 21:47:56.000000000 -0400
44514 @@ -253,7 +253,7 @@ xfs_bmap_validate_ret(
44515 int nmap,
44516 int ret_nmap);
44517 #else
44518 -#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
44519 +#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
44520 #endif /* DEBUG */
44521
44522 STATIC int
44523 diff -urNp linux-3.0.4/fs/xfs/xfs_dir2_sf.c linux-3.0.4/fs/xfs/xfs_dir2_sf.c
44524 --- linux-3.0.4/fs/xfs/xfs_dir2_sf.c 2011-07-21 22:17:23.000000000 -0400
44525 +++ linux-3.0.4/fs/xfs/xfs_dir2_sf.c 2011-08-23 21:47:56.000000000 -0400
44526 @@ -780,7 +780,15 @@ xfs_dir2_sf_getdents(
44527 }
44528
44529 ino = xfs_dir2_sf_get_inumber(sfp, xfs_dir2_sf_inumberp(sfep));
44530 - if (filldir(dirent, (char *)sfep->name, sfep->namelen,
44531 + if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
44532 + char name[sfep->namelen];
44533 + memcpy(name, sfep->name, sfep->namelen);
44534 + if (filldir(dirent, name, sfep->namelen,
44535 + off & 0x7fffffff, ino, DT_UNKNOWN)) {
44536 + *offset = off & 0x7fffffff;
44537 + return 0;
44538 + }
44539 + } else if (filldir(dirent, (char *)sfep->name, sfep->namelen,
44540 off & 0x7fffffff, ino, DT_UNKNOWN)) {
44541 *offset = off & 0x7fffffff;
44542 return 0;
44543 diff -urNp linux-3.0.4/grsecurity/gracl_alloc.c linux-3.0.4/grsecurity/gracl_alloc.c
44544 --- linux-3.0.4/grsecurity/gracl_alloc.c 1969-12-31 19:00:00.000000000 -0500
44545 +++ linux-3.0.4/grsecurity/gracl_alloc.c 2011-08-23 21:48:14.000000000 -0400
44546 @@ -0,0 +1,105 @@
44547 +#include <linux/kernel.h>
44548 +#include <linux/mm.h>
44549 +#include <linux/slab.h>
44550 +#include <linux/vmalloc.h>
44551 +#include <linux/gracl.h>
44552 +#include <linux/grsecurity.h>
44553 +
44554 +static unsigned long alloc_stack_next = 1;
44555 +static unsigned long alloc_stack_size = 1;
44556 +static void **alloc_stack;
44557 +
44558 +static __inline__ int
44559 +alloc_pop(void)
44560 +{
44561 + if (alloc_stack_next == 1)
44562 + return 0;
44563 +
44564 + kfree(alloc_stack[alloc_stack_next - 2]);
44565 +
44566 + alloc_stack_next--;
44567 +
44568 + return 1;
44569 +}
44570 +
44571 +static __inline__ int
44572 +alloc_push(void *buf)
44573 +{
44574 + if (alloc_stack_next >= alloc_stack_size)
44575 + return 1;
44576 +
44577 + alloc_stack[alloc_stack_next - 1] = buf;
44578 +
44579 + alloc_stack_next++;
44580 +
44581 + return 0;
44582 +}
44583 +
44584 +void *
44585 +acl_alloc(unsigned long len)
44586 +{
44587 + void *ret = NULL;
44588 +
44589 + if (!len || len > PAGE_SIZE)
44590 + goto out;
44591 +
44592 + ret = kmalloc(len, GFP_KERNEL);
44593 +
44594 + if (ret) {
44595 + if (alloc_push(ret)) {
44596 + kfree(ret);
44597 + ret = NULL;
44598 + }
44599 + }
44600 +
44601 +out:
44602 + return ret;
44603 +}
44604 +
44605 +void *
44606 +acl_alloc_num(unsigned long num, unsigned long len)
44607 +{
44608 + if (!len || (num > (PAGE_SIZE / len)))
44609 + return NULL;
44610 +
44611 + return acl_alloc(num * len);
44612 +}
44613 +
44614 +void
44615 +acl_free_all(void)
44616 +{
44617 + if (gr_acl_is_enabled() || !alloc_stack)
44618 + return;
44619 +
44620 + while (alloc_pop()) ;
44621 +
44622 + if (alloc_stack) {
44623 + if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
44624 + kfree(alloc_stack);
44625 + else
44626 + vfree(alloc_stack);
44627 + }
44628 +
44629 + alloc_stack = NULL;
44630 + alloc_stack_size = 1;
44631 + alloc_stack_next = 1;
44632 +
44633 + return;
44634 +}
44635 +
44636 +int
44637 +acl_alloc_stack_init(unsigned long size)
44638 +{
44639 + if ((size * sizeof (void *)) <= PAGE_SIZE)
44640 + alloc_stack =
44641 + (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
44642 + else
44643 + alloc_stack = (void **) vmalloc(size * sizeof (void *));
44644 +
44645 + alloc_stack_size = size;
44646 +
44647 + if (!alloc_stack)
44648 + return 0;
44649 + else
44650 + return 1;
44651 +}
44652 diff -urNp linux-3.0.4/grsecurity/gracl.c linux-3.0.4/grsecurity/gracl.c
44653 --- linux-3.0.4/grsecurity/gracl.c 1969-12-31 19:00:00.000000000 -0500
44654 +++ linux-3.0.4/grsecurity/gracl.c 2011-08-23 21:48:14.000000000 -0400
44655 @@ -0,0 +1,4106 @@
44656 +#include <linux/kernel.h>
44657 +#include <linux/module.h>
44658 +#include <linux/sched.h>
44659 +#include <linux/mm.h>
44660 +#include <linux/file.h>
44661 +#include <linux/fs.h>
44662 +#include <linux/namei.h>
44663 +#include <linux/mount.h>
44664 +#include <linux/tty.h>
44665 +#include <linux/proc_fs.h>
44666 +#include <linux/lglock.h>
44667 +#include <linux/slab.h>
44668 +#include <linux/vmalloc.h>
44669 +#include <linux/types.h>
44670 +#include <linux/sysctl.h>
44671 +#include <linux/netdevice.h>
44672 +#include <linux/ptrace.h>
44673 +#include <linux/gracl.h>
44674 +#include <linux/gralloc.h>
44675 +#include <linux/grsecurity.h>
44676 +#include <linux/grinternal.h>
44677 +#include <linux/pid_namespace.h>
44678 +#include <linux/fdtable.h>
44679 +#include <linux/percpu.h>
44680 +
44681 +#include <asm/uaccess.h>
44682 +#include <asm/errno.h>
44683 +#include <asm/mman.h>
44684 +
44685 +static struct acl_role_db acl_role_set;
44686 +static struct name_db name_set;
44687 +static struct inodev_db inodev_set;
44688 +
44689 +/* for keeping track of userspace pointers used for subjects, so we
44690 + can share references in the kernel as well
44691 +*/
44692 +
44693 +static struct path real_root;
44694 +
44695 +static struct acl_subj_map_db subj_map_set;
44696 +
44697 +static struct acl_role_label *default_role;
44698 +
44699 +static struct acl_role_label *role_list;
44700 +
44701 +static u16 acl_sp_role_value;
44702 +
44703 +extern char *gr_shared_page[4];
44704 +static DEFINE_MUTEX(gr_dev_mutex);
44705 +DEFINE_RWLOCK(gr_inode_lock);
44706 +
44707 +struct gr_arg *gr_usermode;
44708 +
44709 +static unsigned int gr_status __read_only = GR_STATUS_INIT;
44710 +
44711 +extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
44712 +extern void gr_clear_learn_entries(void);
44713 +
44714 +#ifdef CONFIG_GRKERNSEC_RESLOG
44715 +extern void gr_log_resource(const struct task_struct *task,
44716 + const int res, const unsigned long wanted, const int gt);
44717 +#endif
44718 +
44719 +unsigned char *gr_system_salt;
44720 +unsigned char *gr_system_sum;
44721 +
44722 +static struct sprole_pw **acl_special_roles = NULL;
44723 +static __u16 num_sprole_pws = 0;
44724 +
44725 +static struct acl_role_label *kernel_role = NULL;
44726 +
44727 +static unsigned int gr_auth_attempts = 0;
44728 +static unsigned long gr_auth_expires = 0UL;
44729 +
44730 +#ifdef CONFIG_NET
44731 +extern struct vfsmount *sock_mnt;
44732 +#endif
44733 +
44734 +extern struct vfsmount *pipe_mnt;
44735 +extern struct vfsmount *shm_mnt;
44736 +#ifdef CONFIG_HUGETLBFS
44737 +extern struct vfsmount *hugetlbfs_vfsmount;
44738 +#endif
44739 +
44740 +static struct acl_object_label *fakefs_obj_rw;
44741 +static struct acl_object_label *fakefs_obj_rwx;
44742 +
44743 +extern int gr_init_uidset(void);
44744 +extern void gr_free_uidset(void);
44745 +extern void gr_remove_uid(uid_t uid);
44746 +extern int gr_find_uid(uid_t uid);
44747 +
44748 +DECLARE_BRLOCK(vfsmount_lock);
44749 +
44750 +__inline__ int
44751 +gr_acl_is_enabled(void)
44752 +{
44753 + return (gr_status & GR_READY);
44754 +}
44755 +
44756 +#ifdef CONFIG_BTRFS_FS
44757 +extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
44758 +extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
44759 +#endif
44760 +
44761 +static inline dev_t __get_dev(const struct dentry *dentry)
44762 +{
44763 +#ifdef CONFIG_BTRFS_FS
44764 + if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
44765 + return get_btrfs_dev_from_inode(dentry->d_inode);
44766 + else
44767 +#endif
44768 + return dentry->d_inode->i_sb->s_dev;
44769 +}
44770 +
44771 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
44772 +{
44773 + return __get_dev(dentry);
44774 +}
44775 +
44776 +static char gr_task_roletype_to_char(struct task_struct *task)
44777 +{
44778 + switch (task->role->roletype &
44779 + (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
44780 + GR_ROLE_SPECIAL)) {
44781 + case GR_ROLE_DEFAULT:
44782 + return 'D';
44783 + case GR_ROLE_USER:
44784 + return 'U';
44785 + case GR_ROLE_GROUP:
44786 + return 'G';
44787 + case GR_ROLE_SPECIAL:
44788 + return 'S';
44789 + }
44790 +
44791 + return 'X';
44792 +}
44793 +
44794 +char gr_roletype_to_char(void)
44795 +{
44796 + return gr_task_roletype_to_char(current);
44797 +}
44798 +
44799 +__inline__ int
44800 +gr_acl_tpe_check(void)
44801 +{
44802 + if (unlikely(!(gr_status & GR_READY)))
44803 + return 0;
44804 + if (current->role->roletype & GR_ROLE_TPE)
44805 + return 1;
44806 + else
44807 + return 0;
44808 +}
44809 +
44810 +int
44811 +gr_handle_rawio(const struct inode *inode)
44812 +{
44813 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
44814 + if (inode && S_ISBLK(inode->i_mode) &&
44815 + grsec_enable_chroot_caps && proc_is_chrooted(current) &&
44816 + !capable(CAP_SYS_RAWIO))
44817 + return 1;
44818 +#endif
44819 + return 0;
44820 +}
44821 +
44822 +static int
44823 +gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
44824 +{
44825 + if (likely(lena != lenb))
44826 + return 0;
44827 +
44828 + return !memcmp(a, b, lena);
44829 +}
44830 +
44831 +static int prepend(char **buffer, int *buflen, const char *str, int namelen)
44832 +{
44833 + *buflen -= namelen;
44834 + if (*buflen < 0)
44835 + return -ENAMETOOLONG;
44836 + *buffer -= namelen;
44837 + memcpy(*buffer, str, namelen);
44838 + return 0;
44839 +}
44840 +
44841 +static int prepend_name(char **buffer, int *buflen, struct qstr *name)
44842 +{
44843 + return prepend(buffer, buflen, name->name, name->len);
44844 +}
44845 +
44846 +static int prepend_path(const struct path *path, struct path *root,
44847 + char **buffer, int *buflen)
44848 +{
44849 + struct dentry *dentry = path->dentry;
44850 + struct vfsmount *vfsmnt = path->mnt;
44851 + bool slash = false;
44852 + int error = 0;
44853 +
44854 + while (dentry != root->dentry || vfsmnt != root->mnt) {
44855 + struct dentry * parent;
44856 +
44857 + if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
44858 + /* Global root? */
44859 + if (vfsmnt->mnt_parent == vfsmnt) {
44860 + goto out;
44861 + }
44862 + dentry = vfsmnt->mnt_mountpoint;
44863 + vfsmnt = vfsmnt->mnt_parent;
44864 + continue;
44865 + }
44866 + parent = dentry->d_parent;
44867 + prefetch(parent);
44868 + spin_lock(&dentry->d_lock);
44869 + error = prepend_name(buffer, buflen, &dentry->d_name);
44870 + spin_unlock(&dentry->d_lock);
44871 + if (!error)
44872 + error = prepend(buffer, buflen, "/", 1);
44873 + if (error)
44874 + break;
44875 +
44876 + slash = true;
44877 + dentry = parent;
44878 + }
44879 +
44880 +out:
44881 + if (!error && !slash)
44882 + error = prepend(buffer, buflen, "/", 1);
44883 +
44884 + return error;
44885 +}
44886 +
44887 +/* this must be called with vfsmount_lock and rename_lock held */
44888 +
44889 +static char *__our_d_path(const struct path *path, struct path *root,
44890 + char *buf, int buflen)
44891 +{
44892 + char *res = buf + buflen;
44893 + int error;
44894 +
44895 + prepend(&res, &buflen, "\0", 1);
44896 + error = prepend_path(path, root, &res, &buflen);
44897 + if (error)
44898 + return ERR_PTR(error);
44899 +
44900 + return res;
44901 +}
44902 +
44903 +static char *
44904 +gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
44905 +{
44906 + char *retval;
44907 +
44908 + retval = __our_d_path(path, root, buf, buflen);
44909 + if (unlikely(IS_ERR(retval)))
44910 + retval = strcpy(buf, "<path too long>");
44911 + else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
44912 + retval[1] = '\0';
44913 +
44914 + return retval;
44915 +}
44916 +
44917 +static char *
44918 +__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
44919 + char *buf, int buflen)
44920 +{
44921 + struct path path;
44922 + char *res;
44923 +
44924 + path.dentry = (struct dentry *)dentry;
44925 + path.mnt = (struct vfsmount *)vfsmnt;
44926 +
44927 + /* we can use real_root.dentry, real_root.mnt, because this is only called
44928 + by the RBAC system */
44929 + res = gen_full_path(&path, &real_root, buf, buflen);
44930 +
44931 + return res;
44932 +}
44933 +
44934 +static char *
44935 +d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
44936 + char *buf, int buflen)
44937 +{
44938 + char *res;
44939 + struct path path;
44940 + struct path root;
44941 + struct task_struct *reaper = &init_task;
44942 +
44943 + path.dentry = (struct dentry *)dentry;
44944 + path.mnt = (struct vfsmount *)vfsmnt;
44945 +
44946 + /* we can't use real_root.dentry, real_root.mnt, because they belong only to the RBAC system */
44947 + get_fs_root(reaper->fs, &root);
44948 +
44949 + write_seqlock(&rename_lock);
44950 + br_read_lock(vfsmount_lock);
44951 + res = gen_full_path(&path, &root, buf, buflen);
44952 + br_read_unlock(vfsmount_lock);
44953 + write_sequnlock(&rename_lock);
44954 +
44955 + path_put(&root);
44956 + return res;
44957 +}
44958 +
44959 +static char *
44960 +gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
44961 +{
44962 + char *ret;
44963 + write_seqlock(&rename_lock);
44964 + br_read_lock(vfsmount_lock);
44965 + ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
44966 + PAGE_SIZE);
44967 + br_read_unlock(vfsmount_lock);
44968 + write_sequnlock(&rename_lock);
44969 + return ret;
44970 +}
44971 +
44972 +char *
44973 +gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
44974 +{
44975 + return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
44976 + PAGE_SIZE);
44977 +}
44978 +
44979 +char *
44980 +gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
44981 +{
44982 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
44983 + PAGE_SIZE);
44984 +}
44985 +
44986 +char *
44987 +gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
44988 +{
44989 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
44990 + PAGE_SIZE);
44991 +}
44992 +
44993 +char *
44994 +gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
44995 +{
44996 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
44997 + PAGE_SIZE);
44998 +}
44999 +
45000 +char *
45001 +gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
45002 +{
45003 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
45004 + PAGE_SIZE);
45005 +}
45006 +
45007 +__inline__ __u32
45008 +to_gr_audit(const __u32 reqmode)
45009 +{
45010 + /* masks off auditable permission flags, then shifts them to create
45011 + auditing flags, and adds the special case of append auditing if
45012 + we're requesting write */
45013 + return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
45014 +}
45015 +
45016 +struct acl_subject_label *
45017 +lookup_subject_map(const struct acl_subject_label *userp)
45018 +{
45019 + unsigned int index = shash(userp, subj_map_set.s_size);
45020 + struct subject_map *match;
45021 +
45022 + match = subj_map_set.s_hash[index];
45023 +
45024 + while (match && match->user != userp)
45025 + match = match->next;
45026 +
45027 + if (match != NULL)
45028 + return match->kernel;
45029 + else
45030 + return NULL;
45031 +}
45032 +
45033 +static void
45034 +insert_subj_map_entry(struct subject_map *subjmap)
45035 +{
45036 + unsigned int index = shash(subjmap->user, subj_map_set.s_size);
45037 + struct subject_map **curr;
45038 +
45039 + subjmap->prev = NULL;
45040 +
45041 + curr = &subj_map_set.s_hash[index];
45042 + if (*curr != NULL)
45043 + (*curr)->prev = subjmap;
45044 +
45045 + subjmap->next = *curr;
45046 + *curr = subjmap;
45047 +
45048 + return;
45049 +}
45050 +
45051 +static struct acl_role_label *
45052 +lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
45053 + const gid_t gid)
45054 +{
45055 + unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
45056 + struct acl_role_label *match;
45057 + struct role_allowed_ip *ipp;
45058 + unsigned int x;
45059 + u32 curr_ip = task->signal->curr_ip;
45060 +
45061 + task->signal->saved_ip = curr_ip;
45062 +
45063 + match = acl_role_set.r_hash[index];
45064 +
45065 + while (match) {
45066 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
45067 + for (x = 0; x < match->domain_child_num; x++) {
45068 + if (match->domain_children[x] == uid)
45069 + goto found;
45070 + }
45071 + } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
45072 + break;
45073 + match = match->next;
45074 + }
45075 +found:
45076 + if (match == NULL) {
45077 + try_group:
45078 + index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
45079 + match = acl_role_set.r_hash[index];
45080 +
45081 + while (match) {
45082 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
45083 + for (x = 0; x < match->domain_child_num; x++) {
45084 + if (match->domain_children[x] == gid)
45085 + goto found2;
45086 + }
45087 + } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
45088 + break;
45089 + match = match->next;
45090 + }
45091 +found2:
45092 + if (match == NULL)
45093 + match = default_role;
45094 + if (match->allowed_ips == NULL)
45095 + return match;
45096 + else {
45097 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
45098 + if (likely
45099 + ((ntohl(curr_ip) & ipp->netmask) ==
45100 + (ntohl(ipp->addr) & ipp->netmask)))
45101 + return match;
45102 + }
45103 + match = default_role;
45104 + }
45105 + } else if (match->allowed_ips == NULL) {
45106 + return match;
45107 + } else {
45108 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
45109 + if (likely
45110 + ((ntohl(curr_ip) & ipp->netmask) ==
45111 + (ntohl(ipp->addr) & ipp->netmask)))
45112 + return match;
45113 + }
45114 + goto try_group;
45115 + }
45116 +
45117 + return match;
45118 +}
45119 +
45120 +struct acl_subject_label *
45121 +lookup_acl_subj_label(const ino_t ino, const dev_t dev,
45122 + const struct acl_role_label *role)
45123 +{
45124 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
45125 + struct acl_subject_label *match;
45126 +
45127 + match = role->subj_hash[index];
45128 +
45129 + while (match && (match->inode != ino || match->device != dev ||
45130 + (match->mode & GR_DELETED))) {
45131 + match = match->next;
45132 + }
45133 +
45134 + if (match && !(match->mode & GR_DELETED))
45135 + return match;
45136 + else
45137 + return NULL;
45138 +}
45139 +
45140 +struct acl_subject_label *
45141 +lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
45142 + const struct acl_role_label *role)
45143 +{
45144 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
45145 + struct acl_subject_label *match;
45146 +
45147 + match = role->subj_hash[index];
45148 +
45149 + while (match && (match->inode != ino || match->device != dev ||
45150 + !(match->mode & GR_DELETED))) {
45151 + match = match->next;
45152 + }
45153 +
45154 + if (match && (match->mode & GR_DELETED))
45155 + return match;
45156 + else
45157 + return NULL;
45158 +}
45159 +
45160 +static struct acl_object_label *
45161 +lookup_acl_obj_label(const ino_t ino, const dev_t dev,
45162 + const struct acl_subject_label *subj)
45163 +{
45164 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
45165 + struct acl_object_label *match;
45166 +
45167 + match = subj->obj_hash[index];
45168 +
45169 + while (match && (match->inode != ino || match->device != dev ||
45170 + (match->mode & GR_DELETED))) {
45171 + match = match->next;
45172 + }
45173 +
45174 + if (match && !(match->mode & GR_DELETED))
45175 + return match;
45176 + else
45177 + return NULL;
45178 +}
45179 +
45180 +static struct acl_object_label *
45181 +lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
45182 + const struct acl_subject_label *subj)
45183 +{
45184 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
45185 + struct acl_object_label *match;
45186 +
45187 + match = subj->obj_hash[index];
45188 +
45189 + while (match && (match->inode != ino || match->device != dev ||
45190 + !(match->mode & GR_DELETED))) {
45191 + match = match->next;
45192 + }
45193 +
45194 + if (match && (match->mode & GR_DELETED))
45195 + return match;
45196 +
45197 + match = subj->obj_hash[index];
45198 +
45199 + while (match && (match->inode != ino || match->device != dev ||
45200 + (match->mode & GR_DELETED))) {
45201 + match = match->next;
45202 + }
45203 +
45204 + if (match && !(match->mode & GR_DELETED))
45205 + return match;
45206 + else
45207 + return NULL;
45208 +}
45209 +
45210 +static struct name_entry *
45211 +lookup_name_entry(const char *name)
45212 +{
45213 + unsigned int len = strlen(name);
45214 + unsigned int key = full_name_hash(name, len);
45215 + unsigned int index = key % name_set.n_size;
45216 + struct name_entry *match;
45217 +
45218 + match = name_set.n_hash[index];
45219 +
45220 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
45221 + match = match->next;
45222 +
45223 + return match;
45224 +}
45225 +
45226 +static struct name_entry *
45227 +lookup_name_entry_create(const char *name)
45228 +{
45229 + unsigned int len = strlen(name);
45230 + unsigned int key = full_name_hash(name, len);
45231 + unsigned int index = key % name_set.n_size;
45232 + struct name_entry *match;
45233 +
45234 + match = name_set.n_hash[index];
45235 +
45236 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
45237 + !match->deleted))
45238 + match = match->next;
45239 +
45240 + if (match && match->deleted)
45241 + return match;
45242 +
45243 + match = name_set.n_hash[index];
45244 +
45245 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
45246 + match->deleted))
45247 + match = match->next;
45248 +
45249 + if (match && !match->deleted)
45250 + return match;
45251 + else
45252 + return NULL;
45253 +}
45254 +
45255 +static struct inodev_entry *
45256 +lookup_inodev_entry(const ino_t ino, const dev_t dev)
45257 +{
45258 + unsigned int index = fhash(ino, dev, inodev_set.i_size);
45259 + struct inodev_entry *match;
45260 +
45261 + match = inodev_set.i_hash[index];
45262 +
45263 + while (match && (match->nentry->inode != ino || match->nentry->device != dev))
45264 + match = match->next;
45265 +
45266 + return match;
45267 +}
45268 +
45269 +static void
45270 +insert_inodev_entry(struct inodev_entry *entry)
45271 +{
45272 + unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
45273 + inodev_set.i_size);
45274 + struct inodev_entry **curr;
45275 +
45276 + entry->prev = NULL;
45277 +
45278 + curr = &inodev_set.i_hash[index];
45279 + if (*curr != NULL)
45280 + (*curr)->prev = entry;
45281 +
45282 + entry->next = *curr;
45283 + *curr = entry;
45284 +
45285 + return;
45286 +}
45287 +
45288 +static void
45289 +__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
45290 +{
45291 + unsigned int index =
45292 + rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
45293 + struct acl_role_label **curr;
45294 + struct acl_role_label *tmp;
45295 +
45296 + curr = &acl_role_set.r_hash[index];
45297 +
45298 + /* if role was already inserted due to domains and already has
45299 + a role in the same bucket as it attached, then we need to
45300 + combine these two buckets
45301 + */
45302 + if (role->next) {
45303 + tmp = role->next;
45304 + while (tmp->next)
45305 + tmp = tmp->next;
45306 + tmp->next = *curr;
45307 + } else
45308 + role->next = *curr;
45309 + *curr = role;
45310 +
45311 + return;
45312 +}
45313 +
45314 +static void
45315 +insert_acl_role_label(struct acl_role_label *role)
45316 +{
45317 + int i;
45318 +
45319 + if (role_list == NULL) {
45320 + role_list = role;
45321 + role->prev = NULL;
45322 + } else {
45323 + role->prev = role_list;
45324 + role_list = role;
45325 + }
45326 +
45327 + /* used for hash chains */
45328 + role->next = NULL;
45329 +
45330 + if (role->roletype & GR_ROLE_DOMAIN) {
45331 + for (i = 0; i < role->domain_child_num; i++)
45332 + __insert_acl_role_label(role, role->domain_children[i]);
45333 + } else
45334 + __insert_acl_role_label(role, role->uidgid);
45335 +}
45336 +
45337 +static int
45338 +insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
45339 +{
45340 + struct name_entry **curr, *nentry;
45341 + struct inodev_entry *ientry;
45342 + unsigned int len = strlen(name);
45343 + unsigned int key = full_name_hash(name, len);
45344 + unsigned int index = key % name_set.n_size;
45345 +
45346 + curr = &name_set.n_hash[index];
45347 +
45348 + while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
45349 + curr = &((*curr)->next);
45350 +
45351 + if (*curr != NULL)
45352 + return 1;
45353 +
45354 + nentry = acl_alloc(sizeof (struct name_entry));
45355 + if (nentry == NULL)
45356 + return 0;
45357 + ientry = acl_alloc(sizeof (struct inodev_entry));
45358 + if (ientry == NULL)
45359 + return 0;
45360 + ientry->nentry = nentry;
45361 +
45362 + nentry->key = key;
45363 + nentry->name = name;
45364 + nentry->inode = inode;
45365 + nentry->device = device;
45366 + nentry->len = len;
45367 + nentry->deleted = deleted;
45368 +
45369 + nentry->prev = NULL;
45370 + curr = &name_set.n_hash[index];
45371 + if (*curr != NULL)
45372 + (*curr)->prev = nentry;
45373 + nentry->next = *curr;
45374 + *curr = nentry;
45375 +
45376 + /* insert us into the table searchable by inode/dev */
45377 + insert_inodev_entry(ientry);
45378 +
45379 + return 1;
45380 +}
45381 +
45382 +static void
45383 +insert_acl_obj_label(struct acl_object_label *obj,
45384 + struct acl_subject_label *subj)
45385 +{
45386 + unsigned int index =
45387 + fhash(obj->inode, obj->device, subj->obj_hash_size);
45388 + struct acl_object_label **curr;
45389 +
45390 +
45391 + obj->prev = NULL;
45392 +
45393 + curr = &subj->obj_hash[index];
45394 + if (*curr != NULL)
45395 + (*curr)->prev = obj;
45396 +
45397 + obj->next = *curr;
45398 + *curr = obj;
45399 +
45400 + return;
45401 +}
45402 +
45403 +static void
45404 +insert_acl_subj_label(struct acl_subject_label *obj,
45405 + struct acl_role_label *role)
45406 +{
45407 + unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
45408 + struct acl_subject_label **curr;
45409 +
45410 + obj->prev = NULL;
45411 +
45412 + curr = &role->subj_hash[index];
45413 + if (*curr != NULL)
45414 + (*curr)->prev = obj;
45415 +
45416 + obj->next = *curr;
45417 + *curr = obj;
45418 +
45419 + return;
45420 +}
45421 +
45422 +/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
45423 +
45424 +static void *
45425 +create_table(__u32 * len, int elementsize)
45426 +{
45427 + unsigned int table_sizes[] = {
45428 + 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
45429 + 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
45430 + 4194301, 8388593, 16777213, 33554393, 67108859
45431 + };
45432 + void *newtable = NULL;
45433 + unsigned int pwr = 0;
45434 +
45435 + while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
45436 + table_sizes[pwr] <= *len)
45437 + pwr++;
45438 +
45439 + if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
45440 + return newtable;
45441 +
45442 + if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
45443 + newtable =
45444 + kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
45445 + else
45446 + newtable = vmalloc(table_sizes[pwr] * elementsize);
45447 +
45448 + *len = table_sizes[pwr];
45449 +
45450 + return newtable;
45451 +}
45452 +
45453 +static int
45454 +init_variables(const struct gr_arg *arg)
45455 +{
45456 + struct task_struct *reaper = &init_task;
45457 + unsigned int stacksize;
45458 +
45459 + subj_map_set.s_size = arg->role_db.num_subjects;
45460 + acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
45461 + name_set.n_size = arg->role_db.num_objects;
45462 + inodev_set.i_size = arg->role_db.num_objects;
45463 +
45464 + if (!subj_map_set.s_size || !acl_role_set.r_size ||
45465 + !name_set.n_size || !inodev_set.i_size)
45466 + return 1;
45467 +
45468 + if (!gr_init_uidset())
45469 + return 1;
45470 +
45471 + /* set up the stack that holds allocation info */
45472 +
45473 + stacksize = arg->role_db.num_pointers + 5;
45474 +
45475 + if (!acl_alloc_stack_init(stacksize))
45476 + return 1;
45477 +
45478 + /* grab reference for the real root dentry and vfsmount */
45479 + get_fs_root(reaper->fs, &real_root);
45480 +
45481 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
45482 + printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root.dentry), real_root.dentry->d_inode->i_ino);
45483 +#endif
45484 +
45485 + fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
45486 + if (fakefs_obj_rw == NULL)
45487 + return 1;
45488 + fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
45489 +
45490 + fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
45491 + if (fakefs_obj_rwx == NULL)
45492 + return 1;
45493 + fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
45494 +
45495 + subj_map_set.s_hash =
45496 + (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
45497 + acl_role_set.r_hash =
45498 + (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
45499 + name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
45500 + inodev_set.i_hash =
45501 + (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
45502 +
45503 + if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
45504 + !name_set.n_hash || !inodev_set.i_hash)
45505 + return 1;
45506 +
45507 + memset(subj_map_set.s_hash, 0,
45508 + sizeof(struct subject_map *) * subj_map_set.s_size);
45509 + memset(acl_role_set.r_hash, 0,
45510 + sizeof (struct acl_role_label *) * acl_role_set.r_size);
45511 + memset(name_set.n_hash, 0,
45512 + sizeof (struct name_entry *) * name_set.n_size);
45513 + memset(inodev_set.i_hash, 0,
45514 + sizeof (struct inodev_entry *) * inodev_set.i_size);
45515 +
45516 + return 0;
45517 +}
45518 +
45519 +/* free information not needed after startup
45520 + currently contains user->kernel pointer mappings for subjects
45521 +*/
45522 +
45523 +static void
45524 +free_init_variables(void)
45525 +{
45526 + __u32 i;
45527 +
45528 + if (subj_map_set.s_hash) {
45529 + for (i = 0; i < subj_map_set.s_size; i++) {
45530 + if (subj_map_set.s_hash[i]) {
45531 + kfree(subj_map_set.s_hash[i]);
45532 + subj_map_set.s_hash[i] = NULL;
45533 + }
45534 + }
45535 +
45536 + if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
45537 + PAGE_SIZE)
45538 + kfree(subj_map_set.s_hash);
45539 + else
45540 + vfree(subj_map_set.s_hash);
45541 + }
45542 +
45543 + return;
45544 +}
45545 +
45546 +static void
45547 +free_variables(void)
45548 +{
45549 + struct acl_subject_label *s;
45550 + struct acl_role_label *r;
45551 + struct task_struct *task, *task2;
45552 + unsigned int x;
45553 +
45554 + gr_clear_learn_entries();
45555 +
45556 + read_lock(&tasklist_lock);
45557 + do_each_thread(task2, task) {
45558 + task->acl_sp_role = 0;
45559 + task->acl_role_id = 0;
45560 + task->acl = NULL;
45561 + task->role = NULL;
45562 + } while_each_thread(task2, task);
45563 + read_unlock(&tasklist_lock);
45564 +
45565 + /* release the reference to the real root dentry and vfsmount */
45566 + path_put(&real_root);
45567 +
45568 + /* free all object hash tables */
45569 +
45570 + FOR_EACH_ROLE_START(r)
45571 + if (r->subj_hash == NULL)
45572 + goto next_role;
45573 + FOR_EACH_SUBJECT_START(r, s, x)
45574 + if (s->obj_hash == NULL)
45575 + break;
45576 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
45577 + kfree(s->obj_hash);
45578 + else
45579 + vfree(s->obj_hash);
45580 + FOR_EACH_SUBJECT_END(s, x)
45581 + FOR_EACH_NESTED_SUBJECT_START(r, s)
45582 + if (s->obj_hash == NULL)
45583 + break;
45584 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
45585 + kfree(s->obj_hash);
45586 + else
45587 + vfree(s->obj_hash);
45588 + FOR_EACH_NESTED_SUBJECT_END(s)
45589 + if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
45590 + kfree(r->subj_hash);
45591 + else
45592 + vfree(r->subj_hash);
45593 + r->subj_hash = NULL;
45594 +next_role:
45595 + FOR_EACH_ROLE_END(r)
45596 +
45597 + acl_free_all();
45598 +
45599 + if (acl_role_set.r_hash) {
45600 + if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
45601 + PAGE_SIZE)
45602 + kfree(acl_role_set.r_hash);
45603 + else
45604 + vfree(acl_role_set.r_hash);
45605 + }
45606 + if (name_set.n_hash) {
45607 + if ((name_set.n_size * sizeof (struct name_entry *)) <=
45608 + PAGE_SIZE)
45609 + kfree(name_set.n_hash);
45610 + else
45611 + vfree(name_set.n_hash);
45612 + }
45613 +
45614 + if (inodev_set.i_hash) {
45615 + if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
45616 + PAGE_SIZE)
45617 + kfree(inodev_set.i_hash);
45618 + else
45619 + vfree(inodev_set.i_hash);
45620 + }
45621 +
45622 + gr_free_uidset();
45623 +
45624 + memset(&name_set, 0, sizeof (struct name_db));
45625 + memset(&inodev_set, 0, sizeof (struct inodev_db));
45626 + memset(&acl_role_set, 0, sizeof (struct acl_role_db));
45627 + memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
45628 +
45629 + default_role = NULL;
45630 + role_list = NULL;
45631 +
45632 + return;
45633 +}
45634 +
45635 +static __u32
45636 +count_user_objs(struct acl_object_label *userp)
45637 +{
45638 + struct acl_object_label o_tmp;
45639 + __u32 num = 0;
45640 +
45641 + while (userp) {
45642 + if (copy_from_user(&o_tmp, userp,
45643 + sizeof (struct acl_object_label)))
45644 + break;
45645 +
45646 + userp = o_tmp.prev;
45647 + num++;
45648 + }
45649 +
45650 + return num;
45651 +}
45652 +
45653 +static struct acl_subject_label *
45654 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
45655 +
45656 +static int
45657 +copy_user_glob(struct acl_object_label *obj)
45658 +{
45659 + struct acl_object_label *g_tmp, **guser;
45660 + unsigned int len;
45661 + char *tmp;
45662 +
45663 + if (obj->globbed == NULL)
45664 + return 0;
45665 +
45666 + guser = &obj->globbed;
45667 + while (*guser) {
45668 + g_tmp = (struct acl_object_label *)
45669 + acl_alloc(sizeof (struct acl_object_label));
45670 + if (g_tmp == NULL)
45671 + return -ENOMEM;
45672 +
45673 + if (copy_from_user(g_tmp, *guser,
45674 + sizeof (struct acl_object_label)))
45675 + return -EFAULT;
45676 +
45677 + len = strnlen_user(g_tmp->filename, PATH_MAX);
45678 +
45679 + if (!len || len >= PATH_MAX)
45680 + return -EINVAL;
45681 +
45682 + if ((tmp = (char *) acl_alloc(len)) == NULL)
45683 + return -ENOMEM;
45684 +
45685 + if (copy_from_user(tmp, g_tmp->filename, len))
45686 + return -EFAULT;
45687 + tmp[len-1] = '\0';
45688 + g_tmp->filename = tmp;
45689 +
45690 + *guser = g_tmp;
45691 + guser = &(g_tmp->next);
45692 + }
45693 +
45694 + return 0;
45695 +}
45696 +
45697 +static int
45698 +copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
45699 + struct acl_role_label *role)
45700 +{
45701 + struct acl_object_label *o_tmp;
45702 + unsigned int len;
45703 + int ret;
45704 + char *tmp;
45705 +
45706 + while (userp) {
45707 + if ((o_tmp = (struct acl_object_label *)
45708 + acl_alloc(sizeof (struct acl_object_label))) == NULL)
45709 + return -ENOMEM;
45710 +
45711 + if (copy_from_user(o_tmp, userp,
45712 + sizeof (struct acl_object_label)))
45713 + return -EFAULT;
45714 +
45715 + userp = o_tmp->prev;
45716 +
45717 + len = strnlen_user(o_tmp->filename, PATH_MAX);
45718 +
45719 + if (!len || len >= PATH_MAX)
45720 + return -EINVAL;
45721 +
45722 + if ((tmp = (char *) acl_alloc(len)) == NULL)
45723 + return -ENOMEM;
45724 +
45725 + if (copy_from_user(tmp, o_tmp->filename, len))
45726 + return -EFAULT;
45727 + tmp[len-1] = '\0';
45728 + o_tmp->filename = tmp;
45729 +
45730 + insert_acl_obj_label(o_tmp, subj);
45731 + if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
45732 + o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
45733 + return -ENOMEM;
45734 +
45735 + ret = copy_user_glob(o_tmp);
45736 + if (ret)
45737 + return ret;
45738 +
45739 + if (o_tmp->nested) {
45740 + o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
45741 + if (IS_ERR(o_tmp->nested))
45742 + return PTR_ERR(o_tmp->nested);
45743 +
45744 + /* insert into nested subject list */
45745 + o_tmp->nested->next = role->hash->first;
45746 + role->hash->first = o_tmp->nested;
45747 + }
45748 + }
45749 +
45750 + return 0;
45751 +}
45752 +
45753 +static __u32
45754 +count_user_subjs(struct acl_subject_label *userp)
45755 +{
45756 + struct acl_subject_label s_tmp;
45757 + __u32 num = 0;
45758 +
45759 + while (userp) {
45760 + if (copy_from_user(&s_tmp, userp,
45761 + sizeof (struct acl_subject_label)))
45762 + break;
45763 +
45764 + userp = s_tmp.prev;
45765 + /* do not count nested subjects against this count, since
45766 + they are not included in the hash table, but are
45767 + attached to objects. We have already counted
45768 + the subjects in userspace for the allocation
45769 + stack
45770 + */
45771 + if (!(s_tmp.mode & GR_NESTED))
45772 + num++;
45773 + }
45774 +
45775 + return num;
45776 +}
45777 +
45778 +static int
45779 +copy_user_allowedips(struct acl_role_label *rolep)
45780 +{
45781 + struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
45782 +
45783 + ruserip = rolep->allowed_ips;
45784 +
45785 + while (ruserip) {
45786 + rlast = rtmp;
45787 +
45788 + if ((rtmp = (struct role_allowed_ip *)
45789 + acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
45790 + return -ENOMEM;
45791 +
45792 + if (copy_from_user(rtmp, ruserip,
45793 + sizeof (struct role_allowed_ip)))
45794 + return -EFAULT;
45795 +
45796 + ruserip = rtmp->prev;
45797 +
45798 + if (!rlast) {
45799 + rtmp->prev = NULL;
45800 + rolep->allowed_ips = rtmp;
45801 + } else {
45802 + rlast->next = rtmp;
45803 + rtmp->prev = rlast;
45804 + }
45805 +
45806 + if (!ruserip)
45807 + rtmp->next = NULL;
45808 + }
45809 +
45810 + return 0;
45811 +}
45812 +
45813 +static int
45814 +copy_user_transitions(struct acl_role_label *rolep)
45815 +{
45816 + struct role_transition *rusertp, *rtmp = NULL, *rlast;
45817 +
45818 + unsigned int len;
45819 + char *tmp;
45820 +
45821 + rusertp = rolep->transitions;
45822 +
45823 + while (rusertp) {
45824 + rlast = rtmp;
45825 +
45826 + if ((rtmp = (struct role_transition *)
45827 + acl_alloc(sizeof (struct role_transition))) == NULL)
45828 + return -ENOMEM;
45829 +
45830 + if (copy_from_user(rtmp, rusertp,
45831 + sizeof (struct role_transition)))
45832 + return -EFAULT;
45833 +
45834 + rusertp = rtmp->prev;
45835 +
45836 + len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
45837 +
45838 + if (!len || len >= GR_SPROLE_LEN)
45839 + return -EINVAL;
45840 +
45841 + if ((tmp = (char *) acl_alloc(len)) == NULL)
45842 + return -ENOMEM;
45843 +
45844 + if (copy_from_user(tmp, rtmp->rolename, len))
45845 + return -EFAULT;
45846 + tmp[len-1] = '\0';
45847 + rtmp->rolename = tmp;
45848 +
45849 + if (!rlast) {
45850 + rtmp->prev = NULL;
45851 + rolep->transitions = rtmp;
45852 + } else {
45853 + rlast->next = rtmp;
45854 + rtmp->prev = rlast;
45855 + }
45856 +
45857 + if (!rusertp)
45858 + rtmp->next = NULL;
45859 + }
45860 +
45861 + return 0;
45862 +}
45863 +
45864 +static struct acl_subject_label *
45865 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
45866 +{
45867 + struct acl_subject_label *s_tmp = NULL, *s_tmp2;
45868 + unsigned int len;
45869 + char *tmp;
45870 + __u32 num_objs;
45871 + struct acl_ip_label **i_tmp, *i_utmp2;
45872 + struct gr_hash_struct ghash;
45873 + struct subject_map *subjmap;
45874 + unsigned int i_num;
45875 + int err;
45876 +
45877 + s_tmp = lookup_subject_map(userp);
45878 +
45879 + /* we've already copied this subject into the kernel, just return
45880 + the reference to it, and don't copy it over again
45881 + */
45882 + if (s_tmp)
45883 + return(s_tmp);
45884 +
45885 + if ((s_tmp = (struct acl_subject_label *)
45886 + acl_alloc(sizeof (struct acl_subject_label))) == NULL)
45887 + return ERR_PTR(-ENOMEM);
45888 +
45889 + subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
45890 + if (subjmap == NULL)
45891 + return ERR_PTR(-ENOMEM);
45892 +
45893 + subjmap->user = userp;
45894 + subjmap->kernel = s_tmp;
45895 + insert_subj_map_entry(subjmap);
45896 +
45897 + if (copy_from_user(s_tmp, userp,
45898 + sizeof (struct acl_subject_label)))
45899 + return ERR_PTR(-EFAULT);
45900 +
45901 + len = strnlen_user(s_tmp->filename, PATH_MAX);
45902 +
45903 + if (!len || len >= PATH_MAX)
45904 + return ERR_PTR(-EINVAL);
45905 +
45906 + if ((tmp = (char *) acl_alloc(len)) == NULL)
45907 + return ERR_PTR(-ENOMEM);
45908 +
45909 + if (copy_from_user(tmp, s_tmp->filename, len))
45910 + return ERR_PTR(-EFAULT);
45911 + tmp[len-1] = '\0';
45912 + s_tmp->filename = tmp;
45913 +
45914 + if (!strcmp(s_tmp->filename, "/"))
45915 + role->root_label = s_tmp;
45916 +
45917 + if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
45918 + return ERR_PTR(-EFAULT);
45919 +
45920 + /* copy user and group transition tables */
45921 +
45922 + if (s_tmp->user_trans_num) {
45923 + uid_t *uidlist;
45924 +
45925 + uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
45926 + if (uidlist == NULL)
45927 + return ERR_PTR(-ENOMEM);
45928 + if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
45929 + return ERR_PTR(-EFAULT);
45930 +
45931 + s_tmp->user_transitions = uidlist;
45932 + }
45933 +
45934 + if (s_tmp->group_trans_num) {
45935 + gid_t *gidlist;
45936 +
45937 + gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
45938 + if (gidlist == NULL)
45939 + return ERR_PTR(-ENOMEM);
45940 + if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
45941 + return ERR_PTR(-EFAULT);
45942 +
45943 + s_tmp->group_transitions = gidlist;
45944 + }
45945 +
45946 + /* set up object hash table */
45947 + num_objs = count_user_objs(ghash.first);
45948 +
45949 + s_tmp->obj_hash_size = num_objs;
45950 + s_tmp->obj_hash =
45951 + (struct acl_object_label **)
45952 + create_table(&(s_tmp->obj_hash_size), sizeof(void *));
45953 +
45954 + if (!s_tmp->obj_hash)
45955 + return ERR_PTR(-ENOMEM);
45956 +
45957 + memset(s_tmp->obj_hash, 0,
45958 + s_tmp->obj_hash_size *
45959 + sizeof (struct acl_object_label *));
45960 +
45961 + /* add in objects */
45962 + err = copy_user_objs(ghash.first, s_tmp, role);
45963 +
45964 + if (err)
45965 + return ERR_PTR(err);
45966 +
45967 + /* set pointer for parent subject */
45968 + if (s_tmp->parent_subject) {
45969 + s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
45970 +
45971 + if (IS_ERR(s_tmp2))
45972 + return s_tmp2;
45973 +
45974 + s_tmp->parent_subject = s_tmp2;
45975 + }
45976 +
45977 + /* add in ip acls */
45978 +
45979 + if (!s_tmp->ip_num) {
45980 + s_tmp->ips = NULL;
45981 + goto insert;
45982 + }
45983 +
45984 + i_tmp =
45985 + (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
45986 + sizeof (struct acl_ip_label *));
45987 +
45988 + if (!i_tmp)
45989 + return ERR_PTR(-ENOMEM);
45990 +
45991 + for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
45992 + *(i_tmp + i_num) =
45993 + (struct acl_ip_label *)
45994 + acl_alloc(sizeof (struct acl_ip_label));
45995 + if (!*(i_tmp + i_num))
45996 + return ERR_PTR(-ENOMEM);
45997 +
45998 + if (copy_from_user
45999 + (&i_utmp2, s_tmp->ips + i_num,
46000 + sizeof (struct acl_ip_label *)))
46001 + return ERR_PTR(-EFAULT);
46002 +
46003 + if (copy_from_user
46004 + (*(i_tmp + i_num), i_utmp2,
46005 + sizeof (struct acl_ip_label)))
46006 + return ERR_PTR(-EFAULT);
46007 +
46008 + if ((*(i_tmp + i_num))->iface == NULL)
46009 + continue;
46010 +
46011 + len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
46012 + if (!len || len >= IFNAMSIZ)
46013 + return ERR_PTR(-EINVAL);
46014 + tmp = acl_alloc(len);
46015 + if (tmp == NULL)
46016 + return ERR_PTR(-ENOMEM);
46017 + if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
46018 + return ERR_PTR(-EFAULT);
46019 + (*(i_tmp + i_num))->iface = tmp;
46020 + }
46021 +
46022 + s_tmp->ips = i_tmp;
46023 +
46024 +insert:
46025 + if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
46026 + s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
46027 + return ERR_PTR(-ENOMEM);
46028 +
46029 + return s_tmp;
46030 +}
46031 +
46032 +static int
46033 +copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
46034 +{
46035 + struct acl_subject_label s_pre;
46036 + struct acl_subject_label * ret;
46037 + int err;
46038 +
46039 + while (userp) {
46040 + if (copy_from_user(&s_pre, userp,
46041 + sizeof (struct acl_subject_label)))
46042 + return -EFAULT;
46043 +
46044 + /* do not add nested subjects here, add
46045 + while parsing objects
46046 + */
46047 +
46048 + if (s_pre.mode & GR_NESTED) {
46049 + userp = s_pre.prev;
46050 + continue;
46051 + }
46052 +
46053 + ret = do_copy_user_subj(userp, role);
46054 +
46055 + err = PTR_ERR(ret);
46056 + if (IS_ERR(ret))
46057 + return err;
46058 +
46059 + insert_acl_subj_label(ret, role);
46060 +
46061 + userp = s_pre.prev;
46062 + }
46063 +
46064 + return 0;
46065 +}
46066 +
46067 +static int
46068 +copy_user_acl(struct gr_arg *arg)
46069 +{
46070 + struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
46071 + struct sprole_pw *sptmp;
46072 + struct gr_hash_struct *ghash;
46073 + uid_t *domainlist;
46074 + unsigned int r_num;
46075 + unsigned int len;
46076 + char *tmp;
46077 + int err = 0;
46078 + __u16 i;
46079 + __u32 num_subjs;
46080 +
46081 + /* we need a default and kernel role */
46082 + if (arg->role_db.num_roles < 2)
46083 + return -EINVAL;
46084 +
46085 + /* copy special role authentication info from userspace */
46086 +
46087 + num_sprole_pws = arg->num_sprole_pws;
46088 + acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
46089 +
46090 + if (!acl_special_roles) {
46091 + err = -ENOMEM;
46092 + goto cleanup;
46093 + }
46094 +
46095 + for (i = 0; i < num_sprole_pws; i++) {
46096 + sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
46097 + if (!sptmp) {
46098 + err = -ENOMEM;
46099 + goto cleanup;
46100 + }
46101 + if (copy_from_user(sptmp, arg->sprole_pws + i,
46102 + sizeof (struct sprole_pw))) {
46103 + err = -EFAULT;
46104 + goto cleanup;
46105 + }
46106 +
46107 + len =
46108 + strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
46109 +
46110 + if (!len || len >= GR_SPROLE_LEN) {
46111 + err = -EINVAL;
46112 + goto cleanup;
46113 + }
46114 +
46115 + if ((tmp = (char *) acl_alloc(len)) == NULL) {
46116 + err = -ENOMEM;
46117 + goto cleanup;
46118 + }
46119 +
46120 + if (copy_from_user(tmp, sptmp->rolename, len)) {
46121 + err = -EFAULT;
46122 + goto cleanup;
46123 + }
46124 + tmp[len-1] = '\0';
46125 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
46126 + printk(KERN_ALERT "Copying special role %s\n", tmp);
46127 +#endif
46128 + sptmp->rolename = tmp;
46129 + acl_special_roles[i] = sptmp;
46130 + }
46131 +
46132 + r_utmp = (struct acl_role_label **) arg->role_db.r_table;
46133 +
46134 + for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
46135 + r_tmp = acl_alloc(sizeof (struct acl_role_label));
46136 +
46137 + if (!r_tmp) {
46138 + err = -ENOMEM;
46139 + goto cleanup;
46140 + }
46141 +
46142 + if (copy_from_user(&r_utmp2, r_utmp + r_num,
46143 + sizeof (struct acl_role_label *))) {
46144 + err = -EFAULT;
46145 + goto cleanup;
46146 + }
46147 +
46148 + if (copy_from_user(r_tmp, r_utmp2,
46149 + sizeof (struct acl_role_label))) {
46150 + err = -EFAULT;
46151 + goto cleanup;
46152 + }
46153 +
46154 + len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
46155 +
46156 + if (!len || len >= PATH_MAX) {
46157 + err = -EINVAL;
46158 + goto cleanup;
46159 + }
46160 +
46161 + if ((tmp = (char *) acl_alloc(len)) == NULL) {
46162 + err = -ENOMEM;
46163 + goto cleanup;
46164 + }
46165 + if (copy_from_user(tmp, r_tmp->rolename, len)) {
46166 + err = -EFAULT;
46167 + goto cleanup;
46168 + }
46169 + tmp[len-1] = '\0';
46170 + r_tmp->rolename = tmp;
46171 +
46172 + if (!strcmp(r_tmp->rolename, "default")
46173 + && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
46174 + default_role = r_tmp;
46175 + } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
46176 + kernel_role = r_tmp;
46177 + }
46178 +
46179 + if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL) {
46180 + err = -ENOMEM;
46181 + goto cleanup;
46182 + }
46183 + if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct))) {
46184 + err = -EFAULT;
46185 + goto cleanup;
46186 + }
46187 +
46188 + r_tmp->hash = ghash;
46189 +
46190 + num_subjs = count_user_subjs(r_tmp->hash->first);
46191 +
46192 + r_tmp->subj_hash_size = num_subjs;
46193 + r_tmp->subj_hash =
46194 + (struct acl_subject_label **)
46195 + create_table(&(r_tmp->subj_hash_size), sizeof(void *));
46196 +
46197 + if (!r_tmp->subj_hash) {
46198 + err = -ENOMEM;
46199 + goto cleanup;
46200 + }
46201 +
46202 + err = copy_user_allowedips(r_tmp);
46203 + if (err)
46204 + goto cleanup;
46205 +
46206 + /* copy domain info */
46207 + if (r_tmp->domain_children != NULL) {
46208 + domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
46209 + if (domainlist == NULL) {
46210 + err = -ENOMEM;
46211 + goto cleanup;
46212 + }
46213 + if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t))) {
46214 + err = -EFAULT;
46215 + goto cleanup;
46216 + }
46217 + r_tmp->domain_children = domainlist;
46218 + }
46219 +
46220 + err = copy_user_transitions(r_tmp);
46221 + if (err)
46222 + goto cleanup;
46223 +
46224 + memset(r_tmp->subj_hash, 0,
46225 + r_tmp->subj_hash_size *
46226 + sizeof (struct acl_subject_label *));
46227 +
46228 + err = copy_user_subjs(r_tmp->hash->first, r_tmp);
46229 +
46230 + if (err)
46231 + goto cleanup;
46232 +
46233 + /* set nested subject list to null */
46234 + r_tmp->hash->first = NULL;
46235 +
46236 + insert_acl_role_label(r_tmp);
46237 + }
46238 +
46239 + goto return_err;
46240 + cleanup:
46241 + free_variables();
46242 + return_err:
46243 + return err;
46244 +
46245 +}
46246 +
46247 +static int
46248 +gracl_init(struct gr_arg *args)
46249 +{
46250 + int error = 0;
46251 +
46252 + memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
46253 + memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
46254 +
46255 + if (init_variables(args)) {
46256 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
46257 + error = -ENOMEM;
46258 + free_variables();
46259 + goto out;
46260 + }
46261 +
46262 + error = copy_user_acl(args);
46263 + free_init_variables();
46264 + if (error) {
46265 + free_variables();
46266 + goto out;
46267 + }
46268 +
46269 + if ((error = gr_set_acls(0))) {
46270 + free_variables();
46271 + goto out;
46272 + }
46273 +
46274 + pax_open_kernel();
46275 + gr_status |= GR_READY;
46276 + pax_close_kernel();
46277 +
46278 + out:
46279 + return error;
46280 +}
46281 +
46282 +/* derived from glibc fnmatch() 0: match, 1: no match*/
46283 +
46284 +static int
46285 +glob_match(const char *p, const char *n)
46286 +{
46287 + char c;
46288 +
46289 + while ((c = *p++) != '\0') {
46290 + switch (c) {
46291 + case '?':
46292 + if (*n == '\0')
46293 + return 1;
46294 + else if (*n == '/')
46295 + return 1;
46296 + break;
46297 + case '\\':
46298 + if (*n != c)
46299 + return 1;
46300 + break;
46301 + case '*':
46302 + for (c = *p++; c == '?' || c == '*'; c = *p++) {
46303 + if (*n == '/')
46304 + return 1;
46305 + else if (c == '?') {
46306 + if (*n == '\0')
46307 + return 1;
46308 + else
46309 + ++n;
46310 + }
46311 + }
46312 + if (c == '\0') {
46313 + return 0;
46314 + } else {
46315 + const char *endp;
46316 +
46317 + if ((endp = strchr(n, '/')) == NULL)
46318 + endp = n + strlen(n);
46319 +
46320 + if (c == '[') {
46321 + for (--p; n < endp; ++n)
46322 + if (!glob_match(p, n))
46323 + return 0;
46324 + } else if (c == '/') {
46325 + while (*n != '\0' && *n != '/')
46326 + ++n;
46327 + if (*n == '/' && !glob_match(p, n + 1))
46328 + return 0;
46329 + } else {
46330 + for (--p; n < endp; ++n)
46331 + if (*n == c && !glob_match(p, n))
46332 + return 0;
46333 + }
46334 +
46335 + return 1;
46336 + }
46337 + case '[':
46338 + {
46339 + int not;
46340 + char cold;
46341 +
46342 + if (*n == '\0' || *n == '/')
46343 + return 1;
46344 +
46345 + not = (*p == '!' || *p == '^');
46346 + if (not)
46347 + ++p;
46348 +
46349 + c = *p++;
46350 + for (;;) {
46351 + unsigned char fn = (unsigned char)*n;
46352 +
46353 + if (c == '\0')
46354 + return 1;
46355 + else {
46356 + if (c == fn)
46357 + goto matched;
46358 + cold = c;
46359 + c = *p++;
46360 +
46361 + if (c == '-' && *p != ']') {
46362 + unsigned char cend = *p++;
46363 +
46364 + if (cend == '\0')
46365 + return 1;
46366 +
46367 + if (cold <= fn && fn <= cend)
46368 + goto matched;
46369 +
46370 + c = *p++;
46371 + }
46372 + }
46373 +
46374 + if (c == ']')
46375 + break;
46376 + }
46377 + if (!not)
46378 + return 1;
46379 + break;
46380 + matched:
46381 + while (c != ']') {
46382 + if (c == '\0')
46383 + return 1;
46384 +
46385 + c = *p++;
46386 + }
46387 + if (not)
46388 + return 1;
46389 + }
46390 + break;
46391 + default:
46392 + if (c != *n)
46393 + return 1;
46394 + }
46395 +
46396 + ++n;
46397 + }
46398 +
46399 + if (*n == '\0')
46400 + return 0;
46401 +
46402 + if (*n == '/')
46403 + return 0;
46404 +
46405 + return 1;
46406 +}
46407 +
46408 +static struct acl_object_label *
46409 +chk_glob_label(struct acl_object_label *globbed,
46410 + struct dentry *dentry, struct vfsmount *mnt, char **path)
46411 +{
46412 + struct acl_object_label *tmp;
46413 +
46414 + if (*path == NULL)
46415 + *path = gr_to_filename_nolock(dentry, mnt);
46416 +
46417 + tmp = globbed;
46418 +
46419 + while (tmp) {
46420 + if (!glob_match(tmp->filename, *path))
46421 + return tmp;
46422 + tmp = tmp->next;
46423 + }
46424 +
46425 + return NULL;
46426 +}
46427 +
46428 +static struct acl_object_label *
46429 +__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
46430 + const ino_t curr_ino, const dev_t curr_dev,
46431 + const struct acl_subject_label *subj, char **path, const int checkglob)
46432 +{
46433 + struct acl_subject_label *tmpsubj;
46434 + struct acl_object_label *retval;
46435 + struct acl_object_label *retval2;
46436 +
46437 + tmpsubj = (struct acl_subject_label *) subj;
46438 + read_lock(&gr_inode_lock);
46439 + do {
46440 + retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
46441 + if (retval) {
46442 + if (checkglob && retval->globbed) {
46443 + retval2 = chk_glob_label(retval->globbed, (struct dentry *)orig_dentry,
46444 + (struct vfsmount *)orig_mnt, path);
46445 + if (retval2)
46446 + retval = retval2;
46447 + }
46448 + break;
46449 + }
46450 + } while ((tmpsubj = tmpsubj->parent_subject));
46451 + read_unlock(&gr_inode_lock);
46452 +
46453 + return retval;
46454 +}
46455 +
46456 +static __inline__ struct acl_object_label *
46457 +full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
46458 + struct dentry *curr_dentry,
46459 + const struct acl_subject_label *subj, char **path, const int checkglob)
46460 +{
46461 + int newglob = checkglob;
46462 + ino_t inode;
46463 + dev_t device;
46464 +
46465 + /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
46466 + as we don't want a / * rule to match instead of the / object
46467 + don't do this for create lookups that call this function though, since they're looking up
46468 + on the parent and thus need globbing checks on all paths
46469 + */
46470 + if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
46471 + newglob = GR_NO_GLOB;
46472 +
46473 + spin_lock(&curr_dentry->d_lock);
46474 + inode = curr_dentry->d_inode->i_ino;
46475 + device = __get_dev(curr_dentry);
46476 + spin_unlock(&curr_dentry->d_lock);
46477 +
46478 + return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
46479 +}
46480 +
46481 +static struct acl_object_label *
46482 +__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
46483 + const struct acl_subject_label *subj, char *path, const int checkglob)
46484 +{
46485 + struct dentry *dentry = (struct dentry *) l_dentry;
46486 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
46487 + struct acl_object_label *retval;
46488 + struct dentry *parent;
46489 +
46490 + write_seqlock(&rename_lock);
46491 + br_read_lock(vfsmount_lock);
46492 +
46493 + if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
46494 +#ifdef CONFIG_NET
46495 + mnt == sock_mnt ||
46496 +#endif
46497 +#ifdef CONFIG_HUGETLBFS
46498 + (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
46499 +#endif
46500 + /* ignore Eric Biederman */
46501 + IS_PRIVATE(l_dentry->d_inode))) {
46502 + retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
46503 + goto out;
46504 + }
46505 +
46506 + for (;;) {
46507 + if (dentry == real_root.dentry && mnt == real_root.mnt)
46508 + break;
46509 +
46510 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
46511 + if (mnt->mnt_parent == mnt)
46512 + break;
46513 +
46514 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
46515 + if (retval != NULL)
46516 + goto out;
46517 +
46518 + dentry = mnt->mnt_mountpoint;
46519 + mnt = mnt->mnt_parent;
46520 + continue;
46521 + }
46522 +
46523 + parent = dentry->d_parent;
46524 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
46525 + if (retval != NULL)
46526 + goto out;
46527 +
46528 + dentry = parent;
46529 + }
46530 +
46531 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
46532 +
46533 + /* real_root is pinned so we don't have to hold a reference */
46534 + if (retval == NULL)
46535 + retval = full_lookup(l_dentry, l_mnt, real_root.dentry, subj, &path, checkglob);
46536 +out:
46537 + br_read_unlock(vfsmount_lock);
46538 + write_sequnlock(&rename_lock);
46539 +
46540 + BUG_ON(retval == NULL);
46541 +
46542 + return retval;
46543 +}
46544 +
46545 +static __inline__ struct acl_object_label *
46546 +chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
46547 + const struct acl_subject_label *subj)
46548 +{
46549 + char *path = NULL;
46550 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
46551 +}
46552 +
46553 +static __inline__ struct acl_object_label *
46554 +chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
46555 + const struct acl_subject_label *subj)
46556 +{
46557 + char *path = NULL;
46558 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
46559 +}
46560 +
46561 +static __inline__ struct acl_object_label *
46562 +chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
46563 + const struct acl_subject_label *subj, char *path)
46564 +{
46565 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
46566 +}
46567 +
46568 +static struct acl_subject_label *
46569 +chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
46570 + const struct acl_role_label *role)
46571 +{
46572 + struct dentry *dentry = (struct dentry *) l_dentry;
46573 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
46574 + struct acl_subject_label *retval;
46575 + struct dentry *parent;
46576 +
46577 + write_seqlock(&rename_lock);
46578 + br_read_lock(vfsmount_lock);
46579 +
46580 + for (;;) {
46581 + if (dentry == real_root.dentry && mnt == real_root.mnt)
46582 + break;
46583 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
46584 + if (mnt->mnt_parent == mnt)
46585 + break;
46586 +
46587 + spin_lock(&dentry->d_lock);
46588 + read_lock(&gr_inode_lock);
46589 + retval =
46590 + lookup_acl_subj_label(dentry->d_inode->i_ino,
46591 + __get_dev(dentry), role);
46592 + read_unlock(&gr_inode_lock);
46593 + spin_unlock(&dentry->d_lock);
46594 + if (retval != NULL)
46595 + goto out;
46596 +
46597 + dentry = mnt->mnt_mountpoint;
46598 + mnt = mnt->mnt_parent;
46599 + continue;
46600 + }
46601 +
46602 + spin_lock(&dentry->d_lock);
46603 + read_lock(&gr_inode_lock);
46604 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
46605 + __get_dev(dentry), role);
46606 + read_unlock(&gr_inode_lock);
46607 + parent = dentry->d_parent;
46608 + spin_unlock(&dentry->d_lock);
46609 +
46610 + if (retval != NULL)
46611 + goto out;
46612 +
46613 + dentry = parent;
46614 + }
46615 +
46616 + spin_lock(&dentry->d_lock);
46617 + read_lock(&gr_inode_lock);
46618 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
46619 + __get_dev(dentry), role);
46620 + read_unlock(&gr_inode_lock);
46621 + spin_unlock(&dentry->d_lock);
46622 +
46623 + if (unlikely(retval == NULL)) {
46624 + /* real_root is pinned, we don't need to hold a reference */
46625 + read_lock(&gr_inode_lock);
46626 + retval = lookup_acl_subj_label(real_root.dentry->d_inode->i_ino,
46627 + __get_dev(real_root.dentry), role);
46628 + read_unlock(&gr_inode_lock);
46629 + }
46630 +out:
46631 + br_read_unlock(vfsmount_lock);
46632 + write_sequnlock(&rename_lock);
46633 +
46634 + BUG_ON(retval == NULL);
46635 +
46636 + return retval;
46637 +}
46638 +
46639 +static void
46640 +gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
46641 +{
46642 + struct task_struct *task = current;
46643 + const struct cred *cred = current_cred();
46644 +
46645 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
46646 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
46647 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
46648 + 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
46649 +
46650 + return;
46651 +}
46652 +
46653 +static void
46654 +gr_log_learn_sysctl(const char *path, const __u32 mode)
46655 +{
46656 + struct task_struct *task = current;
46657 + const struct cred *cred = current_cred();
46658 +
46659 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
46660 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
46661 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
46662 + 1UL, 1UL, path, (unsigned long) mode, &task->signal->saved_ip);
46663 +
46664 + return;
46665 +}
46666 +
46667 +static void
46668 +gr_log_learn_id_change(const char type, const unsigned int real,
46669 + const unsigned int effective, const unsigned int fs)
46670 +{
46671 + struct task_struct *task = current;
46672 + const struct cred *cred = current_cred();
46673 +
46674 + security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
46675 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
46676 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
46677 + type, real, effective, fs, &task->signal->saved_ip);
46678 +
46679 + return;
46680 +}
46681 +
46682 +__u32
46683 +gr_check_link(const struct dentry * new_dentry,
46684 + const struct dentry * parent_dentry,
46685 + const struct vfsmount * parent_mnt,
46686 + const struct dentry * old_dentry, const struct vfsmount * old_mnt)
46687 +{
46688 + struct acl_object_label *obj;
46689 + __u32 oldmode, newmode;
46690 + __u32 needmode;
46691 +
46692 + if (unlikely(!(gr_status & GR_READY)))
46693 + return (GR_CREATE | GR_LINK);
46694 +
46695 + obj = chk_obj_label(old_dentry, old_mnt, current->acl);
46696 + oldmode = obj->mode;
46697 +
46698 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
46699 + oldmode |= (GR_CREATE | GR_LINK);
46700 +
46701 + needmode = GR_CREATE | GR_AUDIT_CREATE | GR_SUPPRESS;
46702 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
46703 + needmode |= GR_SETID | GR_AUDIT_SETID;
46704 +
46705 + newmode =
46706 + gr_check_create(new_dentry, parent_dentry, parent_mnt,
46707 + oldmode | needmode);
46708 +
46709 + needmode = newmode & (GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC |
46710 + GR_SETID | GR_READ | GR_FIND | GR_DELETE |
46711 + GR_INHERIT | GR_AUDIT_INHERIT);
46712 +
46713 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID) && !(newmode & GR_SETID))
46714 + goto bad;
46715 +
46716 + if ((oldmode & needmode) != needmode)
46717 + goto bad;
46718 +
46719 + needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
46720 + if ((newmode & needmode) != needmode)
46721 + goto bad;
46722 +
46723 + if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
46724 + return newmode;
46725 +bad:
46726 + needmode = oldmode;
46727 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
46728 + needmode |= GR_SETID;
46729 +
46730 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
46731 + gr_log_learn(old_dentry, old_mnt, needmode);
46732 + return (GR_CREATE | GR_LINK);
46733 + } else if (newmode & GR_SUPPRESS)
46734 + return GR_SUPPRESS;
46735 + else
46736 + return 0;
46737 +}
46738 +
46739 +__u32
46740 +gr_search_file(const struct dentry * dentry, const __u32 mode,
46741 + const struct vfsmount * mnt)
46742 +{
46743 + __u32 retval = mode;
46744 + struct acl_subject_label *curracl;
46745 + struct acl_object_label *currobj;
46746 +
46747 + if (unlikely(!(gr_status & GR_READY)))
46748 + return (mode & ~GR_AUDITS);
46749 +
46750 + curracl = current->acl;
46751 +
46752 + currobj = chk_obj_label(dentry, mnt, curracl);
46753 + retval = currobj->mode & mode;
46754 +
46755 + /* if we're opening a specified transfer file for writing
46756 + (e.g. /dev/initctl), then transfer our role to init
46757 + */
46758 + if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
46759 + current->role->roletype & GR_ROLE_PERSIST)) {
46760 + struct task_struct *task = init_pid_ns.child_reaper;
46761 +
46762 + if (task->role != current->role) {
46763 + task->acl_sp_role = 0;
46764 + task->acl_role_id = current->acl_role_id;
46765 + task->role = current->role;
46766 + rcu_read_lock();
46767 + read_lock(&grsec_exec_file_lock);
46768 + gr_apply_subject_to_task(task);
46769 + read_unlock(&grsec_exec_file_lock);
46770 + rcu_read_unlock();
46771 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
46772 + }
46773 + }
46774 +
46775 + if (unlikely
46776 + ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
46777 + && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
46778 + __u32 new_mode = mode;
46779 +
46780 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
46781 +
46782 + retval = new_mode;
46783 +
46784 + if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
46785 + new_mode |= GR_INHERIT;
46786 +
46787 + if (!(mode & GR_NOLEARN))
46788 + gr_log_learn(dentry, mnt, new_mode);
46789 + }
46790 +
46791 + return retval;
46792 +}
46793 +
46794 +__u32
46795 +gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
46796 + const struct vfsmount * mnt, const __u32 mode)
46797 +{
46798 + struct name_entry *match;
46799 + struct acl_object_label *matchpo;
46800 + struct acl_subject_label *curracl;
46801 + char *path;
46802 + __u32 retval;
46803 +
46804 + if (unlikely(!(gr_status & GR_READY)))
46805 + return (mode & ~GR_AUDITS);
46806 +
46807 + preempt_disable();
46808 + path = gr_to_filename_rbac(new_dentry, mnt);
46809 + match = lookup_name_entry_create(path);
46810 +
46811 + if (!match)
46812 + goto check_parent;
46813 +
46814 + curracl = current->acl;
46815 +
46816 + read_lock(&gr_inode_lock);
46817 + matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
46818 + read_unlock(&gr_inode_lock);
46819 +
46820 + if (matchpo) {
46821 + if ((matchpo->mode & mode) !=
46822 + (mode & ~(GR_AUDITS | GR_SUPPRESS))
46823 + && curracl->mode & (GR_LEARN | GR_INHERITLEARN)) {
46824 + __u32 new_mode = mode;
46825 +
46826 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
46827 +
46828 + gr_log_learn(new_dentry, mnt, new_mode);
46829 +
46830 + preempt_enable();
46831 + return new_mode;
46832 + }
46833 + preempt_enable();
46834 + return (matchpo->mode & mode);
46835 + }
46836 +
46837 + check_parent:
46838 + curracl = current->acl;
46839 +
46840 + matchpo = chk_obj_create_label(parent, mnt, curracl, path);
46841 + retval = matchpo->mode & mode;
46842 +
46843 + if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
46844 + && (curracl->mode & (GR_LEARN | GR_INHERITLEARN))) {
46845 + __u32 new_mode = mode;
46846 +
46847 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
46848 +
46849 + gr_log_learn(new_dentry, mnt, new_mode);
46850 + preempt_enable();
46851 + return new_mode;
46852 + }
46853 +
46854 + preempt_enable();
46855 + return retval;
46856 +}
46857 +
46858 +int
46859 +gr_check_hidden_task(const struct task_struct *task)
46860 +{
46861 + if (unlikely(!(gr_status & GR_READY)))
46862 + return 0;
46863 +
46864 + if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
46865 + return 1;
46866 +
46867 + return 0;
46868 +}
46869 +
46870 +int
46871 +gr_check_protected_task(const struct task_struct *task)
46872 +{
46873 + if (unlikely(!(gr_status & GR_READY) || !task))
46874 + return 0;
46875 +
46876 + if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
46877 + task->acl != current->acl)
46878 + return 1;
46879 +
46880 + return 0;
46881 +}
46882 +
46883 +int
46884 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
46885 +{
46886 + struct task_struct *p;
46887 + int ret = 0;
46888 +
46889 + if (unlikely(!(gr_status & GR_READY) || !pid))
46890 + return ret;
46891 +
46892 + read_lock(&tasklist_lock);
46893 + do_each_pid_task(pid, type, p) {
46894 + if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
46895 + p->acl != current->acl) {
46896 + ret = 1;
46897 + goto out;
46898 + }
46899 + } while_each_pid_task(pid, type, p);
46900 +out:
46901 + read_unlock(&tasklist_lock);
46902 +
46903 + return ret;
46904 +}
46905 +
46906 +void
46907 +gr_copy_label(struct task_struct *tsk)
46908 +{
46909 + tsk->signal->used_accept = 0;
46910 + tsk->acl_sp_role = 0;
46911 + tsk->acl_role_id = current->acl_role_id;
46912 + tsk->acl = current->acl;
46913 + tsk->role = current->role;
46914 + tsk->signal->curr_ip = current->signal->curr_ip;
46915 + tsk->signal->saved_ip = current->signal->saved_ip;
46916 + if (current->exec_file)
46917 + get_file(current->exec_file);
46918 + tsk->exec_file = current->exec_file;
46919 + tsk->is_writable = current->is_writable;
46920 + if (unlikely(current->signal->used_accept)) {
46921 + current->signal->curr_ip = 0;
46922 + current->signal->saved_ip = 0;
46923 + }
46924 +
46925 + return;
46926 +}
46927 +
46928 +static void
46929 +gr_set_proc_res(struct task_struct *task)
46930 +{
46931 + struct acl_subject_label *proc;
46932 + unsigned short i;
46933 +
46934 + proc = task->acl;
46935 +
46936 + if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
46937 + return;
46938 +
46939 + for (i = 0; i < RLIM_NLIMITS; i++) {
46940 + if (!(proc->resmask & (1 << i)))
46941 + continue;
46942 +
46943 + task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
46944 + task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
46945 + }
46946 +
46947 + return;
46948 +}
46949 +
46950 +extern int __gr_process_user_ban(struct user_struct *user);
46951 +
46952 +int
46953 +gr_check_user_change(int real, int effective, int fs)
46954 +{
46955 + unsigned int i;
46956 + __u16 num;
46957 + uid_t *uidlist;
46958 + int curuid;
46959 + int realok = 0;
46960 + int effectiveok = 0;
46961 + int fsok = 0;
46962 +
46963 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
46964 + struct user_struct *user;
46965 +
46966 + if (real == -1)
46967 + goto skipit;
46968 +
46969 + user = find_user(real);
46970 + if (user == NULL)
46971 + goto skipit;
46972 +
46973 + if (__gr_process_user_ban(user)) {
46974 + /* for find_user */
46975 + free_uid(user);
46976 + return 1;
46977 + }
46978 +
46979 + /* for find_user */
46980 + free_uid(user);
46981 +
46982 +skipit:
46983 +#endif
46984 +
46985 + if (unlikely(!(gr_status & GR_READY)))
46986 + return 0;
46987 +
46988 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
46989 + gr_log_learn_id_change('u', real, effective, fs);
46990 +
46991 + num = current->acl->user_trans_num;
46992 + uidlist = current->acl->user_transitions;
46993 +
46994 + if (uidlist == NULL)
46995 + return 0;
46996 +
46997 + if (real == -1)
46998 + realok = 1;
46999 + if (effective == -1)
47000 + effectiveok = 1;
47001 + if (fs == -1)
47002 + fsok = 1;
47003 +
47004 + if (current->acl->user_trans_type & GR_ID_ALLOW) {
47005 + for (i = 0; i < num; i++) {
47006 + curuid = (int)uidlist[i];
47007 + if (real == curuid)
47008 + realok = 1;
47009 + if (effective == curuid)
47010 + effectiveok = 1;
47011 + if (fs == curuid)
47012 + fsok = 1;
47013 + }
47014 + } else if (current->acl->user_trans_type & GR_ID_DENY) {
47015 + for (i = 0; i < num; i++) {
47016 + curuid = (int)uidlist[i];
47017 + if (real == curuid)
47018 + break;
47019 + if (effective == curuid)
47020 + break;
47021 + if (fs == curuid)
47022 + break;
47023 + }
47024 + /* not in deny list */
47025 + if (i == num) {
47026 + realok = 1;
47027 + effectiveok = 1;
47028 + fsok = 1;
47029 + }
47030 + }
47031 +
47032 + if (realok && effectiveok && fsok)
47033 + return 0;
47034 + else {
47035 + gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
47036 + return 1;
47037 + }
47038 +}
47039 +
47040 +int
47041 +gr_check_group_change(int real, int effective, int fs)
47042 +{
47043 + unsigned int i;
47044 + __u16 num;
47045 + gid_t *gidlist;
47046 + int curgid;
47047 + int realok = 0;
47048 + int effectiveok = 0;
47049 + int fsok = 0;
47050 +
47051 + if (unlikely(!(gr_status & GR_READY)))
47052 + return 0;
47053 +
47054 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
47055 + gr_log_learn_id_change('g', real, effective, fs);
47056 +
47057 + num = current->acl->group_trans_num;
47058 + gidlist = current->acl->group_transitions;
47059 +
47060 + if (gidlist == NULL)
47061 + return 0;
47062 +
47063 + if (real == -1)
47064 + realok = 1;
47065 + if (effective == -1)
47066 + effectiveok = 1;
47067 + if (fs == -1)
47068 + fsok = 1;
47069 +
47070 + if (current->acl->group_trans_type & GR_ID_ALLOW) {
47071 + for (i = 0; i < num; i++) {
47072 + curgid = (int)gidlist[i];
47073 + if (real == curgid)
47074 + realok = 1;
47075 + if (effective == curgid)
47076 + effectiveok = 1;
47077 + if (fs == curgid)
47078 + fsok = 1;
47079 + }
47080 + } else if (current->acl->group_trans_type & GR_ID_DENY) {
47081 + for (i = 0; i < num; i++) {
47082 + curgid = (int)gidlist[i];
47083 + if (real == curgid)
47084 + break;
47085 + if (effective == curgid)
47086 + break;
47087 + if (fs == curgid)
47088 + break;
47089 + }
47090 + /* not in deny list */
47091 + if (i == num) {
47092 + realok = 1;
47093 + effectiveok = 1;
47094 + fsok = 1;
47095 + }
47096 + }
47097 +
47098 + if (realok && effectiveok && fsok)
47099 + return 0;
47100 + else {
47101 + gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
47102 + return 1;
47103 + }
47104 +}
47105 +
47106 +void
47107 +gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
47108 +{
47109 + struct acl_role_label *role = task->role;
47110 + struct acl_subject_label *subj = NULL;
47111 + struct acl_object_label *obj;
47112 + struct file *filp;
47113 +
47114 + if (unlikely(!(gr_status & GR_READY)))
47115 + return;
47116 +
47117 + filp = task->exec_file;
47118 +
47119 + /* kernel process, we'll give them the kernel role */
47120 + if (unlikely(!filp)) {
47121 + task->role = kernel_role;
47122 + task->acl = kernel_role->root_label;
47123 + return;
47124 + } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
47125 + role = lookup_acl_role_label(task, uid, gid);
47126 +
47127 + /* perform subject lookup in possibly new role
47128 + we can use this result below in the case where role == task->role
47129 + */
47130 + subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
47131 +
47132 + /* if we changed uid/gid, but result in the same role
47133 + and are using inheritance, don't lose the inherited subject
47134 + if current subject is other than what normal lookup
47135 + would result in, we arrived via inheritance, don't
47136 + lose subject
47137 + */
47138 + if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
47139 + (subj == task->acl)))
47140 + task->acl = subj;
47141 +
47142 + task->role = role;
47143 +
47144 + task->is_writable = 0;
47145 +
47146 + /* ignore additional mmap checks for processes that are writable
47147 + by the default ACL */
47148 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
47149 + if (unlikely(obj->mode & GR_WRITE))
47150 + task->is_writable = 1;
47151 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
47152 + if (unlikely(obj->mode & GR_WRITE))
47153 + task->is_writable = 1;
47154 +
47155 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
47156 + printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
47157 +#endif
47158 +
47159 + gr_set_proc_res(task);
47160 +
47161 + return;
47162 +}
47163 +
47164 +int
47165 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
47166 + const int unsafe_share)
47167 +{
47168 + struct task_struct *task = current;
47169 + struct acl_subject_label *newacl;
47170 + struct acl_object_label *obj;
47171 + __u32 retmode;
47172 +
47173 + if (unlikely(!(gr_status & GR_READY)))
47174 + return 0;
47175 +
47176 + newacl = chk_subj_label(dentry, mnt, task->role);
47177 +
47178 + task_lock(task);
47179 + if ((((task->ptrace & PT_PTRACED) || unsafe_share) &&
47180 + !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
47181 + !(task->role->roletype & GR_ROLE_GOD) &&
47182 + !gr_search_file(dentry, GR_PTRACERD, mnt) &&
47183 + !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN)))) {
47184 + task_unlock(task);
47185 + if (unsafe_share)
47186 + gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
47187 + else
47188 + gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
47189 + return -EACCES;
47190 + }
47191 + task_unlock(task);
47192 +
47193 + obj = chk_obj_label(dentry, mnt, task->acl);
47194 + retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
47195 +
47196 + if (!(task->acl->mode & GR_INHERITLEARN) &&
47197 + ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
47198 + if (obj->nested)
47199 + task->acl = obj->nested;
47200 + else
47201 + task->acl = newacl;
47202 + } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
47203 + gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
47204 +
47205 + task->is_writable = 0;
47206 +
47207 + /* ignore additional mmap checks for processes that are writable
47208 + by the default ACL */
47209 + obj = chk_obj_label(dentry, mnt, default_role->root_label);
47210 + if (unlikely(obj->mode & GR_WRITE))
47211 + task->is_writable = 1;
47212 + obj = chk_obj_label(dentry, mnt, task->role->root_label);
47213 + if (unlikely(obj->mode & GR_WRITE))
47214 + task->is_writable = 1;
47215 +
47216 + gr_set_proc_res(task);
47217 +
47218 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
47219 + printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
47220 +#endif
47221 + return 0;
47222 +}
47223 +
47224 +/* always called with valid inodev ptr */
47225 +static void
47226 +do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
47227 +{
47228 + struct acl_object_label *matchpo;
47229 + struct acl_subject_label *matchps;
47230 + struct acl_subject_label *subj;
47231 + struct acl_role_label *role;
47232 + unsigned int x;
47233 +
47234 + FOR_EACH_ROLE_START(role)
47235 + FOR_EACH_SUBJECT_START(role, subj, x)
47236 + if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
47237 + matchpo->mode |= GR_DELETED;
47238 + FOR_EACH_SUBJECT_END(subj,x)
47239 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
47240 + if (subj->inode == ino && subj->device == dev)
47241 + subj->mode |= GR_DELETED;
47242 + FOR_EACH_NESTED_SUBJECT_END(subj)
47243 + if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
47244 + matchps->mode |= GR_DELETED;
47245 + FOR_EACH_ROLE_END(role)
47246 +
47247 + inodev->nentry->deleted = 1;
47248 +
47249 + return;
47250 +}
47251 +
47252 +void
47253 +gr_handle_delete(const ino_t ino, const dev_t dev)
47254 +{
47255 + struct inodev_entry *inodev;
47256 +
47257 + if (unlikely(!(gr_status & GR_READY)))
47258 + return;
47259 +
47260 + write_lock(&gr_inode_lock);
47261 + inodev = lookup_inodev_entry(ino, dev);
47262 + if (inodev != NULL)
47263 + do_handle_delete(inodev, ino, dev);
47264 + write_unlock(&gr_inode_lock);
47265 +
47266 + return;
47267 +}
47268 +
47269 +static void
47270 +update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
47271 + const ino_t newinode, const dev_t newdevice,
47272 + struct acl_subject_label *subj)
47273 +{
47274 + unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
47275 + struct acl_object_label *match;
47276 +
47277 + match = subj->obj_hash[index];
47278 +
47279 + while (match && (match->inode != oldinode ||
47280 + match->device != olddevice ||
47281 + !(match->mode & GR_DELETED)))
47282 + match = match->next;
47283 +
47284 + if (match && (match->inode == oldinode)
47285 + && (match->device == olddevice)
47286 + && (match->mode & GR_DELETED)) {
47287 + if (match->prev == NULL) {
47288 + subj->obj_hash[index] = match->next;
47289 + if (match->next != NULL)
47290 + match->next->prev = NULL;
47291 + } else {
47292 + match->prev->next = match->next;
47293 + if (match->next != NULL)
47294 + match->next->prev = match->prev;
47295 + }
47296 + match->prev = NULL;
47297 + match->next = NULL;
47298 + match->inode = newinode;
47299 + match->device = newdevice;
47300 + match->mode &= ~GR_DELETED;
47301 +
47302 + insert_acl_obj_label(match, subj);
47303 + }
47304 +
47305 + return;
47306 +}
47307 +
47308 +static void
47309 +update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
47310 + const ino_t newinode, const dev_t newdevice,
47311 + struct acl_role_label *role)
47312 +{
47313 + unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
47314 + struct acl_subject_label *match;
47315 +
47316 + match = role->subj_hash[index];
47317 +
47318 + while (match && (match->inode != oldinode ||
47319 + match->device != olddevice ||
47320 + !(match->mode & GR_DELETED)))
47321 + match = match->next;
47322 +
47323 + if (match && (match->inode == oldinode)
47324 + && (match->device == olddevice)
47325 + && (match->mode & GR_DELETED)) {
47326 + if (match->prev == NULL) {
47327 + role->subj_hash[index] = match->next;
47328 + if (match->next != NULL)
47329 + match->next->prev = NULL;
47330 + } else {
47331 + match->prev->next = match->next;
47332 + if (match->next != NULL)
47333 + match->next->prev = match->prev;
47334 + }
47335 + match->prev = NULL;
47336 + match->next = NULL;
47337 + match->inode = newinode;
47338 + match->device = newdevice;
47339 + match->mode &= ~GR_DELETED;
47340 +
47341 + insert_acl_subj_label(match, role);
47342 + }
47343 +
47344 + return;
47345 +}
47346 +
47347 +static void
47348 +update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
47349 + const ino_t newinode, const dev_t newdevice)
47350 +{
47351 + unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
47352 + struct inodev_entry *match;
47353 +
47354 + match = inodev_set.i_hash[index];
47355 +
47356 + while (match && (match->nentry->inode != oldinode ||
47357 + match->nentry->device != olddevice || !match->nentry->deleted))
47358 + match = match->next;
47359 +
47360 + if (match && (match->nentry->inode == oldinode)
47361 + && (match->nentry->device == olddevice) &&
47362 + match->nentry->deleted) {
47363 + if (match->prev == NULL) {
47364 + inodev_set.i_hash[index] = match->next;
47365 + if (match->next != NULL)
47366 + match->next->prev = NULL;
47367 + } else {
47368 + match->prev->next = match->next;
47369 + if (match->next != NULL)
47370 + match->next->prev = match->prev;
47371 + }
47372 + match->prev = NULL;
47373 + match->next = NULL;
47374 + match->nentry->inode = newinode;
47375 + match->nentry->device = newdevice;
47376 + match->nentry->deleted = 0;
47377 +
47378 + insert_inodev_entry(match);
47379 + }
47380 +
47381 + return;
47382 +}
47383 +
47384 +static void
47385 +do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
47386 + const struct vfsmount *mnt)
47387 +{
47388 + struct acl_subject_label *subj;
47389 + struct acl_role_label *role;
47390 + unsigned int x;
47391 + ino_t ino = dentry->d_inode->i_ino;
47392 + dev_t dev = __get_dev(dentry);
47393 +
47394 + FOR_EACH_ROLE_START(role)
47395 + update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
47396 +
47397 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
47398 + if ((subj->inode == ino) && (subj->device == dev)) {
47399 + subj->inode = ino;
47400 + subj->device = dev;
47401 + }
47402 + FOR_EACH_NESTED_SUBJECT_END(subj)
47403 + FOR_EACH_SUBJECT_START(role, subj, x)
47404 + update_acl_obj_label(matchn->inode, matchn->device,
47405 + ino, dev, subj);
47406 + FOR_EACH_SUBJECT_END(subj,x)
47407 + FOR_EACH_ROLE_END(role)
47408 +
47409 + update_inodev_entry(matchn->inode, matchn->device, ino, dev);
47410 +
47411 + return;
47412 +}
47413 +
47414 +void
47415 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
47416 +{
47417 + struct name_entry *matchn;
47418 +
47419 + if (unlikely(!(gr_status & GR_READY)))
47420 + return;
47421 +
47422 + preempt_disable();
47423 + matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
47424 +
47425 + if (unlikely((unsigned long)matchn)) {
47426 + write_lock(&gr_inode_lock);
47427 + do_handle_create(matchn, dentry, mnt);
47428 + write_unlock(&gr_inode_lock);
47429 + }
47430 + preempt_enable();
47431 +
47432 + return;
47433 +}
47434 +
47435 +void
47436 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
47437 + struct dentry *old_dentry,
47438 + struct dentry *new_dentry,
47439 + struct vfsmount *mnt, const __u8 replace)
47440 +{
47441 + struct name_entry *matchn;
47442 + struct inodev_entry *inodev;
47443 + ino_t old_ino = old_dentry->d_inode->i_ino;
47444 + dev_t old_dev = __get_dev(old_dentry);
47445 +
47446 + /* vfs_rename swaps the name and parent link for old_dentry and
47447 + new_dentry
47448 + at this point, old_dentry has the new name, parent link, and inode
47449 + for the renamed file
47450 + if a file is being replaced by a rename, new_dentry has the inode
47451 + and name for the replaced file
47452 + */
47453 +
47454 + if (unlikely(!(gr_status & GR_READY)))
47455 + return;
47456 +
47457 + preempt_disable();
47458 + matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
47459 +
47460 + /* we wouldn't have to check d_inode if it weren't for
47461 + NFS silly-renaming
47462 + */
47463 +
47464 + write_lock(&gr_inode_lock);
47465 + if (unlikely(replace && new_dentry->d_inode)) {
47466 + ino_t new_ino = new_dentry->d_inode->i_ino;
47467 + dev_t new_dev = __get_dev(new_dentry);
47468 +
47469 + inodev = lookup_inodev_entry(new_ino, new_dev);
47470 + if (inodev != NULL && (new_dentry->d_inode->i_nlink <= 1))
47471 + do_handle_delete(inodev, new_ino, new_dev);
47472 + }
47473 +
47474 + inodev = lookup_inodev_entry(old_ino, old_dev);
47475 + if (inodev != NULL && (old_dentry->d_inode->i_nlink <= 1))
47476 + do_handle_delete(inodev, old_ino, old_dev);
47477 +
47478 + if (unlikely((unsigned long)matchn))
47479 + do_handle_create(matchn, old_dentry, mnt);
47480 +
47481 + write_unlock(&gr_inode_lock);
47482 + preempt_enable();
47483 +
47484 + return;
47485 +}
47486 +
47487 +static int
47488 +lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
47489 + unsigned char **sum)
47490 +{
47491 + struct acl_role_label *r;
47492 + struct role_allowed_ip *ipp;
47493 + struct role_transition *trans;
47494 + unsigned int i;
47495 + int found = 0;
47496 + u32 curr_ip = current->signal->curr_ip;
47497 +
47498 + current->signal->saved_ip = curr_ip;
47499 +
47500 + /* check transition table */
47501 +
47502 + for (trans = current->role->transitions; trans; trans = trans->next) {
47503 + if (!strcmp(rolename, trans->rolename)) {
47504 + found = 1;
47505 + break;
47506 + }
47507 + }
47508 +
47509 + if (!found)
47510 + return 0;
47511 +
47512 + /* handle special roles that do not require authentication
47513 + and check ip */
47514 +
47515 + FOR_EACH_ROLE_START(r)
47516 + if (!strcmp(rolename, r->rolename) &&
47517 + (r->roletype & GR_ROLE_SPECIAL)) {
47518 + found = 0;
47519 + if (r->allowed_ips != NULL) {
47520 + for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
47521 + if ((ntohl(curr_ip) & ipp->netmask) ==
47522 + (ntohl(ipp->addr) & ipp->netmask))
47523 + found = 1;
47524 + }
47525 + } else
47526 + found = 2;
47527 + if (!found)
47528 + return 0;
47529 +
47530 + if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
47531 + ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
47532 + *salt = NULL;
47533 + *sum = NULL;
47534 + return 1;
47535 + }
47536 + }
47537 + FOR_EACH_ROLE_END(r)
47538 +
47539 + for (i = 0; i < num_sprole_pws; i++) {
47540 + if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
47541 + *salt = acl_special_roles[i]->salt;
47542 + *sum = acl_special_roles[i]->sum;
47543 + return 1;
47544 + }
47545 + }
47546 +
47547 + return 0;
47548 +}
47549 +
47550 +static void
47551 +assign_special_role(char *rolename)
47552 +{
47553 + struct acl_object_label *obj;
47554 + struct acl_role_label *r;
47555 + struct acl_role_label *assigned = NULL;
47556 + struct task_struct *tsk;
47557 + struct file *filp;
47558 +
47559 + FOR_EACH_ROLE_START(r)
47560 + if (!strcmp(rolename, r->rolename) &&
47561 + (r->roletype & GR_ROLE_SPECIAL)) {
47562 + assigned = r;
47563 + break;
47564 + }
47565 + FOR_EACH_ROLE_END(r)
47566 +
47567 + if (!assigned)
47568 + return;
47569 +
47570 + read_lock(&tasklist_lock);
47571 + read_lock(&grsec_exec_file_lock);
47572 +
47573 + tsk = current->real_parent;
47574 + if (tsk == NULL)
47575 + goto out_unlock;
47576 +
47577 + filp = tsk->exec_file;
47578 + if (filp == NULL)
47579 + goto out_unlock;
47580 +
47581 + tsk->is_writable = 0;
47582 +
47583 + tsk->acl_sp_role = 1;
47584 + tsk->acl_role_id = ++acl_sp_role_value;
47585 + tsk->role = assigned;
47586 + tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
47587 +
47588 + /* ignore additional mmap checks for processes that are writable
47589 + by the default ACL */
47590 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
47591 + if (unlikely(obj->mode & GR_WRITE))
47592 + tsk->is_writable = 1;
47593 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
47594 + if (unlikely(obj->mode & GR_WRITE))
47595 + tsk->is_writable = 1;
47596 +
47597 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
47598 + printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
47599 +#endif
47600 +
47601 +out_unlock:
47602 + read_unlock(&grsec_exec_file_lock);
47603 + read_unlock(&tasklist_lock);
47604 + return;
47605 +}
47606 +
47607 +int gr_check_secure_terminal(struct task_struct *task)
47608 +{
47609 + struct task_struct *p, *p2, *p3;
47610 + struct files_struct *files;
47611 + struct fdtable *fdt;
47612 + struct file *our_file = NULL, *file;
47613 + int i;
47614 +
47615 + if (task->signal->tty == NULL)
47616 + return 1;
47617 +
47618 + files = get_files_struct(task);
47619 + if (files != NULL) {
47620 + rcu_read_lock();
47621 + fdt = files_fdtable(files);
47622 + for (i=0; i < fdt->max_fds; i++) {
47623 + file = fcheck_files(files, i);
47624 + if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
47625 + get_file(file);
47626 + our_file = file;
47627 + }
47628 + }
47629 + rcu_read_unlock();
47630 + put_files_struct(files);
47631 + }
47632 +
47633 + if (our_file == NULL)
47634 + return 1;
47635 +
47636 + read_lock(&tasklist_lock);
47637 + do_each_thread(p2, p) {
47638 + files = get_files_struct(p);
47639 + if (files == NULL ||
47640 + (p->signal && p->signal->tty == task->signal->tty)) {
47641 + if (files != NULL)
47642 + put_files_struct(files);
47643 + continue;
47644 + }
47645 + rcu_read_lock();
47646 + fdt = files_fdtable(files);
47647 + for (i=0; i < fdt->max_fds; i++) {
47648 + file = fcheck_files(files, i);
47649 + if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
47650 + file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
47651 + p3 = task;
47652 + while (p3->pid > 0) {
47653 + if (p3 == p)
47654 + break;
47655 + p3 = p3->real_parent;
47656 + }
47657 + if (p3 == p)
47658 + break;
47659 + gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
47660 + gr_handle_alertkill(p);
47661 + rcu_read_unlock();
47662 + put_files_struct(files);
47663 + read_unlock(&tasklist_lock);
47664 + fput(our_file);
47665 + return 0;
47666 + }
47667 + }
47668 + rcu_read_unlock();
47669 + put_files_struct(files);
47670 + } while_each_thread(p2, p);
47671 + read_unlock(&tasklist_lock);
47672 +
47673 + fput(our_file);
47674 + return 1;
47675 +}
47676 +
47677 +ssize_t
47678 +write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
47679 +{
47680 + struct gr_arg_wrapper uwrap;
47681 + unsigned char *sprole_salt = NULL;
47682 + unsigned char *sprole_sum = NULL;
47683 + int error = sizeof (struct gr_arg_wrapper);
47684 + int error2 = 0;
47685 +
47686 + mutex_lock(&gr_dev_mutex);
47687 +
47688 + if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
47689 + error = -EPERM;
47690 + goto out;
47691 + }
47692 +
47693 + if (count != sizeof (struct gr_arg_wrapper)) {
47694 + gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
47695 + error = -EINVAL;
47696 + goto out;
47697 + }
47698 +
47699 +
47700 + if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
47701 + gr_auth_expires = 0;
47702 + gr_auth_attempts = 0;
47703 + }
47704 +
47705 + if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
47706 + error = -EFAULT;
47707 + goto out;
47708 + }
47709 +
47710 + if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
47711 + error = -EINVAL;
47712 + goto out;
47713 + }
47714 +
47715 + if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
47716 + error = -EFAULT;
47717 + goto out;
47718 + }
47719 +
47720 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
47721 + gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
47722 + time_after(gr_auth_expires, get_seconds())) {
47723 + error = -EBUSY;
47724 + goto out;
47725 + }
47726 +
47727 + /* if non-root trying to do anything other than use a special role,
47728 + do not attempt authentication, do not count towards authentication
47729 + locking
47730 + */
47731 +
47732 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
47733 + gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
47734 + current_uid()) {
47735 + error = -EPERM;
47736 + goto out;
47737 + }
47738 +
47739 + /* ensure pw and special role name are null terminated */
47740 +
47741 + gr_usermode->pw[GR_PW_LEN - 1] = '\0';
47742 + gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
47743 +
47744 + /* Okay.
47745 + * We have our enough of the argument structure..(we have yet
47746 + * to copy_from_user the tables themselves) . Copy the tables
47747 + * only if we need them, i.e. for loading operations. */
47748 +
47749 + switch (gr_usermode->mode) {
47750 + case GR_STATUS:
47751 + if (gr_status & GR_READY) {
47752 + error = 1;
47753 + if (!gr_check_secure_terminal(current))
47754 + error = 3;
47755 + } else
47756 + error = 2;
47757 + goto out;
47758 + case GR_SHUTDOWN:
47759 + if ((gr_status & GR_READY)
47760 + && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
47761 + pax_open_kernel();
47762 + gr_status &= ~GR_READY;
47763 + pax_close_kernel();
47764 +
47765 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
47766 + free_variables();
47767 + memset(gr_usermode, 0, sizeof (struct gr_arg));
47768 + memset(gr_system_salt, 0, GR_SALT_LEN);
47769 + memset(gr_system_sum, 0, GR_SHA_LEN);
47770 + } else if (gr_status & GR_READY) {
47771 + gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
47772 + error = -EPERM;
47773 + } else {
47774 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
47775 + error = -EAGAIN;
47776 + }
47777 + break;
47778 + case GR_ENABLE:
47779 + if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
47780 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
47781 + else {
47782 + if (gr_status & GR_READY)
47783 + error = -EAGAIN;
47784 + else
47785 + error = error2;
47786 + gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
47787 + }
47788 + break;
47789 + case GR_RELOAD:
47790 + if (!(gr_status & GR_READY)) {
47791 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
47792 + error = -EAGAIN;
47793 + } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
47794 + preempt_disable();
47795 +
47796 + pax_open_kernel();
47797 + gr_status &= ~GR_READY;
47798 + pax_close_kernel();
47799 +
47800 + free_variables();
47801 + if (!(error2 = gracl_init(gr_usermode))) {
47802 + preempt_enable();
47803 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
47804 + } else {
47805 + preempt_enable();
47806 + error = error2;
47807 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
47808 + }
47809 + } else {
47810 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
47811 + error = -EPERM;
47812 + }
47813 + break;
47814 + case GR_SEGVMOD:
47815 + if (unlikely(!(gr_status & GR_READY))) {
47816 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
47817 + error = -EAGAIN;
47818 + break;
47819 + }
47820 +
47821 + if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
47822 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
47823 + if (gr_usermode->segv_device && gr_usermode->segv_inode) {
47824 + struct acl_subject_label *segvacl;
47825 + segvacl =
47826 + lookup_acl_subj_label(gr_usermode->segv_inode,
47827 + gr_usermode->segv_device,
47828 + current->role);
47829 + if (segvacl) {
47830 + segvacl->crashes = 0;
47831 + segvacl->expires = 0;
47832 + }
47833 + } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
47834 + gr_remove_uid(gr_usermode->segv_uid);
47835 + }
47836 + } else {
47837 + gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
47838 + error = -EPERM;
47839 + }
47840 + break;
47841 + case GR_SPROLE:
47842 + case GR_SPROLEPAM:
47843 + if (unlikely(!(gr_status & GR_READY))) {
47844 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
47845 + error = -EAGAIN;
47846 + break;
47847 + }
47848 +
47849 + if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
47850 + current->role->expires = 0;
47851 + current->role->auth_attempts = 0;
47852 + }
47853 +
47854 + if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
47855 + time_after(current->role->expires, get_seconds())) {
47856 + error = -EBUSY;
47857 + goto out;
47858 + }
47859 +
47860 + if (lookup_special_role_auth
47861 + (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
47862 + && ((!sprole_salt && !sprole_sum)
47863 + || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
47864 + char *p = "";
47865 + assign_special_role(gr_usermode->sp_role);
47866 + read_lock(&tasklist_lock);
47867 + if (current->real_parent)
47868 + p = current->real_parent->role->rolename;
47869 + read_unlock(&tasklist_lock);
47870 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
47871 + p, acl_sp_role_value);
47872 + } else {
47873 + gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
47874 + error = -EPERM;
47875 + if(!(current->role->auth_attempts++))
47876 + current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
47877 +
47878 + goto out;
47879 + }
47880 + break;
47881 + case GR_UNSPROLE:
47882 + if (unlikely(!(gr_status & GR_READY))) {
47883 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
47884 + error = -EAGAIN;
47885 + break;
47886 + }
47887 +
47888 + if (current->role->roletype & GR_ROLE_SPECIAL) {
47889 + char *p = "";
47890 + int i = 0;
47891 +
47892 + read_lock(&tasklist_lock);
47893 + if (current->real_parent) {
47894 + p = current->real_parent->role->rolename;
47895 + i = current->real_parent->acl_role_id;
47896 + }
47897 + read_unlock(&tasklist_lock);
47898 +
47899 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
47900 + gr_set_acls(1);
47901 + } else {
47902 + error = -EPERM;
47903 + goto out;
47904 + }
47905 + break;
47906 + default:
47907 + gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
47908 + error = -EINVAL;
47909 + break;
47910 + }
47911 +
47912 + if (error != -EPERM)
47913 + goto out;
47914 +
47915 + if(!(gr_auth_attempts++))
47916 + gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
47917 +
47918 + out:
47919 + mutex_unlock(&gr_dev_mutex);
47920 + return error;
47921 +}
47922 +
47923 +/* must be called with
47924 + rcu_read_lock();
47925 + read_lock(&tasklist_lock);
47926 + read_lock(&grsec_exec_file_lock);
47927 +*/
47928 +int gr_apply_subject_to_task(struct task_struct *task)
47929 +{
47930 + struct acl_object_label *obj;
47931 + char *tmpname;
47932 + struct acl_subject_label *tmpsubj;
47933 + struct file *filp;
47934 + struct name_entry *nmatch;
47935 +
47936 + filp = task->exec_file;
47937 + if (filp == NULL)
47938 + return 0;
47939 +
47940 + /* the following is to apply the correct subject
47941 + on binaries running when the RBAC system
47942 + is enabled, when the binaries have been
47943 + replaced or deleted since their execution
47944 + -----
47945 + when the RBAC system starts, the inode/dev
47946 + from exec_file will be one the RBAC system
47947 + is unaware of. It only knows the inode/dev
47948 + of the present file on disk, or the absence
47949 + of it.
47950 + */
47951 + preempt_disable();
47952 + tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
47953 +
47954 + nmatch = lookup_name_entry(tmpname);
47955 + preempt_enable();
47956 + tmpsubj = NULL;
47957 + if (nmatch) {
47958 + if (nmatch->deleted)
47959 + tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
47960 + else
47961 + tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
47962 + if (tmpsubj != NULL)
47963 + task->acl = tmpsubj;
47964 + }
47965 + if (tmpsubj == NULL)
47966 + task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
47967 + task->role);
47968 + if (task->acl) {
47969 + task->is_writable = 0;
47970 + /* ignore additional mmap checks for processes that are writable
47971 + by the default ACL */
47972 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
47973 + if (unlikely(obj->mode & GR_WRITE))
47974 + task->is_writable = 1;
47975 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
47976 + if (unlikely(obj->mode & GR_WRITE))
47977 + task->is_writable = 1;
47978 +
47979 + gr_set_proc_res(task);
47980 +
47981 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
47982 + printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
47983 +#endif
47984 + } else {
47985 + return 1;
47986 + }
47987 +
47988 + return 0;
47989 +}
47990 +
47991 +int
47992 +gr_set_acls(const int type)
47993 +{
47994 + struct task_struct *task, *task2;
47995 + struct acl_role_label *role = current->role;
47996 + __u16 acl_role_id = current->acl_role_id;
47997 + const struct cred *cred;
47998 + int ret;
47999 +
48000 + rcu_read_lock();
48001 + read_lock(&tasklist_lock);
48002 + read_lock(&grsec_exec_file_lock);
48003 + do_each_thread(task2, task) {
48004 + /* check to see if we're called from the exit handler,
48005 + if so, only replace ACLs that have inherited the admin
48006 + ACL */
48007 +
48008 + if (type && (task->role != role ||
48009 + task->acl_role_id != acl_role_id))
48010 + continue;
48011 +
48012 + task->acl_role_id = 0;
48013 + task->acl_sp_role = 0;
48014 +
48015 + if (task->exec_file) {
48016 + cred = __task_cred(task);
48017 + task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
48018 + ret = gr_apply_subject_to_task(task);
48019 + if (ret) {
48020 + read_unlock(&grsec_exec_file_lock);
48021 + read_unlock(&tasklist_lock);
48022 + rcu_read_unlock();
48023 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
48024 + return ret;
48025 + }
48026 + } else {
48027 + // it's a kernel process
48028 + task->role = kernel_role;
48029 + task->acl = kernel_role->root_label;
48030 +#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
48031 + task->acl->mode &= ~GR_PROCFIND;
48032 +#endif
48033 + }
48034 + } while_each_thread(task2, task);
48035 + read_unlock(&grsec_exec_file_lock);
48036 + read_unlock(&tasklist_lock);
48037 + rcu_read_unlock();
48038 +
48039 + return 0;
48040 +}
48041 +
48042 +void
48043 +gr_learn_resource(const struct task_struct *task,
48044 + const int res, const unsigned long wanted, const int gt)
48045 +{
48046 + struct acl_subject_label *acl;
48047 + const struct cred *cred;
48048 +
48049 + if (unlikely((gr_status & GR_READY) &&
48050 + task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
48051 + goto skip_reslog;
48052 +
48053 +#ifdef CONFIG_GRKERNSEC_RESLOG
48054 + gr_log_resource(task, res, wanted, gt);
48055 +#endif
48056 + skip_reslog:
48057 +
48058 + if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
48059 + return;
48060 +
48061 + acl = task->acl;
48062 +
48063 + if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
48064 + !(acl->resmask & (1 << (unsigned short) res))))
48065 + return;
48066 +
48067 + if (wanted >= acl->res[res].rlim_cur) {
48068 + unsigned long res_add;
48069 +
48070 + res_add = wanted;
48071 + switch (res) {
48072 + case RLIMIT_CPU:
48073 + res_add += GR_RLIM_CPU_BUMP;
48074 + break;
48075 + case RLIMIT_FSIZE:
48076 + res_add += GR_RLIM_FSIZE_BUMP;
48077 + break;
48078 + case RLIMIT_DATA:
48079 + res_add += GR_RLIM_DATA_BUMP;
48080 + break;
48081 + case RLIMIT_STACK:
48082 + res_add += GR_RLIM_STACK_BUMP;
48083 + break;
48084 + case RLIMIT_CORE:
48085 + res_add += GR_RLIM_CORE_BUMP;
48086 + break;
48087 + case RLIMIT_RSS:
48088 + res_add += GR_RLIM_RSS_BUMP;
48089 + break;
48090 + case RLIMIT_NPROC:
48091 + res_add += GR_RLIM_NPROC_BUMP;
48092 + break;
48093 + case RLIMIT_NOFILE:
48094 + res_add += GR_RLIM_NOFILE_BUMP;
48095 + break;
48096 + case RLIMIT_MEMLOCK:
48097 + res_add += GR_RLIM_MEMLOCK_BUMP;
48098 + break;
48099 + case RLIMIT_AS:
48100 + res_add += GR_RLIM_AS_BUMP;
48101 + break;
48102 + case RLIMIT_LOCKS:
48103 + res_add += GR_RLIM_LOCKS_BUMP;
48104 + break;
48105 + case RLIMIT_SIGPENDING:
48106 + res_add += GR_RLIM_SIGPENDING_BUMP;
48107 + break;
48108 + case RLIMIT_MSGQUEUE:
48109 + res_add += GR_RLIM_MSGQUEUE_BUMP;
48110 + break;
48111 + case RLIMIT_NICE:
48112 + res_add += GR_RLIM_NICE_BUMP;
48113 + break;
48114 + case RLIMIT_RTPRIO:
48115 + res_add += GR_RLIM_RTPRIO_BUMP;
48116 + break;
48117 + case RLIMIT_RTTIME:
48118 + res_add += GR_RLIM_RTTIME_BUMP;
48119 + break;
48120 + }
48121 +
48122 + acl->res[res].rlim_cur = res_add;
48123 +
48124 + if (wanted > acl->res[res].rlim_max)
48125 + acl->res[res].rlim_max = res_add;
48126 +
48127 + /* only log the subject filename, since resource logging is supported for
48128 + single-subject learning only */
48129 + rcu_read_lock();
48130 + cred = __task_cred(task);
48131 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
48132 + task->role->roletype, cred->uid, cred->gid, acl->filename,
48133 + acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
48134 + "", (unsigned long) res, &task->signal->saved_ip);
48135 + rcu_read_unlock();
48136 + }
48137 +
48138 + return;
48139 +}
48140 +
48141 +#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
48142 +void
48143 +pax_set_initial_flags(struct linux_binprm *bprm)
48144 +{
48145 + struct task_struct *task = current;
48146 + struct acl_subject_label *proc;
48147 + unsigned long flags;
48148 +
48149 + if (unlikely(!(gr_status & GR_READY)))
48150 + return;
48151 +
48152 + flags = pax_get_flags(task);
48153 +
48154 + proc = task->acl;
48155 +
48156 + if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
48157 + flags &= ~MF_PAX_PAGEEXEC;
48158 + if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
48159 + flags &= ~MF_PAX_SEGMEXEC;
48160 + if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
48161 + flags &= ~MF_PAX_RANDMMAP;
48162 + if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
48163 + flags &= ~MF_PAX_EMUTRAMP;
48164 + if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
48165 + flags &= ~MF_PAX_MPROTECT;
48166 +
48167 + if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
48168 + flags |= MF_PAX_PAGEEXEC;
48169 + if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
48170 + flags |= MF_PAX_SEGMEXEC;
48171 + if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
48172 + flags |= MF_PAX_RANDMMAP;
48173 + if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
48174 + flags |= MF_PAX_EMUTRAMP;
48175 + if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
48176 + flags |= MF_PAX_MPROTECT;
48177 +
48178 + pax_set_flags(task, flags);
48179 +
48180 + return;
48181 +}
48182 +#endif
48183 +
48184 +#ifdef CONFIG_SYSCTL
48185 +/* Eric Biederman likes breaking userland ABI and every inode-based security
48186 + system to save 35kb of memory */
48187 +
48188 +/* we modify the passed in filename, but adjust it back before returning */
48189 +static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len)
48190 +{
48191 + struct name_entry *nmatch;
48192 + char *p, *lastp = NULL;
48193 + struct acl_object_label *obj = NULL, *tmp;
48194 + struct acl_subject_label *tmpsubj;
48195 + char c = '\0';
48196 +
48197 + read_lock(&gr_inode_lock);
48198 +
48199 + p = name + len - 1;
48200 + do {
48201 + nmatch = lookup_name_entry(name);
48202 + if (lastp != NULL)
48203 + *lastp = c;
48204 +
48205 + if (nmatch == NULL)
48206 + goto next_component;
48207 + tmpsubj = current->acl;
48208 + do {
48209 + obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj);
48210 + if (obj != NULL) {
48211 + tmp = obj->globbed;
48212 + while (tmp) {
48213 + if (!glob_match(tmp->filename, name)) {
48214 + obj = tmp;
48215 + goto found_obj;
48216 + }
48217 + tmp = tmp->next;
48218 + }
48219 + goto found_obj;
48220 + }
48221 + } while ((tmpsubj = tmpsubj->parent_subject));
48222 +next_component:
48223 + /* end case */
48224 + if (p == name)
48225 + break;
48226 +
48227 + while (*p != '/')
48228 + p--;
48229 + if (p == name)
48230 + lastp = p + 1;
48231 + else {
48232 + lastp = p;
48233 + p--;
48234 + }
48235 + c = *lastp;
48236 + *lastp = '\0';
48237 + } while (1);
48238 +found_obj:
48239 + read_unlock(&gr_inode_lock);
48240 + /* obj returned will always be non-null */
48241 + return obj;
48242 +}
48243 +
48244 +/* returns 0 when allowing, non-zero on error
48245 + op of 0 is used for readdir, so we don't log the names of hidden files
48246 +*/
48247 +__u32
48248 +gr_handle_sysctl(const struct ctl_table *table, const int op)
48249 +{
48250 + struct ctl_table *tmp;
48251 + const char *proc_sys = "/proc/sys";
48252 + char *path;
48253 + struct acl_object_label *obj;
48254 + unsigned short len = 0, pos = 0, depth = 0, i;
48255 + __u32 err = 0;
48256 + __u32 mode = 0;
48257 +
48258 + if (unlikely(!(gr_status & GR_READY)))
48259 + return 0;
48260 +
48261 + /* for now, ignore operations on non-sysctl entries if it's not a
48262 + readdir*/
48263 + if (table->child != NULL && op != 0)
48264 + return 0;
48265 +
48266 + mode |= GR_FIND;
48267 + /* it's only a read if it's an entry, read on dirs is for readdir */
48268 + if (op & MAY_READ)
48269 + mode |= GR_READ;
48270 + if (op & MAY_WRITE)
48271 + mode |= GR_WRITE;
48272 +
48273 + preempt_disable();
48274 +
48275 + path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
48276 +
48277 + /* it's only a read/write if it's an actual entry, not a dir
48278 + (which are opened for readdir)
48279 + */
48280 +
48281 + /* convert the requested sysctl entry into a pathname */
48282 +
48283 + for (tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
48284 + len += strlen(tmp->procname);
48285 + len++;
48286 + depth++;
48287 + }
48288 +
48289 + if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) {
48290 + /* deny */
48291 + goto out;
48292 + }
48293 +
48294 + memset(path, 0, PAGE_SIZE);
48295 +
48296 + memcpy(path, proc_sys, strlen(proc_sys));
48297 +
48298 + pos += strlen(proc_sys);
48299 +
48300 + for (; depth > 0; depth--) {
48301 + path[pos] = '/';
48302 + pos++;
48303 + for (i = 1, tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
48304 + if (depth == i) {
48305 + memcpy(path + pos, tmp->procname,
48306 + strlen(tmp->procname));
48307 + pos += strlen(tmp->procname);
48308 + }
48309 + i++;
48310 + }
48311 + }
48312 +
48313 + obj = gr_lookup_by_name(path, pos);
48314 + err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
48315 +
48316 + if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) &&
48317 + ((err & mode) != mode))) {
48318 + __u32 new_mode = mode;
48319 +
48320 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
48321 +
48322 + err = 0;
48323 + gr_log_learn_sysctl(path, new_mode);
48324 + } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) {
48325 + gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path);
48326 + err = -ENOENT;
48327 + } else if (!(err & GR_FIND)) {
48328 + err = -ENOENT;
48329 + } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) {
48330 + gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied",
48331 + path, (mode & GR_READ) ? " reading" : "",
48332 + (mode & GR_WRITE) ? " writing" : "");
48333 + err = -EACCES;
48334 + } else if ((err & mode) != mode) {
48335 + err = -EACCES;
48336 + } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) {
48337 + gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful",
48338 + path, (mode & GR_READ) ? " reading" : "",
48339 + (mode & GR_WRITE) ? " writing" : "");
48340 + err = 0;
48341 + } else
48342 + err = 0;
48343 +
48344 + out:
48345 + preempt_enable();
48346 +
48347 + return err;
48348 +}
48349 +#endif
48350 +
48351 +int
48352 +gr_handle_proc_ptrace(struct task_struct *task)
48353 +{
48354 + struct file *filp;
48355 + struct task_struct *tmp = task;
48356 + struct task_struct *curtemp = current;
48357 + __u32 retmode;
48358 +
48359 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
48360 + if (unlikely(!(gr_status & GR_READY)))
48361 + return 0;
48362 +#endif
48363 +
48364 + read_lock(&tasklist_lock);
48365 + read_lock(&grsec_exec_file_lock);
48366 + filp = task->exec_file;
48367 +
48368 + while (tmp->pid > 0) {
48369 + if (tmp == curtemp)
48370 + break;
48371 + tmp = tmp->real_parent;
48372 + }
48373 +
48374 + if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
48375 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
48376 + read_unlock(&grsec_exec_file_lock);
48377 + read_unlock(&tasklist_lock);
48378 + return 1;
48379 + }
48380 +
48381 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
48382 + if (!(gr_status & GR_READY)) {
48383 + read_unlock(&grsec_exec_file_lock);
48384 + read_unlock(&tasklist_lock);
48385 + return 0;
48386 + }
48387 +#endif
48388 +
48389 + retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
48390 + read_unlock(&grsec_exec_file_lock);
48391 + read_unlock(&tasklist_lock);
48392 +
48393 + if (retmode & GR_NOPTRACE)
48394 + return 1;
48395 +
48396 + if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
48397 + && (current->acl != task->acl || (current->acl != current->role->root_label
48398 + && current->pid != task->pid)))
48399 + return 1;
48400 +
48401 + return 0;
48402 +}
48403 +
48404 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
48405 +{
48406 + if (unlikely(!(gr_status & GR_READY)))
48407 + return;
48408 +
48409 + if (!(current->role->roletype & GR_ROLE_GOD))
48410 + return;
48411 +
48412 + seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
48413 + p->role->rolename, gr_task_roletype_to_char(p),
48414 + p->acl->filename);
48415 +}
48416 +
48417 +int
48418 +gr_handle_ptrace(struct task_struct *task, const long request)
48419 +{
48420 + struct task_struct *tmp = task;
48421 + struct task_struct *curtemp = current;
48422 + __u32 retmode;
48423 +
48424 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
48425 + if (unlikely(!(gr_status & GR_READY)))
48426 + return 0;
48427 +#endif
48428 +
48429 + read_lock(&tasklist_lock);
48430 + while (tmp->pid > 0) {
48431 + if (tmp == curtemp)
48432 + break;
48433 + tmp = tmp->real_parent;
48434 + }
48435 +
48436 + if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
48437 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
48438 + read_unlock(&tasklist_lock);
48439 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
48440 + return 1;
48441 + }
48442 + read_unlock(&tasklist_lock);
48443 +
48444 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
48445 + if (!(gr_status & GR_READY))
48446 + return 0;
48447 +#endif
48448 +
48449 + read_lock(&grsec_exec_file_lock);
48450 + if (unlikely(!task->exec_file)) {
48451 + read_unlock(&grsec_exec_file_lock);
48452 + return 0;
48453 + }
48454 +
48455 + retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
48456 + read_unlock(&grsec_exec_file_lock);
48457 +
48458 + if (retmode & GR_NOPTRACE) {
48459 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
48460 + return 1;
48461 + }
48462 +
48463 + if (retmode & GR_PTRACERD) {
48464 + switch (request) {
48465 + case PTRACE_POKETEXT:
48466 + case PTRACE_POKEDATA:
48467 + case PTRACE_POKEUSR:
48468 +#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
48469 + case PTRACE_SETREGS:
48470 + case PTRACE_SETFPREGS:
48471 +#endif
48472 +#ifdef CONFIG_X86
48473 + case PTRACE_SETFPXREGS:
48474 +#endif
48475 +#ifdef CONFIG_ALTIVEC
48476 + case PTRACE_SETVRREGS:
48477 +#endif
48478 + return 1;
48479 + default:
48480 + return 0;
48481 + }
48482 + } else if (!(current->acl->mode & GR_POVERRIDE) &&
48483 + !(current->role->roletype & GR_ROLE_GOD) &&
48484 + (current->acl != task->acl)) {
48485 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
48486 + return 1;
48487 + }
48488 +
48489 + return 0;
48490 +}
48491 +
48492 +static int is_writable_mmap(const struct file *filp)
48493 +{
48494 + struct task_struct *task = current;
48495 + struct acl_object_label *obj, *obj2;
48496 +
48497 + if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
48498 + !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
48499 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
48500 + obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
48501 + task->role->root_label);
48502 + if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
48503 + gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
48504 + return 1;
48505 + }
48506 + }
48507 + return 0;
48508 +}
48509 +
48510 +int
48511 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
48512 +{
48513 + __u32 mode;
48514 +
48515 + if (unlikely(!file || !(prot & PROT_EXEC)))
48516 + return 1;
48517 +
48518 + if (is_writable_mmap(file))
48519 + return 0;
48520 +
48521 + mode =
48522 + gr_search_file(file->f_path.dentry,
48523 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
48524 + file->f_path.mnt);
48525 +
48526 + if (!gr_tpe_allow(file))
48527 + return 0;
48528 +
48529 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
48530 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
48531 + return 0;
48532 + } else if (unlikely(!(mode & GR_EXEC))) {
48533 + return 0;
48534 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
48535 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
48536 + return 1;
48537 + }
48538 +
48539 + return 1;
48540 +}
48541 +
48542 +int
48543 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
48544 +{
48545 + __u32 mode;
48546 +
48547 + if (unlikely(!file || !(prot & PROT_EXEC)))
48548 + return 1;
48549 +
48550 + if (is_writable_mmap(file))
48551 + return 0;
48552 +
48553 + mode =
48554 + gr_search_file(file->f_path.dentry,
48555 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
48556 + file->f_path.mnt);
48557 +
48558 + if (!gr_tpe_allow(file))
48559 + return 0;
48560 +
48561 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
48562 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
48563 + return 0;
48564 + } else if (unlikely(!(mode & GR_EXEC))) {
48565 + return 0;
48566 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
48567 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
48568 + return 1;
48569 + }
48570 +
48571 + return 1;
48572 +}
48573 +
48574 +void
48575 +gr_acl_handle_psacct(struct task_struct *task, const long code)
48576 +{
48577 + unsigned long runtime;
48578 + unsigned long cputime;
48579 + unsigned int wday, cday;
48580 + __u8 whr, chr;
48581 + __u8 wmin, cmin;
48582 + __u8 wsec, csec;
48583 + struct timespec timeval;
48584 +
48585 + if (unlikely(!(gr_status & GR_READY) || !task->acl ||
48586 + !(task->acl->mode & GR_PROCACCT)))
48587 + return;
48588 +
48589 + do_posix_clock_monotonic_gettime(&timeval);
48590 + runtime = timeval.tv_sec - task->start_time.tv_sec;
48591 + wday = runtime / (3600 * 24);
48592 + runtime -= wday * (3600 * 24);
48593 + whr = runtime / 3600;
48594 + runtime -= whr * 3600;
48595 + wmin = runtime / 60;
48596 + runtime -= wmin * 60;
48597 + wsec = runtime;
48598 +
48599 + cputime = (task->utime + task->stime) / HZ;
48600 + cday = cputime / (3600 * 24);
48601 + cputime -= cday * (3600 * 24);
48602 + chr = cputime / 3600;
48603 + cputime -= chr * 3600;
48604 + cmin = cputime / 60;
48605 + cputime -= cmin * 60;
48606 + csec = cputime;
48607 +
48608 + gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
48609 +
48610 + return;
48611 +}
48612 +
48613 +void gr_set_kernel_label(struct task_struct *task)
48614 +{
48615 + if (gr_status & GR_READY) {
48616 + task->role = kernel_role;
48617 + task->acl = kernel_role->root_label;
48618 + }
48619 + return;
48620 +}
48621 +
48622 +#ifdef CONFIG_TASKSTATS
48623 +int gr_is_taskstats_denied(int pid)
48624 +{
48625 + struct task_struct *task;
48626 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48627 + const struct cred *cred;
48628 +#endif
48629 + int ret = 0;
48630 +
48631 + /* restrict taskstats viewing to un-chrooted root users
48632 + who have the 'view' subject flag if the RBAC system is enabled
48633 + */
48634 +
48635 + rcu_read_lock();
48636 + read_lock(&tasklist_lock);
48637 + task = find_task_by_vpid(pid);
48638 + if (task) {
48639 +#ifdef CONFIG_GRKERNSEC_CHROOT
48640 + if (proc_is_chrooted(task))
48641 + ret = -EACCES;
48642 +#endif
48643 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48644 + cred = __task_cred(task);
48645 +#ifdef CONFIG_GRKERNSEC_PROC_USER
48646 + if (cred->uid != 0)
48647 + ret = -EACCES;
48648 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48649 + if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
48650 + ret = -EACCES;
48651 +#endif
48652 +#endif
48653 + if (gr_status & GR_READY) {
48654 + if (!(task->acl->mode & GR_VIEW))
48655 + ret = -EACCES;
48656 + }
48657 + } else
48658 + ret = -ENOENT;
48659 +
48660 + read_unlock(&tasklist_lock);
48661 + rcu_read_unlock();
48662 +
48663 + return ret;
48664 +}
48665 +#endif
48666 +
48667 +/* AUXV entries are filled via a descendant of search_binary_handler
48668 + after we've already applied the subject for the target
48669 +*/
48670 +int gr_acl_enable_at_secure(void)
48671 +{
48672 + if (unlikely(!(gr_status & GR_READY)))
48673 + return 0;
48674 +
48675 + if (current->acl->mode & GR_ATSECURE)
48676 + return 1;
48677 +
48678 + return 0;
48679 +}
48680 +
48681 +int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
48682 +{
48683 + struct task_struct *task = current;
48684 + struct dentry *dentry = file->f_path.dentry;
48685 + struct vfsmount *mnt = file->f_path.mnt;
48686 + struct acl_object_label *obj, *tmp;
48687 + struct acl_subject_label *subj;
48688 + unsigned int bufsize;
48689 + int is_not_root;
48690 + char *path;
48691 + dev_t dev = __get_dev(dentry);
48692 +
48693 + if (unlikely(!(gr_status & GR_READY)))
48694 + return 1;
48695 +
48696 + if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
48697 + return 1;
48698 +
48699 + /* ignore Eric Biederman */
48700 + if (IS_PRIVATE(dentry->d_inode))
48701 + return 1;
48702 +
48703 + subj = task->acl;
48704 + do {
48705 + obj = lookup_acl_obj_label(ino, dev, subj);
48706 + if (obj != NULL)
48707 + return (obj->mode & GR_FIND) ? 1 : 0;
48708 + } while ((subj = subj->parent_subject));
48709 +
48710 + /* this is purely an optimization since we're looking for an object
48711 + for the directory we're doing a readdir on
48712 + if it's possible for any globbed object to match the entry we're
48713 + filling into the directory, then the object we find here will be
48714 + an anchor point with attached globbed objects
48715 + */
48716 + obj = chk_obj_label_noglob(dentry, mnt, task->acl);
48717 + if (obj->globbed == NULL)
48718 + return (obj->mode & GR_FIND) ? 1 : 0;
48719 +
48720 + is_not_root = ((obj->filename[0] == '/') &&
48721 + (obj->filename[1] == '\0')) ? 0 : 1;
48722 + bufsize = PAGE_SIZE - namelen - is_not_root;
48723 +
48724 + /* check bufsize > PAGE_SIZE || bufsize == 0 */
48725 + if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
48726 + return 1;
48727 +
48728 + preempt_disable();
48729 + path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
48730 + bufsize);
48731 +
48732 + bufsize = strlen(path);
48733 +
48734 + /* if base is "/", don't append an additional slash */
48735 + if (is_not_root)
48736 + *(path + bufsize) = '/';
48737 + memcpy(path + bufsize + is_not_root, name, namelen);
48738 + *(path + bufsize + namelen + is_not_root) = '\0';
48739 +
48740 + tmp = obj->globbed;
48741 + while (tmp) {
48742 + if (!glob_match(tmp->filename, path)) {
48743 + preempt_enable();
48744 + return (tmp->mode & GR_FIND) ? 1 : 0;
48745 + }
48746 + tmp = tmp->next;
48747 + }
48748 + preempt_enable();
48749 + return (obj->mode & GR_FIND) ? 1 : 0;
48750 +}
48751 +
48752 +#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
48753 +EXPORT_SYMBOL(gr_acl_is_enabled);
48754 +#endif
48755 +EXPORT_SYMBOL(gr_learn_resource);
48756 +EXPORT_SYMBOL(gr_set_kernel_label);
48757 +#ifdef CONFIG_SECURITY
48758 +EXPORT_SYMBOL(gr_check_user_change);
48759 +EXPORT_SYMBOL(gr_check_group_change);
48760 +#endif
48761 +
48762 diff -urNp linux-3.0.4/grsecurity/gracl_cap.c linux-3.0.4/grsecurity/gracl_cap.c
48763 --- linux-3.0.4/grsecurity/gracl_cap.c 1969-12-31 19:00:00.000000000 -0500
48764 +++ linux-3.0.4/grsecurity/gracl_cap.c 2011-09-14 09:21:24.000000000 -0400
48765 @@ -0,0 +1,101 @@
48766 +#include <linux/kernel.h>
48767 +#include <linux/module.h>
48768 +#include <linux/sched.h>
48769 +#include <linux/gracl.h>
48770 +#include <linux/grsecurity.h>
48771 +#include <linux/grinternal.h>
48772 +
48773 +extern const char *captab_log[];
48774 +extern int captab_log_entries;
48775 +
48776 +int
48777 +gr_acl_is_capable(const int cap)
48778 +{
48779 + struct task_struct *task = current;
48780 + const struct cred *cred = current_cred();
48781 + struct acl_subject_label *curracl;
48782 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
48783 + kernel_cap_t cap_audit = __cap_empty_set;
48784 +
48785 + if (!gr_acl_is_enabled())
48786 + return 1;
48787 +
48788 + curracl = task->acl;
48789 +
48790 + cap_drop = curracl->cap_lower;
48791 + cap_mask = curracl->cap_mask;
48792 + cap_audit = curracl->cap_invert_audit;
48793 +
48794 + while ((curracl = curracl->parent_subject)) {
48795 + /* if the cap isn't specified in the current computed mask but is specified in the
48796 + current level subject, and is lowered in the current level subject, then add
48797 + it to the set of dropped capabilities
48798 + otherwise, add the current level subject's mask to the current computed mask
48799 + */
48800 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
48801 + cap_raise(cap_mask, cap);
48802 + if (cap_raised(curracl->cap_lower, cap))
48803 + cap_raise(cap_drop, cap);
48804 + if (cap_raised(curracl->cap_invert_audit, cap))
48805 + cap_raise(cap_audit, cap);
48806 + }
48807 + }
48808 +
48809 + if (!cap_raised(cap_drop, cap)) {
48810 + if (cap_raised(cap_audit, cap))
48811 + gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
48812 + return 1;
48813 + }
48814 +
48815 + curracl = task->acl;
48816 +
48817 + if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
48818 + && cap_raised(cred->cap_effective, cap)) {
48819 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
48820 + task->role->roletype, cred->uid,
48821 + cred->gid, task->exec_file ?
48822 + gr_to_filename(task->exec_file->f_path.dentry,
48823 + task->exec_file->f_path.mnt) : curracl->filename,
48824 + curracl->filename, 0UL,
48825 + 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
48826 + return 1;
48827 + }
48828 +
48829 + if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
48830 + gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
48831 + return 0;
48832 +}
48833 +
48834 +int
48835 +gr_acl_is_capable_nolog(const int cap)
48836 +{
48837 + struct acl_subject_label *curracl;
48838 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
48839 +
48840 + if (!gr_acl_is_enabled())
48841 + return 1;
48842 +
48843 + curracl = current->acl;
48844 +
48845 + cap_drop = curracl->cap_lower;
48846 + cap_mask = curracl->cap_mask;
48847 +
48848 + while ((curracl = curracl->parent_subject)) {
48849 + /* if the cap isn't specified in the current computed mask but is specified in the
48850 + current level subject, and is lowered in the current level subject, then add
48851 + it to the set of dropped capabilities
48852 + otherwise, add the current level subject's mask to the current computed mask
48853 + */
48854 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
48855 + cap_raise(cap_mask, cap);
48856 + if (cap_raised(curracl->cap_lower, cap))
48857 + cap_raise(cap_drop, cap);
48858 + }
48859 + }
48860 +
48861 + if (!cap_raised(cap_drop, cap))
48862 + return 1;
48863 +
48864 + return 0;
48865 +}
48866 +
48867 diff -urNp linux-3.0.4/grsecurity/gracl_fs.c linux-3.0.4/grsecurity/gracl_fs.c
48868 --- linux-3.0.4/grsecurity/gracl_fs.c 1969-12-31 19:00:00.000000000 -0500
48869 +++ linux-3.0.4/grsecurity/gracl_fs.c 2011-08-23 21:48:14.000000000 -0400
48870 @@ -0,0 +1,431 @@
48871 +#include <linux/kernel.h>
48872 +#include <linux/sched.h>
48873 +#include <linux/types.h>
48874 +#include <linux/fs.h>
48875 +#include <linux/file.h>
48876 +#include <linux/stat.h>
48877 +#include <linux/grsecurity.h>
48878 +#include <linux/grinternal.h>
48879 +#include <linux/gracl.h>
48880 +
48881 +__u32
48882 +gr_acl_handle_hidden_file(const struct dentry * dentry,
48883 + const struct vfsmount * mnt)
48884 +{
48885 + __u32 mode;
48886 +
48887 + if (unlikely(!dentry->d_inode))
48888 + return GR_FIND;
48889 +
48890 + mode =
48891 + gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
48892 +
48893 + if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
48894 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
48895 + return mode;
48896 + } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
48897 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
48898 + return 0;
48899 + } else if (unlikely(!(mode & GR_FIND)))
48900 + return 0;
48901 +
48902 + return GR_FIND;
48903 +}
48904 +
48905 +__u32
48906 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
48907 + const int fmode)
48908 +{
48909 + __u32 reqmode = GR_FIND;
48910 + __u32 mode;
48911 +
48912 + if (unlikely(!dentry->d_inode))
48913 + return reqmode;
48914 +
48915 + if (unlikely(fmode & O_APPEND))
48916 + reqmode |= GR_APPEND;
48917 + else if (unlikely(fmode & FMODE_WRITE))
48918 + reqmode |= GR_WRITE;
48919 + if (likely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
48920 + reqmode |= GR_READ;
48921 + if ((fmode & FMODE_GREXEC) && (fmode & __FMODE_EXEC))
48922 + reqmode &= ~GR_READ;
48923 + mode =
48924 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
48925 + mnt);
48926 +
48927 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
48928 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
48929 + reqmode & GR_READ ? " reading" : "",
48930 + reqmode & GR_WRITE ? " writing" : reqmode &
48931 + GR_APPEND ? " appending" : "");
48932 + return reqmode;
48933 + } else
48934 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
48935 + {
48936 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
48937 + reqmode & GR_READ ? " reading" : "",
48938 + reqmode & GR_WRITE ? " writing" : reqmode &
48939 + GR_APPEND ? " appending" : "");
48940 + return 0;
48941 + } else if (unlikely((mode & reqmode) != reqmode))
48942 + return 0;
48943 +
48944 + return reqmode;
48945 +}
48946 +
48947 +__u32
48948 +gr_acl_handle_creat(const struct dentry * dentry,
48949 + const struct dentry * p_dentry,
48950 + const struct vfsmount * p_mnt, const int fmode,
48951 + const int imode)
48952 +{
48953 + __u32 reqmode = GR_WRITE | GR_CREATE;
48954 + __u32 mode;
48955 +
48956 + if (unlikely(fmode & O_APPEND))
48957 + reqmode |= GR_APPEND;
48958 + if (unlikely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
48959 + reqmode |= GR_READ;
48960 + if (unlikely((fmode & O_CREAT) && (imode & (S_ISUID | S_ISGID))))
48961 + reqmode |= GR_SETID;
48962 +
48963 + mode =
48964 + gr_check_create(dentry, p_dentry, p_mnt,
48965 + reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
48966 +
48967 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
48968 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
48969 + reqmode & GR_READ ? " reading" : "",
48970 + reqmode & GR_WRITE ? " writing" : reqmode &
48971 + GR_APPEND ? " appending" : "");
48972 + return reqmode;
48973 + } else
48974 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
48975 + {
48976 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
48977 + reqmode & GR_READ ? " reading" : "",
48978 + reqmode & GR_WRITE ? " writing" : reqmode &
48979 + GR_APPEND ? " appending" : "");
48980 + return 0;
48981 + } else if (unlikely((mode & reqmode) != reqmode))
48982 + return 0;
48983 +
48984 + return reqmode;
48985 +}
48986 +
48987 +__u32
48988 +gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
48989 + const int fmode)
48990 +{
48991 + __u32 mode, reqmode = GR_FIND;
48992 +
48993 + if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
48994 + reqmode |= GR_EXEC;
48995 + if (fmode & S_IWOTH)
48996 + reqmode |= GR_WRITE;
48997 + if (fmode & S_IROTH)
48998 + reqmode |= GR_READ;
48999 +
49000 + mode =
49001 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
49002 + mnt);
49003 +
49004 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
49005 + gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
49006 + reqmode & GR_READ ? " reading" : "",
49007 + reqmode & GR_WRITE ? " writing" : "",
49008 + reqmode & GR_EXEC ? " executing" : "");
49009 + return reqmode;
49010 + } else
49011 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
49012 + {
49013 + gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
49014 + reqmode & GR_READ ? " reading" : "",
49015 + reqmode & GR_WRITE ? " writing" : "",
49016 + reqmode & GR_EXEC ? " executing" : "");
49017 + return 0;
49018 + } else if (unlikely((mode & reqmode) != reqmode))
49019 + return 0;
49020 +
49021 + return reqmode;
49022 +}
49023 +
49024 +static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
49025 +{
49026 + __u32 mode;
49027 +
49028 + mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
49029 +
49030 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
49031 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
49032 + return mode;
49033 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
49034 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
49035 + return 0;
49036 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
49037 + return 0;
49038 +
49039 + return (reqmode);
49040 +}
49041 +
49042 +__u32
49043 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
49044 +{
49045 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
49046 +}
49047 +
49048 +__u32
49049 +gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
49050 +{
49051 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
49052 +}
49053 +
49054 +__u32
49055 +gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
49056 +{
49057 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
49058 +}
49059 +
49060 +__u32
49061 +gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
49062 +{
49063 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
49064 +}
49065 +
49066 +__u32
49067 +gr_acl_handle_fchmod(const struct dentry *dentry, const struct vfsmount *mnt,
49068 + mode_t mode)
49069 +{
49070 + if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
49071 + return 1;
49072 +
49073 + if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
49074 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
49075 + GR_FCHMOD_ACL_MSG);
49076 + } else {
49077 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_FCHMOD_ACL_MSG);
49078 + }
49079 +}
49080 +
49081 +__u32
49082 +gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
49083 + mode_t mode)
49084 +{
49085 + if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
49086 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
49087 + GR_CHMOD_ACL_MSG);
49088 + } else {
49089 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
49090 + }
49091 +}
49092 +
49093 +__u32
49094 +gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
49095 +{
49096 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
49097 +}
49098 +
49099 +__u32
49100 +gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
49101 +{
49102 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
49103 +}
49104 +
49105 +__u32
49106 +gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
49107 +{
49108 + return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
49109 +}
49110 +
49111 +__u32
49112 +gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
49113 +{
49114 + return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
49115 + GR_UNIXCONNECT_ACL_MSG);
49116 +}
49117 +
49118 +/* hardlinks require at minimum create permission,
49119 + any additional privilege required is based on the
49120 + privilege of the file being linked to
49121 +*/
49122 +__u32
49123 +gr_acl_handle_link(const struct dentry * new_dentry,
49124 + const struct dentry * parent_dentry,
49125 + const struct vfsmount * parent_mnt,
49126 + const struct dentry * old_dentry,
49127 + const struct vfsmount * old_mnt, const char *to)
49128 +{
49129 + __u32 mode;
49130 + __u32 needmode = GR_CREATE | GR_LINK;
49131 + __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
49132 +
49133 + mode =
49134 + gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
49135 + old_mnt);
49136 +
49137 + if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
49138 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
49139 + return mode;
49140 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
49141 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
49142 + return 0;
49143 + } else if (unlikely((mode & needmode) != needmode))
49144 + return 0;
49145 +
49146 + return 1;
49147 +}
49148 +
49149 +__u32
49150 +gr_acl_handle_symlink(const struct dentry * new_dentry,
49151 + const struct dentry * parent_dentry,
49152 + const struct vfsmount * parent_mnt, const char *from)
49153 +{
49154 + __u32 needmode = GR_WRITE | GR_CREATE;
49155 + __u32 mode;
49156 +
49157 + mode =
49158 + gr_check_create(new_dentry, parent_dentry, parent_mnt,
49159 + GR_CREATE | GR_AUDIT_CREATE |
49160 + GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
49161 +
49162 + if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
49163 + gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
49164 + return mode;
49165 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
49166 + gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
49167 + return 0;
49168 + } else if (unlikely((mode & needmode) != needmode))
49169 + return 0;
49170 +
49171 + return (GR_WRITE | GR_CREATE);
49172 +}
49173 +
49174 +static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
49175 +{
49176 + __u32 mode;
49177 +
49178 + mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
49179 +
49180 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
49181 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
49182 + return mode;
49183 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
49184 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
49185 + return 0;
49186 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
49187 + return 0;
49188 +
49189 + return (reqmode);
49190 +}
49191 +
49192 +__u32
49193 +gr_acl_handle_mknod(const struct dentry * new_dentry,
49194 + const struct dentry * parent_dentry,
49195 + const struct vfsmount * parent_mnt,
49196 + const int mode)
49197 +{
49198 + __u32 reqmode = GR_WRITE | GR_CREATE;
49199 + if (unlikely(mode & (S_ISUID | S_ISGID)))
49200 + reqmode |= GR_SETID;
49201 +
49202 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
49203 + reqmode, GR_MKNOD_ACL_MSG);
49204 +}
49205 +
49206 +__u32
49207 +gr_acl_handle_mkdir(const struct dentry *new_dentry,
49208 + const struct dentry *parent_dentry,
49209 + const struct vfsmount *parent_mnt)
49210 +{
49211 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
49212 + GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
49213 +}
49214 +
49215 +#define RENAME_CHECK_SUCCESS(old, new) \
49216 + (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
49217 + ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
49218 +
49219 +int
49220 +gr_acl_handle_rename(struct dentry *new_dentry,
49221 + struct dentry *parent_dentry,
49222 + const struct vfsmount *parent_mnt,
49223 + struct dentry *old_dentry,
49224 + struct inode *old_parent_inode,
49225 + struct vfsmount *old_mnt, const char *newname)
49226 +{
49227 + __u32 comp1, comp2;
49228 + int error = 0;
49229 +
49230 + if (unlikely(!gr_acl_is_enabled()))
49231 + return 0;
49232 +
49233 + if (!new_dentry->d_inode) {
49234 + comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
49235 + GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
49236 + GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
49237 + comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
49238 + GR_DELETE | GR_AUDIT_DELETE |
49239 + GR_AUDIT_READ | GR_AUDIT_WRITE |
49240 + GR_SUPPRESS, old_mnt);
49241 + } else {
49242 + comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
49243 + GR_CREATE | GR_DELETE |
49244 + GR_AUDIT_CREATE | GR_AUDIT_DELETE |
49245 + GR_AUDIT_READ | GR_AUDIT_WRITE |
49246 + GR_SUPPRESS, parent_mnt);
49247 + comp2 =
49248 + gr_search_file(old_dentry,
49249 + GR_READ | GR_WRITE | GR_AUDIT_READ |
49250 + GR_DELETE | GR_AUDIT_DELETE |
49251 + GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
49252 + }
49253 +
49254 + if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
49255 + ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
49256 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
49257 + else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
49258 + && !(comp2 & GR_SUPPRESS)) {
49259 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
49260 + error = -EACCES;
49261 + } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
49262 + error = -EACCES;
49263 +
49264 + return error;
49265 +}
49266 +
49267 +void
49268 +gr_acl_handle_exit(void)
49269 +{
49270 + u16 id;
49271 + char *rolename;
49272 + struct file *exec_file;
49273 +
49274 + if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
49275 + !(current->role->roletype & GR_ROLE_PERSIST))) {
49276 + id = current->acl_role_id;
49277 + rolename = current->role->rolename;
49278 + gr_set_acls(1);
49279 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
49280 + }
49281 +
49282 + write_lock(&grsec_exec_file_lock);
49283 + exec_file = current->exec_file;
49284 + current->exec_file = NULL;
49285 + write_unlock(&grsec_exec_file_lock);
49286 +
49287 + if (exec_file)
49288 + fput(exec_file);
49289 +}
49290 +
49291 +int
49292 +gr_acl_handle_procpidmem(const struct task_struct *task)
49293 +{
49294 + if (unlikely(!gr_acl_is_enabled()))
49295 + return 0;
49296 +
49297 + if (task != current && task->acl->mode & GR_PROTPROCFD)
49298 + return -EACCES;
49299 +
49300 + return 0;
49301 +}
49302 diff -urNp linux-3.0.4/grsecurity/gracl_ip.c linux-3.0.4/grsecurity/gracl_ip.c
49303 --- linux-3.0.4/grsecurity/gracl_ip.c 1969-12-31 19:00:00.000000000 -0500
49304 +++ linux-3.0.4/grsecurity/gracl_ip.c 2011-08-23 21:48:14.000000000 -0400
49305 @@ -0,0 +1,381 @@
49306 +#include <linux/kernel.h>
49307 +#include <asm/uaccess.h>
49308 +#include <asm/errno.h>
49309 +#include <net/sock.h>
49310 +#include <linux/file.h>
49311 +#include <linux/fs.h>
49312 +#include <linux/net.h>
49313 +#include <linux/in.h>
49314 +#include <linux/skbuff.h>
49315 +#include <linux/ip.h>
49316 +#include <linux/udp.h>
49317 +#include <linux/types.h>
49318 +#include <linux/sched.h>
49319 +#include <linux/netdevice.h>
49320 +#include <linux/inetdevice.h>
49321 +#include <linux/gracl.h>
49322 +#include <linux/grsecurity.h>
49323 +#include <linux/grinternal.h>
49324 +
49325 +#define GR_BIND 0x01
49326 +#define GR_CONNECT 0x02
49327 +#define GR_INVERT 0x04
49328 +#define GR_BINDOVERRIDE 0x08
49329 +#define GR_CONNECTOVERRIDE 0x10
49330 +#define GR_SOCK_FAMILY 0x20
49331 +
49332 +static const char * gr_protocols[IPPROTO_MAX] = {
49333 + "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
49334 + "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
49335 + "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
49336 + "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
49337 + "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
49338 + "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
49339 + "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
49340 + "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
49341 + "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
49342 + "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
49343 + "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
49344 + "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
49345 + "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
49346 + "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
49347 + "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
49348 + "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
49349 + "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
49350 + "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
49351 + "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
49352 + "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
49353 + "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
49354 + "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
49355 + "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
49356 + "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
49357 + "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
49358 + "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
49359 + "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
49360 + "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
49361 + "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
49362 + "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
49363 + "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
49364 + "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
49365 + };
49366 +
49367 +static const char * gr_socktypes[SOCK_MAX] = {
49368 + "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
49369 + "unknown:7", "unknown:8", "unknown:9", "packet"
49370 + };
49371 +
49372 +static const char * gr_sockfamilies[AF_MAX+1] = {
49373 + "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
49374 + "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
49375 + "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
49376 + "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf"
49377 + };
49378 +
49379 +const char *
49380 +gr_proto_to_name(unsigned char proto)
49381 +{
49382 + return gr_protocols[proto];
49383 +}
49384 +
49385 +const char *
49386 +gr_socktype_to_name(unsigned char type)
49387 +{
49388 + return gr_socktypes[type];
49389 +}
49390 +
49391 +const char *
49392 +gr_sockfamily_to_name(unsigned char family)
49393 +{
49394 + return gr_sockfamilies[family];
49395 +}
49396 +
49397 +int
49398 +gr_search_socket(const int domain, const int type, const int protocol)
49399 +{
49400 + struct acl_subject_label *curr;
49401 + const struct cred *cred = current_cred();
49402 +
49403 + if (unlikely(!gr_acl_is_enabled()))
49404 + goto exit;
49405 +
49406 + if ((domain < 0) || (type < 0) || (protocol < 0) ||
49407 + (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
49408 + goto exit; // let the kernel handle it
49409 +
49410 + curr = current->acl;
49411 +
49412 + if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
49413 + /* the family is allowed, if this is PF_INET allow it only if
49414 + the extra sock type/protocol checks pass */
49415 + if (domain == PF_INET)
49416 + goto inet_check;
49417 + goto exit;
49418 + } else {
49419 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
49420 + __u32 fakeip = 0;
49421 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
49422 + current->role->roletype, cred->uid,
49423 + cred->gid, current->exec_file ?
49424 + gr_to_filename(current->exec_file->f_path.dentry,
49425 + current->exec_file->f_path.mnt) :
49426 + curr->filename, curr->filename,
49427 + &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
49428 + &current->signal->saved_ip);
49429 + goto exit;
49430 + }
49431 + goto exit_fail;
49432 + }
49433 +
49434 +inet_check:
49435 + /* the rest of this checking is for IPv4 only */
49436 + if (!curr->ips)
49437 + goto exit;
49438 +
49439 + if ((curr->ip_type & (1 << type)) &&
49440 + (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
49441 + goto exit;
49442 +
49443 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
49444 + /* we don't place acls on raw sockets , and sometimes
49445 + dgram/ip sockets are opened for ioctl and not
49446 + bind/connect, so we'll fake a bind learn log */
49447 + if (type == SOCK_RAW || type == SOCK_PACKET) {
49448 + __u32 fakeip = 0;
49449 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
49450 + current->role->roletype, cred->uid,
49451 + cred->gid, current->exec_file ?
49452 + gr_to_filename(current->exec_file->f_path.dentry,
49453 + current->exec_file->f_path.mnt) :
49454 + curr->filename, curr->filename,
49455 + &fakeip, 0, type,
49456 + protocol, GR_CONNECT, &current->signal->saved_ip);
49457 + } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
49458 + __u32 fakeip = 0;
49459 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
49460 + current->role->roletype, cred->uid,
49461 + cred->gid, current->exec_file ?
49462 + gr_to_filename(current->exec_file->f_path.dentry,
49463 + current->exec_file->f_path.mnt) :
49464 + curr->filename, curr->filename,
49465 + &fakeip, 0, type,
49466 + protocol, GR_BIND, &current->signal->saved_ip);
49467 + }
49468 + /* we'll log when they use connect or bind */
49469 + goto exit;
49470 + }
49471 +
49472 +exit_fail:
49473 + if (domain == PF_INET)
49474 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
49475 + gr_socktype_to_name(type), gr_proto_to_name(protocol));
49476 + else
49477 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
49478 + gr_socktype_to_name(type), protocol);
49479 +
49480 + return 0;
49481 +exit:
49482 + return 1;
49483 +}
49484 +
49485 +int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
49486 +{
49487 + if ((ip->mode & mode) &&
49488 + (ip_port >= ip->low) &&
49489 + (ip_port <= ip->high) &&
49490 + ((ntohl(ip_addr) & our_netmask) ==
49491 + (ntohl(our_addr) & our_netmask))
49492 + && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
49493 + && (ip->type & (1 << type))) {
49494 + if (ip->mode & GR_INVERT)
49495 + return 2; // specifically denied
49496 + else
49497 + return 1; // allowed
49498 + }
49499 +
49500 + return 0; // not specifically allowed, may continue parsing
49501 +}
49502 +
49503 +static int
49504 +gr_search_connectbind(const int full_mode, struct sock *sk,
49505 + struct sockaddr_in *addr, const int type)
49506 +{
49507 + char iface[IFNAMSIZ] = {0};
49508 + struct acl_subject_label *curr;
49509 + struct acl_ip_label *ip;
49510 + struct inet_sock *isk;
49511 + struct net_device *dev;
49512 + struct in_device *idev;
49513 + unsigned long i;
49514 + int ret;
49515 + int mode = full_mode & (GR_BIND | GR_CONNECT);
49516 + __u32 ip_addr = 0;
49517 + __u32 our_addr;
49518 + __u32 our_netmask;
49519 + char *p;
49520 + __u16 ip_port = 0;
49521 + const struct cred *cred = current_cred();
49522 +
49523 + if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
49524 + return 0;
49525 +
49526 + curr = current->acl;
49527 + isk = inet_sk(sk);
49528 +
49529 + /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
49530 + if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
49531 + addr->sin_addr.s_addr = curr->inaddr_any_override;
49532 + if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
49533 + struct sockaddr_in saddr;
49534 + int err;
49535 +
49536 + saddr.sin_family = AF_INET;
49537 + saddr.sin_addr.s_addr = curr->inaddr_any_override;
49538 + saddr.sin_port = isk->inet_sport;
49539 +
49540 + err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
49541 + if (err)
49542 + return err;
49543 +
49544 + err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
49545 + if (err)
49546 + return err;
49547 + }
49548 +
49549 + if (!curr->ips)
49550 + return 0;
49551 +
49552 + ip_addr = addr->sin_addr.s_addr;
49553 + ip_port = ntohs(addr->sin_port);
49554 +
49555 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
49556 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
49557 + current->role->roletype, cred->uid,
49558 + cred->gid, current->exec_file ?
49559 + gr_to_filename(current->exec_file->f_path.dentry,
49560 + current->exec_file->f_path.mnt) :
49561 + curr->filename, curr->filename,
49562 + &ip_addr, ip_port, type,
49563 + sk->sk_protocol, mode, &current->signal->saved_ip);
49564 + return 0;
49565 + }
49566 +
49567 + for (i = 0; i < curr->ip_num; i++) {
49568 + ip = *(curr->ips + i);
49569 + if (ip->iface != NULL) {
49570 + strncpy(iface, ip->iface, IFNAMSIZ - 1);
49571 + p = strchr(iface, ':');
49572 + if (p != NULL)
49573 + *p = '\0';
49574 + dev = dev_get_by_name(sock_net(sk), iface);
49575 + if (dev == NULL)
49576 + continue;
49577 + idev = in_dev_get(dev);
49578 + if (idev == NULL) {
49579 + dev_put(dev);
49580 + continue;
49581 + }
49582 + rcu_read_lock();
49583 + for_ifa(idev) {
49584 + if (!strcmp(ip->iface, ifa->ifa_label)) {
49585 + our_addr = ifa->ifa_address;
49586 + our_netmask = 0xffffffff;
49587 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
49588 + if (ret == 1) {
49589 + rcu_read_unlock();
49590 + in_dev_put(idev);
49591 + dev_put(dev);
49592 + return 0;
49593 + } else if (ret == 2) {
49594 + rcu_read_unlock();
49595 + in_dev_put(idev);
49596 + dev_put(dev);
49597 + goto denied;
49598 + }
49599 + }
49600 + } endfor_ifa(idev);
49601 + rcu_read_unlock();
49602 + in_dev_put(idev);
49603 + dev_put(dev);
49604 + } else {
49605 + our_addr = ip->addr;
49606 + our_netmask = ip->netmask;
49607 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
49608 + if (ret == 1)
49609 + return 0;
49610 + else if (ret == 2)
49611 + goto denied;
49612 + }
49613 + }
49614 +
49615 +denied:
49616 + if (mode == GR_BIND)
49617 + gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
49618 + else if (mode == GR_CONNECT)
49619 + gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
49620 +
49621 + return -EACCES;
49622 +}
49623 +
49624 +int
49625 +gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
49626 +{
49627 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
49628 +}
49629 +
49630 +int
49631 +gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
49632 +{
49633 + return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
49634 +}
49635 +
49636 +int gr_search_listen(struct socket *sock)
49637 +{
49638 + struct sock *sk = sock->sk;
49639 + struct sockaddr_in addr;
49640 +
49641 + addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
49642 + addr.sin_port = inet_sk(sk)->inet_sport;
49643 +
49644 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
49645 +}
49646 +
49647 +int gr_search_accept(struct socket *sock)
49648 +{
49649 + struct sock *sk = sock->sk;
49650 + struct sockaddr_in addr;
49651 +
49652 + addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
49653 + addr.sin_port = inet_sk(sk)->inet_sport;
49654 +
49655 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
49656 +}
49657 +
49658 +int
49659 +gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
49660 +{
49661 + if (addr)
49662 + return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
49663 + else {
49664 + struct sockaddr_in sin;
49665 + const struct inet_sock *inet = inet_sk(sk);
49666 +
49667 + sin.sin_addr.s_addr = inet->inet_daddr;
49668 + sin.sin_port = inet->inet_dport;
49669 +
49670 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
49671 + }
49672 +}
49673 +
49674 +int
49675 +gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
49676 +{
49677 + struct sockaddr_in sin;
49678 +
49679 + if (unlikely(skb->len < sizeof (struct udphdr)))
49680 + return 0; // skip this packet
49681 +
49682 + sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
49683 + sin.sin_port = udp_hdr(skb)->source;
49684 +
49685 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
49686 +}
49687 diff -urNp linux-3.0.4/grsecurity/gracl_learn.c linux-3.0.4/grsecurity/gracl_learn.c
49688 --- linux-3.0.4/grsecurity/gracl_learn.c 1969-12-31 19:00:00.000000000 -0500
49689 +++ linux-3.0.4/grsecurity/gracl_learn.c 2011-08-23 21:48:14.000000000 -0400
49690 @@ -0,0 +1,207 @@
49691 +#include <linux/kernel.h>
49692 +#include <linux/mm.h>
49693 +#include <linux/sched.h>
49694 +#include <linux/poll.h>
49695 +#include <linux/string.h>
49696 +#include <linux/file.h>
49697 +#include <linux/types.h>
49698 +#include <linux/vmalloc.h>
49699 +#include <linux/grinternal.h>
49700 +
49701 +extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
49702 + size_t count, loff_t *ppos);
49703 +extern int gr_acl_is_enabled(void);
49704 +
49705 +static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
49706 +static int gr_learn_attached;
49707 +
49708 +/* use a 512k buffer */
49709 +#define LEARN_BUFFER_SIZE (512 * 1024)
49710 +
49711 +static DEFINE_SPINLOCK(gr_learn_lock);
49712 +static DEFINE_MUTEX(gr_learn_user_mutex);
49713 +
49714 +/* we need to maintain two buffers, so that the kernel context of grlearn
49715 + uses a semaphore around the userspace copying, and the other kernel contexts
49716 + use a spinlock when copying into the buffer, since they cannot sleep
49717 +*/
49718 +static char *learn_buffer;
49719 +static char *learn_buffer_user;
49720 +static int learn_buffer_len;
49721 +static int learn_buffer_user_len;
49722 +
49723 +static ssize_t
49724 +read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
49725 +{
49726 + DECLARE_WAITQUEUE(wait, current);
49727 + ssize_t retval = 0;
49728 +
49729 + add_wait_queue(&learn_wait, &wait);
49730 + set_current_state(TASK_INTERRUPTIBLE);
49731 + do {
49732 + mutex_lock(&gr_learn_user_mutex);
49733 + spin_lock(&gr_learn_lock);
49734 + if (learn_buffer_len)
49735 + break;
49736 + spin_unlock(&gr_learn_lock);
49737 + mutex_unlock(&gr_learn_user_mutex);
49738 + if (file->f_flags & O_NONBLOCK) {
49739 + retval = -EAGAIN;
49740 + goto out;
49741 + }
49742 + if (signal_pending(current)) {
49743 + retval = -ERESTARTSYS;
49744 + goto out;
49745 + }
49746 +
49747 + schedule();
49748 + } while (1);
49749 +
49750 + memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
49751 + learn_buffer_user_len = learn_buffer_len;
49752 + retval = learn_buffer_len;
49753 + learn_buffer_len = 0;
49754 +
49755 + spin_unlock(&gr_learn_lock);
49756 +
49757 + if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
49758 + retval = -EFAULT;
49759 +
49760 + mutex_unlock(&gr_learn_user_mutex);
49761 +out:
49762 + set_current_state(TASK_RUNNING);
49763 + remove_wait_queue(&learn_wait, &wait);
49764 + return retval;
49765 +}
49766 +
49767 +static unsigned int
49768 +poll_learn(struct file * file, poll_table * wait)
49769 +{
49770 + poll_wait(file, &learn_wait, wait);
49771 +
49772 + if (learn_buffer_len)
49773 + return (POLLIN | POLLRDNORM);
49774 +
49775 + return 0;
49776 +}
49777 +
49778 +void
49779 +gr_clear_learn_entries(void)
49780 +{
49781 + char *tmp;
49782 +
49783 + mutex_lock(&gr_learn_user_mutex);
49784 + spin_lock(&gr_learn_lock);
49785 + tmp = learn_buffer;
49786 + learn_buffer = NULL;
49787 + spin_unlock(&gr_learn_lock);
49788 + if (tmp)
49789 + vfree(tmp);
49790 + if (learn_buffer_user != NULL) {
49791 + vfree(learn_buffer_user);
49792 + learn_buffer_user = NULL;
49793 + }
49794 + learn_buffer_len = 0;
49795 + mutex_unlock(&gr_learn_user_mutex);
49796 +
49797 + return;
49798 +}
49799 +
49800 +void
49801 +gr_add_learn_entry(const char *fmt, ...)
49802 +{
49803 + va_list args;
49804 + unsigned int len;
49805 +
49806 + if (!gr_learn_attached)
49807 + return;
49808 +
49809 + spin_lock(&gr_learn_lock);
49810 +
49811 + /* leave a gap at the end so we know when it's "full" but don't have to
49812 + compute the exact length of the string we're trying to append
49813 + */
49814 + if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
49815 + spin_unlock(&gr_learn_lock);
49816 + wake_up_interruptible(&learn_wait);
49817 + return;
49818 + }
49819 + if (learn_buffer == NULL) {
49820 + spin_unlock(&gr_learn_lock);
49821 + return;
49822 + }
49823 +
49824 + va_start(args, fmt);
49825 + len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
49826 + va_end(args);
49827 +
49828 + learn_buffer_len += len + 1;
49829 +
49830 + spin_unlock(&gr_learn_lock);
49831 + wake_up_interruptible(&learn_wait);
49832 +
49833 + return;
49834 +}
49835 +
49836 +static int
49837 +open_learn(struct inode *inode, struct file *file)
49838 +{
49839 + if (file->f_mode & FMODE_READ && gr_learn_attached)
49840 + return -EBUSY;
49841 + if (file->f_mode & FMODE_READ) {
49842 + int retval = 0;
49843 + mutex_lock(&gr_learn_user_mutex);
49844 + if (learn_buffer == NULL)
49845 + learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
49846 + if (learn_buffer_user == NULL)
49847 + learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
49848 + if (learn_buffer == NULL) {
49849 + retval = -ENOMEM;
49850 + goto out_error;
49851 + }
49852 + if (learn_buffer_user == NULL) {
49853 + retval = -ENOMEM;
49854 + goto out_error;
49855 + }
49856 + learn_buffer_len = 0;
49857 + learn_buffer_user_len = 0;
49858 + gr_learn_attached = 1;
49859 +out_error:
49860 + mutex_unlock(&gr_learn_user_mutex);
49861 + return retval;
49862 + }
49863 + return 0;
49864 +}
49865 +
49866 +static int
49867 +close_learn(struct inode *inode, struct file *file)
49868 +{
49869 + if (file->f_mode & FMODE_READ) {
49870 + char *tmp = NULL;
49871 + mutex_lock(&gr_learn_user_mutex);
49872 + spin_lock(&gr_learn_lock);
49873 + tmp = learn_buffer;
49874 + learn_buffer = NULL;
49875 + spin_unlock(&gr_learn_lock);
49876 + if (tmp)
49877 + vfree(tmp);
49878 + if (learn_buffer_user != NULL) {
49879 + vfree(learn_buffer_user);
49880 + learn_buffer_user = NULL;
49881 + }
49882 + learn_buffer_len = 0;
49883 + learn_buffer_user_len = 0;
49884 + gr_learn_attached = 0;
49885 + mutex_unlock(&gr_learn_user_mutex);
49886 + }
49887 +
49888 + return 0;
49889 +}
49890 +
49891 +const struct file_operations grsec_fops = {
49892 + .read = read_learn,
49893 + .write = write_grsec_handler,
49894 + .open = open_learn,
49895 + .release = close_learn,
49896 + .poll = poll_learn,
49897 +};
49898 diff -urNp linux-3.0.4/grsecurity/gracl_res.c linux-3.0.4/grsecurity/gracl_res.c
49899 --- linux-3.0.4/grsecurity/gracl_res.c 1969-12-31 19:00:00.000000000 -0500
49900 +++ linux-3.0.4/grsecurity/gracl_res.c 2011-08-23 21:48:14.000000000 -0400
49901 @@ -0,0 +1,68 @@
49902 +#include <linux/kernel.h>
49903 +#include <linux/sched.h>
49904 +#include <linux/gracl.h>
49905 +#include <linux/grinternal.h>
49906 +
49907 +static const char *restab_log[] = {
49908 + [RLIMIT_CPU] = "RLIMIT_CPU",
49909 + [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
49910 + [RLIMIT_DATA] = "RLIMIT_DATA",
49911 + [RLIMIT_STACK] = "RLIMIT_STACK",
49912 + [RLIMIT_CORE] = "RLIMIT_CORE",
49913 + [RLIMIT_RSS] = "RLIMIT_RSS",
49914 + [RLIMIT_NPROC] = "RLIMIT_NPROC",
49915 + [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
49916 + [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
49917 + [RLIMIT_AS] = "RLIMIT_AS",
49918 + [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
49919 + [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
49920 + [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
49921 + [RLIMIT_NICE] = "RLIMIT_NICE",
49922 + [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
49923 + [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
49924 + [GR_CRASH_RES] = "RLIMIT_CRASH"
49925 +};
49926 +
49927 +void
49928 +gr_log_resource(const struct task_struct *task,
49929 + const int res, const unsigned long wanted, const int gt)
49930 +{
49931 + const struct cred *cred;
49932 + unsigned long rlim;
49933 +
49934 + if (!gr_acl_is_enabled() && !grsec_resource_logging)
49935 + return;
49936 +
49937 + // not yet supported resource
49938 + if (unlikely(!restab_log[res]))
49939 + return;
49940 +
49941 + if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
49942 + rlim = task_rlimit_max(task, res);
49943 + else
49944 + rlim = task_rlimit(task, res);
49945 +
49946 + if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
49947 + return;
49948 +
49949 + rcu_read_lock();
49950 + cred = __task_cred(task);
49951 +
49952 + if (res == RLIMIT_NPROC &&
49953 + (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
49954 + cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
49955 + goto out_rcu_unlock;
49956 + else if (res == RLIMIT_MEMLOCK &&
49957 + cap_raised(cred->cap_effective, CAP_IPC_LOCK))
49958 + goto out_rcu_unlock;
49959 + else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
49960 + goto out_rcu_unlock;
49961 + rcu_read_unlock();
49962 +
49963 + gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
49964 +
49965 + return;
49966 +out_rcu_unlock:
49967 + rcu_read_unlock();
49968 + return;
49969 +}
49970 diff -urNp linux-3.0.4/grsecurity/gracl_segv.c linux-3.0.4/grsecurity/gracl_segv.c
49971 --- linux-3.0.4/grsecurity/gracl_segv.c 1969-12-31 19:00:00.000000000 -0500
49972 +++ linux-3.0.4/grsecurity/gracl_segv.c 2011-08-23 21:48:14.000000000 -0400
49973 @@ -0,0 +1,299 @@
49974 +#include <linux/kernel.h>
49975 +#include <linux/mm.h>
49976 +#include <asm/uaccess.h>
49977 +#include <asm/errno.h>
49978 +#include <asm/mman.h>
49979 +#include <net/sock.h>
49980 +#include <linux/file.h>
49981 +#include <linux/fs.h>
49982 +#include <linux/net.h>
49983 +#include <linux/in.h>
49984 +#include <linux/slab.h>
49985 +#include <linux/types.h>
49986 +#include <linux/sched.h>
49987 +#include <linux/timer.h>
49988 +#include <linux/gracl.h>
49989 +#include <linux/grsecurity.h>
49990 +#include <linux/grinternal.h>
49991 +
49992 +static struct crash_uid *uid_set;
49993 +static unsigned short uid_used;
49994 +static DEFINE_SPINLOCK(gr_uid_lock);
49995 +extern rwlock_t gr_inode_lock;
49996 +extern struct acl_subject_label *
49997 + lookup_acl_subj_label(const ino_t inode, const dev_t dev,
49998 + struct acl_role_label *role);
49999 +
50000 +#ifdef CONFIG_BTRFS_FS
50001 +extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
50002 +extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
50003 +#endif
50004 +
50005 +static inline dev_t __get_dev(const struct dentry *dentry)
50006 +{
50007 +#ifdef CONFIG_BTRFS_FS
50008 + if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
50009 + return get_btrfs_dev_from_inode(dentry->d_inode);
50010 + else
50011 +#endif
50012 + return dentry->d_inode->i_sb->s_dev;
50013 +}
50014 +
50015 +int
50016 +gr_init_uidset(void)
50017 +{
50018 + uid_set =
50019 + kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
50020 + uid_used = 0;
50021 +
50022 + return uid_set ? 1 : 0;
50023 +}
50024 +
50025 +void
50026 +gr_free_uidset(void)
50027 +{
50028 + if (uid_set)
50029 + kfree(uid_set);
50030 +
50031 + return;
50032 +}
50033 +
50034 +int
50035 +gr_find_uid(const uid_t uid)
50036 +{
50037 + struct crash_uid *tmp = uid_set;
50038 + uid_t buid;
50039 + int low = 0, high = uid_used - 1, mid;
50040 +
50041 + while (high >= low) {
50042 + mid = (low + high) >> 1;
50043 + buid = tmp[mid].uid;
50044 + if (buid == uid)
50045 + return mid;
50046 + if (buid > uid)
50047 + high = mid - 1;
50048 + if (buid < uid)
50049 + low = mid + 1;
50050 + }
50051 +
50052 + return -1;
50053 +}
50054 +
50055 +static __inline__ void
50056 +gr_insertsort(void)
50057 +{
50058 + unsigned short i, j;
50059 + struct crash_uid index;
50060 +
50061 + for (i = 1; i < uid_used; i++) {
50062 + index = uid_set[i];
50063 + j = i;
50064 + while ((j > 0) && uid_set[j - 1].uid > index.uid) {
50065 + uid_set[j] = uid_set[j - 1];
50066 + j--;
50067 + }
50068 + uid_set[j] = index;
50069 + }
50070 +
50071 + return;
50072 +}
50073 +
50074 +static __inline__ void
50075 +gr_insert_uid(const uid_t uid, const unsigned long expires)
50076 +{
50077 + int loc;
50078 +
50079 + if (uid_used == GR_UIDTABLE_MAX)
50080 + return;
50081 +
50082 + loc = gr_find_uid(uid);
50083 +
50084 + if (loc >= 0) {
50085 + uid_set[loc].expires = expires;
50086 + return;
50087 + }
50088 +
50089 + uid_set[uid_used].uid = uid;
50090 + uid_set[uid_used].expires = expires;
50091 + uid_used++;
50092 +
50093 + gr_insertsort();
50094 +
50095 + return;
50096 +}
50097 +
50098 +void
50099 +gr_remove_uid(const unsigned short loc)
50100 +{
50101 + unsigned short i;
50102 +
50103 + for (i = loc + 1; i < uid_used; i++)
50104 + uid_set[i - 1] = uid_set[i];
50105 +
50106 + uid_used--;
50107 +
50108 + return;
50109 +}
50110 +
50111 +int
50112 +gr_check_crash_uid(const uid_t uid)
50113 +{
50114 + int loc;
50115 + int ret = 0;
50116 +
50117 + if (unlikely(!gr_acl_is_enabled()))
50118 + return 0;
50119 +
50120 + spin_lock(&gr_uid_lock);
50121 + loc = gr_find_uid(uid);
50122 +
50123 + if (loc < 0)
50124 + goto out_unlock;
50125 +
50126 + if (time_before_eq(uid_set[loc].expires, get_seconds()))
50127 + gr_remove_uid(loc);
50128 + else
50129 + ret = 1;
50130 +
50131 +out_unlock:
50132 + spin_unlock(&gr_uid_lock);
50133 + return ret;
50134 +}
50135 +
50136 +static __inline__ int
50137 +proc_is_setxid(const struct cred *cred)
50138 +{
50139 + if (cred->uid != cred->euid || cred->uid != cred->suid ||
50140 + cred->uid != cred->fsuid)
50141 + return 1;
50142 + if (cred->gid != cred->egid || cred->gid != cred->sgid ||
50143 + cred->gid != cred->fsgid)
50144 + return 1;
50145 +
50146 + return 0;
50147 +}
50148 +
50149 +extern int gr_fake_force_sig(int sig, struct task_struct *t);
50150 +
50151 +void
50152 +gr_handle_crash(struct task_struct *task, const int sig)
50153 +{
50154 + struct acl_subject_label *curr;
50155 + struct acl_subject_label *curr2;
50156 + struct task_struct *tsk, *tsk2;
50157 + const struct cred *cred;
50158 + const struct cred *cred2;
50159 +
50160 + if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
50161 + return;
50162 +
50163 + if (unlikely(!gr_acl_is_enabled()))
50164 + return;
50165 +
50166 + curr = task->acl;
50167 +
50168 + if (!(curr->resmask & (1 << GR_CRASH_RES)))
50169 + return;
50170 +
50171 + if (time_before_eq(curr->expires, get_seconds())) {
50172 + curr->expires = 0;
50173 + curr->crashes = 0;
50174 + }
50175 +
50176 + curr->crashes++;
50177 +
50178 + if (!curr->expires)
50179 + curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
50180 +
50181 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
50182 + time_after(curr->expires, get_seconds())) {
50183 + rcu_read_lock();
50184 + cred = __task_cred(task);
50185 + if (cred->uid && proc_is_setxid(cred)) {
50186 + gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
50187 + spin_lock(&gr_uid_lock);
50188 + gr_insert_uid(cred->uid, curr->expires);
50189 + spin_unlock(&gr_uid_lock);
50190 + curr->expires = 0;
50191 + curr->crashes = 0;
50192 + read_lock(&tasklist_lock);
50193 + do_each_thread(tsk2, tsk) {
50194 + cred2 = __task_cred(tsk);
50195 + if (tsk != task && cred2->uid == cred->uid)
50196 + gr_fake_force_sig(SIGKILL, tsk);
50197 + } while_each_thread(tsk2, tsk);
50198 + read_unlock(&tasklist_lock);
50199 + } else {
50200 + gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
50201 + read_lock(&tasklist_lock);
50202 + do_each_thread(tsk2, tsk) {
50203 + if (likely(tsk != task)) {
50204 + curr2 = tsk->acl;
50205 +
50206 + if (curr2->device == curr->device &&
50207 + curr2->inode == curr->inode)
50208 + gr_fake_force_sig(SIGKILL, tsk);
50209 + }
50210 + } while_each_thread(tsk2, tsk);
50211 + read_unlock(&tasklist_lock);
50212 + }
50213 + rcu_read_unlock();
50214 + }
50215 +
50216 + return;
50217 +}
50218 +
50219 +int
50220 +gr_check_crash_exec(const struct file *filp)
50221 +{
50222 + struct acl_subject_label *curr;
50223 +
50224 + if (unlikely(!gr_acl_is_enabled()))
50225 + return 0;
50226 +
50227 + read_lock(&gr_inode_lock);
50228 + curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
50229 + __get_dev(filp->f_path.dentry),
50230 + current->role);
50231 + read_unlock(&gr_inode_lock);
50232 +
50233 + if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
50234 + (!curr->crashes && !curr->expires))
50235 + return 0;
50236 +
50237 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
50238 + time_after(curr->expires, get_seconds()))
50239 + return 1;
50240 + else if (time_before_eq(curr->expires, get_seconds())) {
50241 + curr->crashes = 0;
50242 + curr->expires = 0;
50243 + }
50244 +
50245 + return 0;
50246 +}
50247 +
50248 +void
50249 +gr_handle_alertkill(struct task_struct *task)
50250 +{
50251 + struct acl_subject_label *curracl;
50252 + __u32 curr_ip;
50253 + struct task_struct *p, *p2;
50254 +
50255 + if (unlikely(!gr_acl_is_enabled()))
50256 + return;
50257 +
50258 + curracl = task->acl;
50259 + curr_ip = task->signal->curr_ip;
50260 +
50261 + if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
50262 + read_lock(&tasklist_lock);
50263 + do_each_thread(p2, p) {
50264 + if (p->signal->curr_ip == curr_ip)
50265 + gr_fake_force_sig(SIGKILL, p);
50266 + } while_each_thread(p2, p);
50267 + read_unlock(&tasklist_lock);
50268 + } else if (curracl->mode & GR_KILLPROC)
50269 + gr_fake_force_sig(SIGKILL, task);
50270 +
50271 + return;
50272 +}
50273 diff -urNp linux-3.0.4/grsecurity/gracl_shm.c linux-3.0.4/grsecurity/gracl_shm.c
50274 --- linux-3.0.4/grsecurity/gracl_shm.c 1969-12-31 19:00:00.000000000 -0500
50275 +++ linux-3.0.4/grsecurity/gracl_shm.c 2011-08-23 21:48:14.000000000 -0400
50276 @@ -0,0 +1,40 @@
50277 +#include <linux/kernel.h>
50278 +#include <linux/mm.h>
50279 +#include <linux/sched.h>
50280 +#include <linux/file.h>
50281 +#include <linux/ipc.h>
50282 +#include <linux/gracl.h>
50283 +#include <linux/grsecurity.h>
50284 +#include <linux/grinternal.h>
50285 +
50286 +int
50287 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
50288 + const time_t shm_createtime, const uid_t cuid, const int shmid)
50289 +{
50290 + struct task_struct *task;
50291 +
50292 + if (!gr_acl_is_enabled())
50293 + return 1;
50294 +
50295 + rcu_read_lock();
50296 + read_lock(&tasklist_lock);
50297 +
50298 + task = find_task_by_vpid(shm_cprid);
50299 +
50300 + if (unlikely(!task))
50301 + task = find_task_by_vpid(shm_lapid);
50302 +
50303 + if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
50304 + (task->pid == shm_lapid)) &&
50305 + (task->acl->mode & GR_PROTSHM) &&
50306 + (task->acl != current->acl))) {
50307 + read_unlock(&tasklist_lock);
50308 + rcu_read_unlock();
50309 + gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
50310 + return 0;
50311 + }
50312 + read_unlock(&tasklist_lock);
50313 + rcu_read_unlock();
50314 +
50315 + return 1;
50316 +}
50317 diff -urNp linux-3.0.4/grsecurity/grsec_chdir.c linux-3.0.4/grsecurity/grsec_chdir.c
50318 --- linux-3.0.4/grsecurity/grsec_chdir.c 1969-12-31 19:00:00.000000000 -0500
50319 +++ linux-3.0.4/grsecurity/grsec_chdir.c 2011-08-23 21:48:14.000000000 -0400
50320 @@ -0,0 +1,19 @@
50321 +#include <linux/kernel.h>
50322 +#include <linux/sched.h>
50323 +#include <linux/fs.h>
50324 +#include <linux/file.h>
50325 +#include <linux/grsecurity.h>
50326 +#include <linux/grinternal.h>
50327 +
50328 +void
50329 +gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
50330 +{
50331 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
50332 + if ((grsec_enable_chdir && grsec_enable_group &&
50333 + in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
50334 + !grsec_enable_group)) {
50335 + gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
50336 + }
50337 +#endif
50338 + return;
50339 +}
50340 diff -urNp linux-3.0.4/grsecurity/grsec_chroot.c linux-3.0.4/grsecurity/grsec_chroot.c
50341 --- linux-3.0.4/grsecurity/grsec_chroot.c 1969-12-31 19:00:00.000000000 -0500
50342 +++ linux-3.0.4/grsecurity/grsec_chroot.c 2011-09-15 06:47:48.000000000 -0400
50343 @@ -0,0 +1,351 @@
50344 +#include <linux/kernel.h>
50345 +#include <linux/module.h>
50346 +#include <linux/sched.h>
50347 +#include <linux/file.h>
50348 +#include <linux/fs.h>
50349 +#include <linux/mount.h>
50350 +#include <linux/types.h>
50351 +#include <linux/pid_namespace.h>
50352 +#include <linux/grsecurity.h>
50353 +#include <linux/grinternal.h>
50354 +
50355 +void gr_set_chroot_entries(struct task_struct *task, struct path *path)
50356 +{
50357 +#ifdef CONFIG_GRKERNSEC
50358 + if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
50359 + path->dentry != task->nsproxy->mnt_ns->root->mnt_root)
50360 + task->gr_is_chrooted = 1;
50361 + else
50362 + task->gr_is_chrooted = 0;
50363 +
50364 + task->gr_chroot_dentry = path->dentry;
50365 +#endif
50366 + return;
50367 +}
50368 +
50369 +void gr_clear_chroot_entries(struct task_struct *task)
50370 +{
50371 +#ifdef CONFIG_GRKERNSEC
50372 + task->gr_is_chrooted = 0;
50373 + task->gr_chroot_dentry = NULL;
50374 +#endif
50375 + return;
50376 +}
50377 +
50378 +int
50379 +gr_handle_chroot_unix(const pid_t pid)
50380 +{
50381 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
50382 + struct task_struct *p;
50383 +
50384 + if (unlikely(!grsec_enable_chroot_unix))
50385 + return 1;
50386 +
50387 + if (likely(!proc_is_chrooted(current)))
50388 + return 1;
50389 +
50390 + rcu_read_lock();
50391 + read_lock(&tasklist_lock);
50392 + p = find_task_by_vpid_unrestricted(pid);
50393 + if (unlikely(p && !have_same_root(current, p))) {
50394 + read_unlock(&tasklist_lock);
50395 + rcu_read_unlock();
50396 + gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
50397 + return 0;
50398 + }
50399 + read_unlock(&tasklist_lock);
50400 + rcu_read_unlock();
50401 +#endif
50402 + return 1;
50403 +}
50404 +
50405 +int
50406 +gr_handle_chroot_nice(void)
50407 +{
50408 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
50409 + if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
50410 + gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
50411 + return -EPERM;
50412 + }
50413 +#endif
50414 + return 0;
50415 +}
50416 +
50417 +int
50418 +gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
50419 +{
50420 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
50421 + if (grsec_enable_chroot_nice && (niceval < task_nice(p))
50422 + && proc_is_chrooted(current)) {
50423 + gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
50424 + return -EACCES;
50425 + }
50426 +#endif
50427 + return 0;
50428 +}
50429 +
50430 +int
50431 +gr_handle_chroot_rawio(const struct inode *inode)
50432 +{
50433 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
50434 + if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
50435 + inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
50436 + return 1;
50437 +#endif
50438 + return 0;
50439 +}
50440 +
50441 +int
50442 +gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
50443 +{
50444 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
50445 + struct task_struct *p;
50446 + int ret = 0;
50447 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
50448 + return ret;
50449 +
50450 + read_lock(&tasklist_lock);
50451 + do_each_pid_task(pid, type, p) {
50452 + if (!have_same_root(current, p)) {
50453 + ret = 1;
50454 + goto out;
50455 + }
50456 + } while_each_pid_task(pid, type, p);
50457 +out:
50458 + read_unlock(&tasklist_lock);
50459 + return ret;
50460 +#endif
50461 + return 0;
50462 +}
50463 +
50464 +int
50465 +gr_pid_is_chrooted(struct task_struct *p)
50466 +{
50467 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
50468 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
50469 + return 0;
50470 +
50471 + if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
50472 + !have_same_root(current, p)) {
50473 + return 1;
50474 + }
50475 +#endif
50476 + return 0;
50477 +}
50478 +
50479 +EXPORT_SYMBOL(gr_pid_is_chrooted);
50480 +
50481 +#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
50482 +int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
50483 +{
50484 + struct path path, currentroot;
50485 + int ret = 0;
50486 +
50487 + path.dentry = (struct dentry *)u_dentry;
50488 + path.mnt = (struct vfsmount *)u_mnt;
50489 + get_fs_root(current->fs, &currentroot);
50490 + if (path_is_under(&path, &currentroot))
50491 + ret = 1;
50492 + path_put(&currentroot);
50493 +
50494 + return ret;
50495 +}
50496 +#endif
50497 +
50498 +int
50499 +gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
50500 +{
50501 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
50502 + if (!grsec_enable_chroot_fchdir)
50503 + return 1;
50504 +
50505 + if (!proc_is_chrooted(current))
50506 + return 1;
50507 + else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
50508 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
50509 + return 0;
50510 + }
50511 +#endif
50512 + return 1;
50513 +}
50514 +
50515 +int
50516 +gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
50517 + const time_t shm_createtime)
50518 +{
50519 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
50520 + struct task_struct *p;
50521 + time_t starttime;
50522 +
50523 + if (unlikely(!grsec_enable_chroot_shmat))
50524 + return 1;
50525 +
50526 + if (likely(!proc_is_chrooted(current)))
50527 + return 1;
50528 +
50529 + rcu_read_lock();
50530 + read_lock(&tasklist_lock);
50531 +
50532 + if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
50533 + starttime = p->start_time.tv_sec;
50534 + if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
50535 + if (have_same_root(current, p)) {
50536 + goto allow;
50537 + } else {
50538 + read_unlock(&tasklist_lock);
50539 + rcu_read_unlock();
50540 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
50541 + return 0;
50542 + }
50543 + }
50544 + /* creator exited, pid reuse, fall through to next check */
50545 + }
50546 + if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
50547 + if (unlikely(!have_same_root(current, p))) {
50548 + read_unlock(&tasklist_lock);
50549 + rcu_read_unlock();
50550 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
50551 + return 0;
50552 + }
50553 + }
50554 +
50555 +allow:
50556 + read_unlock(&tasklist_lock);
50557 + rcu_read_unlock();
50558 +#endif
50559 + return 1;
50560 +}
50561 +
50562 +void
50563 +gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
50564 +{
50565 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
50566 + if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
50567 + gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
50568 +#endif
50569 + return;
50570 +}
50571 +
50572 +int
50573 +gr_handle_chroot_mknod(const struct dentry *dentry,
50574 + const struct vfsmount *mnt, const int mode)
50575 +{
50576 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
50577 + if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
50578 + proc_is_chrooted(current)) {
50579 + gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
50580 + return -EPERM;
50581 + }
50582 +#endif
50583 + return 0;
50584 +}
50585 +
50586 +int
50587 +gr_handle_chroot_mount(const struct dentry *dentry,
50588 + const struct vfsmount *mnt, const char *dev_name)
50589 +{
50590 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
50591 + if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
50592 + gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
50593 + return -EPERM;
50594 + }
50595 +#endif
50596 + return 0;
50597 +}
50598 +
50599 +int
50600 +gr_handle_chroot_pivot(void)
50601 +{
50602 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
50603 + if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
50604 + gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
50605 + return -EPERM;
50606 + }
50607 +#endif
50608 + return 0;
50609 +}
50610 +
50611 +int
50612 +gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
50613 +{
50614 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
50615 + if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
50616 + !gr_is_outside_chroot(dentry, mnt)) {
50617 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
50618 + return -EPERM;
50619 + }
50620 +#endif
50621 + return 0;
50622 +}
50623 +
50624 +extern const char *captab_log[];
50625 +extern int captab_log_entries;
50626 +
50627 +int
50628 +gr_chroot_is_capable(const int cap)
50629 +{
50630 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
50631 + if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
50632 + kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
50633 + if (cap_raised(chroot_caps, cap)) {
50634 + const struct cred *creds = current_cred();
50635 + if (cap_raised(creds->cap_effective, cap) && cap < captab_log_entries) {
50636 + gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, current, captab_log[cap]);
50637 + }
50638 + return 0;
50639 + }
50640 + }
50641 +#endif
50642 + return 1;
50643 +}
50644 +
50645 +int
50646 +gr_chroot_is_capable_nolog(const int cap)
50647 +{
50648 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
50649 + if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
50650 + kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
50651 + if (cap_raised(chroot_caps, cap)) {
50652 + return 0;
50653 + }
50654 + }
50655 +#endif
50656 + return 1;
50657 +}
50658 +
50659 +int
50660 +gr_handle_chroot_sysctl(const int op)
50661 +{
50662 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
50663 + if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
50664 + proc_is_chrooted(current))
50665 + return -EACCES;
50666 +#endif
50667 + return 0;
50668 +}
50669 +
50670 +void
50671 +gr_handle_chroot_chdir(struct path *path)
50672 +{
50673 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
50674 + if (grsec_enable_chroot_chdir)
50675 + set_fs_pwd(current->fs, path);
50676 +#endif
50677 + return;
50678 +}
50679 +
50680 +int
50681 +gr_handle_chroot_chmod(const struct dentry *dentry,
50682 + const struct vfsmount *mnt, const int mode)
50683 +{
50684 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
50685 + /* allow chmod +s on directories, but not files */
50686 + if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
50687 + ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
50688 + proc_is_chrooted(current)) {
50689 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
50690 + return -EPERM;
50691 + }
50692 +#endif
50693 + return 0;
50694 +}
50695 diff -urNp linux-3.0.4/grsecurity/grsec_disabled.c linux-3.0.4/grsecurity/grsec_disabled.c
50696 --- linux-3.0.4/grsecurity/grsec_disabled.c 1969-12-31 19:00:00.000000000 -0500
50697 +++ linux-3.0.4/grsecurity/grsec_disabled.c 2011-09-24 08:13:01.000000000 -0400
50698 @@ -0,0 +1,433 @@
50699 +#include <linux/kernel.h>
50700 +#include <linux/module.h>
50701 +#include <linux/sched.h>
50702 +#include <linux/file.h>
50703 +#include <linux/fs.h>
50704 +#include <linux/kdev_t.h>
50705 +#include <linux/net.h>
50706 +#include <linux/in.h>
50707 +#include <linux/ip.h>
50708 +#include <linux/skbuff.h>
50709 +#include <linux/sysctl.h>
50710 +
50711 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
50712 +void
50713 +pax_set_initial_flags(struct linux_binprm *bprm)
50714 +{
50715 + return;
50716 +}
50717 +#endif
50718 +
50719 +#ifdef CONFIG_SYSCTL
50720 +__u32
50721 +gr_handle_sysctl(const struct ctl_table * table, const int op)
50722 +{
50723 + return 0;
50724 +}
50725 +#endif
50726 +
50727 +#ifdef CONFIG_TASKSTATS
50728 +int gr_is_taskstats_denied(int pid)
50729 +{
50730 + return 0;
50731 +}
50732 +#endif
50733 +
50734 +int
50735 +gr_acl_is_enabled(void)
50736 +{
50737 + return 0;
50738 +}
50739 +
50740 +int
50741 +gr_handle_rawio(const struct inode *inode)
50742 +{
50743 + return 0;
50744 +}
50745 +
50746 +void
50747 +gr_acl_handle_psacct(struct task_struct *task, const long code)
50748 +{
50749 + return;
50750 +}
50751 +
50752 +int
50753 +gr_handle_ptrace(struct task_struct *task, const long request)
50754 +{
50755 + return 0;
50756 +}
50757 +
50758 +int
50759 +gr_handle_proc_ptrace(struct task_struct *task)
50760 +{
50761 + return 0;
50762 +}
50763 +
50764 +void
50765 +gr_learn_resource(const struct task_struct *task,
50766 + const int res, const unsigned long wanted, const int gt)
50767 +{
50768 + return;
50769 +}
50770 +
50771 +int
50772 +gr_set_acls(const int type)
50773 +{
50774 + return 0;
50775 +}
50776 +
50777 +int
50778 +gr_check_hidden_task(const struct task_struct *tsk)
50779 +{
50780 + return 0;
50781 +}
50782 +
50783 +int
50784 +gr_check_protected_task(const struct task_struct *task)
50785 +{
50786 + return 0;
50787 +}
50788 +
50789 +int
50790 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
50791 +{
50792 + return 0;
50793 +}
50794 +
50795 +void
50796 +gr_copy_label(struct task_struct *tsk)
50797 +{
50798 + return;
50799 +}
50800 +
50801 +void
50802 +gr_set_pax_flags(struct task_struct *task)
50803 +{
50804 + return;
50805 +}
50806 +
50807 +int
50808 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
50809 + const int unsafe_share)
50810 +{
50811 + return 0;
50812 +}
50813 +
50814 +void
50815 +gr_handle_delete(const ino_t ino, const dev_t dev)
50816 +{
50817 + return;
50818 +}
50819 +
50820 +void
50821 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
50822 +{
50823 + return;
50824 +}
50825 +
50826 +void
50827 +gr_handle_crash(struct task_struct *task, const int sig)
50828 +{
50829 + return;
50830 +}
50831 +
50832 +int
50833 +gr_check_crash_exec(const struct file *filp)
50834 +{
50835 + return 0;
50836 +}
50837 +
50838 +int
50839 +gr_check_crash_uid(const uid_t uid)
50840 +{
50841 + return 0;
50842 +}
50843 +
50844 +void
50845 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
50846 + struct dentry *old_dentry,
50847 + struct dentry *new_dentry,
50848 + struct vfsmount *mnt, const __u8 replace)
50849 +{
50850 + return;
50851 +}
50852 +
50853 +int
50854 +gr_search_socket(const int family, const int type, const int protocol)
50855 +{
50856 + return 1;
50857 +}
50858 +
50859 +int
50860 +gr_search_connectbind(const int mode, const struct socket *sock,
50861 + const struct sockaddr_in *addr)
50862 +{
50863 + return 0;
50864 +}
50865 +
50866 +void
50867 +gr_handle_alertkill(struct task_struct *task)
50868 +{
50869 + return;
50870 +}
50871 +
50872 +__u32
50873 +gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
50874 +{
50875 + return 1;
50876 +}
50877 +
50878 +__u32
50879 +gr_acl_handle_hidden_file(const struct dentry * dentry,
50880 + const struct vfsmount * mnt)
50881 +{
50882 + return 1;
50883 +}
50884 +
50885 +__u32
50886 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
50887 + const int fmode)
50888 +{
50889 + return 1;
50890 +}
50891 +
50892 +__u32
50893 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
50894 +{
50895 + return 1;
50896 +}
50897 +
50898 +__u32
50899 +gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
50900 +{
50901 + return 1;
50902 +}
50903 +
50904 +int
50905 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
50906 + unsigned int *vm_flags)
50907 +{
50908 + return 1;
50909 +}
50910 +
50911 +__u32
50912 +gr_acl_handle_truncate(const struct dentry * dentry,
50913 + const struct vfsmount * mnt)
50914 +{
50915 + return 1;
50916 +}
50917 +
50918 +__u32
50919 +gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
50920 +{
50921 + return 1;
50922 +}
50923 +
50924 +__u32
50925 +gr_acl_handle_access(const struct dentry * dentry,
50926 + const struct vfsmount * mnt, const int fmode)
50927 +{
50928 + return 1;
50929 +}
50930 +
50931 +__u32
50932 +gr_acl_handle_fchmod(const struct dentry * dentry, const struct vfsmount * mnt,
50933 + mode_t mode)
50934 +{
50935 + return 1;
50936 +}
50937 +
50938 +__u32
50939 +gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
50940 + mode_t mode)
50941 +{
50942 + return 1;
50943 +}
50944 +
50945 +__u32
50946 +gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
50947 +{
50948 + return 1;
50949 +}
50950 +
50951 +__u32
50952 +gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
50953 +{
50954 + return 1;
50955 +}
50956 +
50957 +void
50958 +grsecurity_init(void)
50959 +{
50960 + return;
50961 +}
50962 +
50963 +__u32
50964 +gr_acl_handle_mknod(const struct dentry * new_dentry,
50965 + const struct dentry * parent_dentry,
50966 + const struct vfsmount * parent_mnt,
50967 + const int mode)
50968 +{
50969 + return 1;
50970 +}
50971 +
50972 +__u32
50973 +gr_acl_handle_mkdir(const struct dentry * new_dentry,
50974 + const struct dentry * parent_dentry,
50975 + const struct vfsmount * parent_mnt)
50976 +{
50977 + return 1;
50978 +}
50979 +
50980 +__u32
50981 +gr_acl_handle_symlink(const struct dentry * new_dentry,
50982 + const struct dentry * parent_dentry,
50983 + const struct vfsmount * parent_mnt, const char *from)
50984 +{
50985 + return 1;
50986 +}
50987 +
50988 +__u32
50989 +gr_acl_handle_link(const struct dentry * new_dentry,
50990 + const struct dentry * parent_dentry,
50991 + const struct vfsmount * parent_mnt,
50992 + const struct dentry * old_dentry,
50993 + const struct vfsmount * old_mnt, const char *to)
50994 +{
50995 + return 1;
50996 +}
50997 +
50998 +int
50999 +gr_acl_handle_rename(const struct dentry *new_dentry,
51000 + const struct dentry *parent_dentry,
51001 + const struct vfsmount *parent_mnt,
51002 + const struct dentry *old_dentry,
51003 + const struct inode *old_parent_inode,
51004 + const struct vfsmount *old_mnt, const char *newname)
51005 +{
51006 + return 0;
51007 +}
51008 +
51009 +int
51010 +gr_acl_handle_filldir(const struct file *file, const char *name,
51011 + const int namelen, const ino_t ino)
51012 +{
51013 + return 1;
51014 +}
51015 +
51016 +int
51017 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
51018 + const time_t shm_createtime, const uid_t cuid, const int shmid)
51019 +{
51020 + return 1;
51021 +}
51022 +
51023 +int
51024 +gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
51025 +{
51026 + return 0;
51027 +}
51028 +
51029 +int
51030 +gr_search_accept(const struct socket *sock)
51031 +{
51032 + return 0;
51033 +}
51034 +
51035 +int
51036 +gr_search_listen(const struct socket *sock)
51037 +{
51038 + return 0;
51039 +}
51040 +
51041 +int
51042 +gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
51043 +{
51044 + return 0;
51045 +}
51046 +
51047 +__u32
51048 +gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
51049 +{
51050 + return 1;
51051 +}
51052 +
51053 +__u32
51054 +gr_acl_handle_creat(const struct dentry * dentry,
51055 + const struct dentry * p_dentry,
51056 + const struct vfsmount * p_mnt, const int fmode,
51057 + const int imode)
51058 +{
51059 + return 1;
51060 +}
51061 +
51062 +void
51063 +gr_acl_handle_exit(void)
51064 +{
51065 + return;
51066 +}
51067 +
51068 +int
51069 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
51070 +{
51071 + return 1;
51072 +}
51073 +
51074 +void
51075 +gr_set_role_label(const uid_t uid, const gid_t gid)
51076 +{
51077 + return;
51078 +}
51079 +
51080 +int
51081 +gr_acl_handle_procpidmem(const struct task_struct *task)
51082 +{
51083 + return 0;
51084 +}
51085 +
51086 +int
51087 +gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
51088 +{
51089 + return 0;
51090 +}
51091 +
51092 +int
51093 +gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
51094 +{
51095 + return 0;
51096 +}
51097 +
51098 +void
51099 +gr_set_kernel_label(struct task_struct *task)
51100 +{
51101 + return;
51102 +}
51103 +
51104 +int
51105 +gr_check_user_change(int real, int effective, int fs)
51106 +{
51107 + return 0;
51108 +}
51109 +
51110 +int
51111 +gr_check_group_change(int real, int effective, int fs)
51112 +{
51113 + return 0;
51114 +}
51115 +
51116 +int gr_acl_enable_at_secure(void)
51117 +{
51118 + return 0;
51119 +}
51120 +
51121 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
51122 +{
51123 + return dentry->d_inode->i_sb->s_dev;
51124 +}
51125 +
51126 +EXPORT_SYMBOL(gr_learn_resource);
51127 +EXPORT_SYMBOL(gr_set_kernel_label);
51128 +#ifdef CONFIG_SECURITY
51129 +EXPORT_SYMBOL(gr_check_user_change);
51130 +EXPORT_SYMBOL(gr_check_group_change);
51131 +#endif
51132 diff -urNp linux-3.0.4/grsecurity/grsec_exec.c linux-3.0.4/grsecurity/grsec_exec.c
51133 --- linux-3.0.4/grsecurity/grsec_exec.c 1969-12-31 19:00:00.000000000 -0500
51134 +++ linux-3.0.4/grsecurity/grsec_exec.c 2011-09-14 09:20:28.000000000 -0400
51135 @@ -0,0 +1,145 @@
51136 +#include <linux/kernel.h>
51137 +#include <linux/sched.h>
51138 +#include <linux/file.h>
51139 +#include <linux/binfmts.h>
51140 +#include <linux/fs.h>
51141 +#include <linux/types.h>
51142 +#include <linux/grdefs.h>
51143 +#include <linux/grsecurity.h>
51144 +#include <linux/grinternal.h>
51145 +#include <linux/capability.h>
51146 +#include <linux/module.h>
51147 +
51148 +#include <asm/uaccess.h>
51149 +
51150 +#ifdef CONFIG_GRKERNSEC_EXECLOG
51151 +static char gr_exec_arg_buf[132];
51152 +static DEFINE_MUTEX(gr_exec_arg_mutex);
51153 +#endif
51154 +
51155 +extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
51156 +
51157 +void
51158 +gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
51159 +{
51160 +#ifdef CONFIG_GRKERNSEC_EXECLOG
51161 + char *grarg = gr_exec_arg_buf;
51162 + unsigned int i, x, execlen = 0;
51163 + char c;
51164 +
51165 + if (!((grsec_enable_execlog && grsec_enable_group &&
51166 + in_group_p(grsec_audit_gid))
51167 + || (grsec_enable_execlog && !grsec_enable_group)))
51168 + return;
51169 +
51170 + mutex_lock(&gr_exec_arg_mutex);
51171 + memset(grarg, 0, sizeof(gr_exec_arg_buf));
51172 +
51173 + for (i = 0; i < bprm->argc && execlen < 128; i++) {
51174 + const char __user *p;
51175 + unsigned int len;
51176 +
51177 + p = get_user_arg_ptr(argv, i);
51178 + if (IS_ERR(p))
51179 + goto log;
51180 +
51181 + len = strnlen_user(p, 128 - execlen);
51182 + if (len > 128 - execlen)
51183 + len = 128 - execlen;
51184 + else if (len > 0)
51185 + len--;
51186 + if (copy_from_user(grarg + execlen, p, len))
51187 + goto log;
51188 +
51189 + /* rewrite unprintable characters */
51190 + for (x = 0; x < len; x++) {
51191 + c = *(grarg + execlen + x);
51192 + if (c < 32 || c > 126)
51193 + *(grarg + execlen + x) = ' ';
51194 + }
51195 +
51196 + execlen += len;
51197 + *(grarg + execlen) = ' ';
51198 + *(grarg + execlen + 1) = '\0';
51199 + execlen++;
51200 + }
51201 +
51202 + log:
51203 + gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
51204 + bprm->file->f_path.mnt, grarg);
51205 + mutex_unlock(&gr_exec_arg_mutex);
51206 +#endif
51207 + return;
51208 +}
51209 +
51210 +#ifdef CONFIG_GRKERNSEC
51211 +extern int gr_acl_is_capable(const int cap);
51212 +extern int gr_acl_is_capable_nolog(const int cap);
51213 +extern int gr_chroot_is_capable(const int cap);
51214 +extern int gr_chroot_is_capable_nolog(const int cap);
51215 +#endif
51216 +
51217 +const char *captab_log[] = {
51218 + "CAP_CHOWN",
51219 + "CAP_DAC_OVERRIDE",
51220 + "CAP_DAC_READ_SEARCH",
51221 + "CAP_FOWNER",
51222 + "CAP_FSETID",
51223 + "CAP_KILL",
51224 + "CAP_SETGID",
51225 + "CAP_SETUID",
51226 + "CAP_SETPCAP",
51227 + "CAP_LINUX_IMMUTABLE",
51228 + "CAP_NET_BIND_SERVICE",
51229 + "CAP_NET_BROADCAST",
51230 + "CAP_NET_ADMIN",
51231 + "CAP_NET_RAW",
51232 + "CAP_IPC_LOCK",
51233 + "CAP_IPC_OWNER",
51234 + "CAP_SYS_MODULE",
51235 + "CAP_SYS_RAWIO",
51236 + "CAP_SYS_CHROOT",
51237 + "CAP_SYS_PTRACE",
51238 + "CAP_SYS_PACCT",
51239 + "CAP_SYS_ADMIN",
51240 + "CAP_SYS_BOOT",
51241 + "CAP_SYS_NICE",
51242 + "CAP_SYS_RESOURCE",
51243 + "CAP_SYS_TIME",
51244 + "CAP_SYS_TTY_CONFIG",
51245 + "CAP_MKNOD",
51246 + "CAP_LEASE",
51247 + "CAP_AUDIT_WRITE",
51248 + "CAP_AUDIT_CONTROL",
51249 + "CAP_SETFCAP",
51250 + "CAP_MAC_OVERRIDE",
51251 + "CAP_MAC_ADMIN",
51252 + "CAP_SYSLOG"
51253 +};
51254 +
51255 +int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
51256 +
51257 +int gr_is_capable(const int cap)
51258 +{
51259 +#ifdef CONFIG_GRKERNSEC
51260 + if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
51261 + return 1;
51262 + return 0;
51263 +#else
51264 + return 1;
51265 +#endif
51266 +}
51267 +
51268 +int gr_is_capable_nolog(const int cap)
51269 +{
51270 +#ifdef CONFIG_GRKERNSEC
51271 + if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
51272 + return 1;
51273 + return 0;
51274 +#else
51275 + return 1;
51276 +#endif
51277 +}
51278 +
51279 +EXPORT_SYMBOL(gr_is_capable);
51280 +EXPORT_SYMBOL(gr_is_capable_nolog);
51281 diff -urNp linux-3.0.4/grsecurity/grsec_fifo.c linux-3.0.4/grsecurity/grsec_fifo.c
51282 --- linux-3.0.4/grsecurity/grsec_fifo.c 1969-12-31 19:00:00.000000000 -0500
51283 +++ linux-3.0.4/grsecurity/grsec_fifo.c 2011-08-23 21:48:14.000000000 -0400
51284 @@ -0,0 +1,24 @@
51285 +#include <linux/kernel.h>
51286 +#include <linux/sched.h>
51287 +#include <linux/fs.h>
51288 +#include <linux/file.h>
51289 +#include <linux/grinternal.h>
51290 +
51291 +int
51292 +gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
51293 + const struct dentry *dir, const int flag, const int acc_mode)
51294 +{
51295 +#ifdef CONFIG_GRKERNSEC_FIFO
51296 + const struct cred *cred = current_cred();
51297 +
51298 + if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
51299 + !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
51300 + (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
51301 + (cred->fsuid != dentry->d_inode->i_uid)) {
51302 + if (!inode_permission(dentry->d_inode, acc_mode))
51303 + gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
51304 + return -EACCES;
51305 + }
51306 +#endif
51307 + return 0;
51308 +}
51309 diff -urNp linux-3.0.4/grsecurity/grsec_fork.c linux-3.0.4/grsecurity/grsec_fork.c
51310 --- linux-3.0.4/grsecurity/grsec_fork.c 1969-12-31 19:00:00.000000000 -0500
51311 +++ linux-3.0.4/grsecurity/grsec_fork.c 2011-08-23 21:48:14.000000000 -0400
51312 @@ -0,0 +1,23 @@
51313 +#include <linux/kernel.h>
51314 +#include <linux/sched.h>
51315 +#include <linux/grsecurity.h>
51316 +#include <linux/grinternal.h>
51317 +#include <linux/errno.h>
51318 +
51319 +void
51320 +gr_log_forkfail(const int retval)
51321 +{
51322 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
51323 + if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
51324 + switch (retval) {
51325 + case -EAGAIN:
51326 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
51327 + break;
51328 + case -ENOMEM:
51329 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
51330 + break;
51331 + }
51332 + }
51333 +#endif
51334 + return;
51335 +}
51336 diff -urNp linux-3.0.4/grsecurity/grsec_init.c linux-3.0.4/grsecurity/grsec_init.c
51337 --- linux-3.0.4/grsecurity/grsec_init.c 1969-12-31 19:00:00.000000000 -0500
51338 +++ linux-3.0.4/grsecurity/grsec_init.c 2011-08-25 17:25:12.000000000 -0400
51339 @@ -0,0 +1,269 @@
51340 +#include <linux/kernel.h>
51341 +#include <linux/sched.h>
51342 +#include <linux/mm.h>
51343 +#include <linux/gracl.h>
51344 +#include <linux/slab.h>
51345 +#include <linux/vmalloc.h>
51346 +#include <linux/percpu.h>
51347 +#include <linux/module.h>
51348 +
51349 +int grsec_enable_brute;
51350 +int grsec_enable_link;
51351 +int grsec_enable_dmesg;
51352 +int grsec_enable_harden_ptrace;
51353 +int grsec_enable_fifo;
51354 +int grsec_enable_execlog;
51355 +int grsec_enable_signal;
51356 +int grsec_enable_forkfail;
51357 +int grsec_enable_audit_ptrace;
51358 +int grsec_enable_time;
51359 +int grsec_enable_audit_textrel;
51360 +int grsec_enable_group;
51361 +int grsec_audit_gid;
51362 +int grsec_enable_chdir;
51363 +int grsec_enable_mount;
51364 +int grsec_enable_rofs;
51365 +int grsec_enable_chroot_findtask;
51366 +int grsec_enable_chroot_mount;
51367 +int grsec_enable_chroot_shmat;
51368 +int grsec_enable_chroot_fchdir;
51369 +int grsec_enable_chroot_double;
51370 +int grsec_enable_chroot_pivot;
51371 +int grsec_enable_chroot_chdir;
51372 +int grsec_enable_chroot_chmod;
51373 +int grsec_enable_chroot_mknod;
51374 +int grsec_enable_chroot_nice;
51375 +int grsec_enable_chroot_execlog;
51376 +int grsec_enable_chroot_caps;
51377 +int grsec_enable_chroot_sysctl;
51378 +int grsec_enable_chroot_unix;
51379 +int grsec_enable_tpe;
51380 +int grsec_tpe_gid;
51381 +int grsec_enable_blackhole;
51382 +#ifdef CONFIG_IPV6_MODULE
51383 +EXPORT_SYMBOL(grsec_enable_blackhole);
51384 +#endif
51385 +int grsec_lastack_retries;
51386 +int grsec_enable_tpe_all;
51387 +int grsec_enable_tpe_invert;
51388 +int grsec_enable_socket_all;
51389 +int grsec_socket_all_gid;
51390 +int grsec_enable_socket_client;
51391 +int grsec_socket_client_gid;
51392 +int grsec_enable_socket_server;
51393 +int grsec_socket_server_gid;
51394 +int grsec_resource_logging;
51395 +int grsec_disable_privio;
51396 +int grsec_enable_log_rwxmaps;
51397 +int grsec_lock;
51398 +
51399 +DEFINE_SPINLOCK(grsec_alert_lock);
51400 +unsigned long grsec_alert_wtime = 0;
51401 +unsigned long grsec_alert_fyet = 0;
51402 +
51403 +DEFINE_SPINLOCK(grsec_audit_lock);
51404 +
51405 +DEFINE_RWLOCK(grsec_exec_file_lock);
51406 +
51407 +char *gr_shared_page[4];
51408 +
51409 +char *gr_alert_log_fmt;
51410 +char *gr_audit_log_fmt;
51411 +char *gr_alert_log_buf;
51412 +char *gr_audit_log_buf;
51413 +
51414 +extern struct gr_arg *gr_usermode;
51415 +extern unsigned char *gr_system_salt;
51416 +extern unsigned char *gr_system_sum;
51417 +
51418 +void __init
51419 +grsecurity_init(void)
51420 +{
51421 + int j;
51422 + /* create the per-cpu shared pages */
51423 +
51424 +#ifdef CONFIG_X86
51425 + memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
51426 +#endif
51427 +
51428 + for (j = 0; j < 4; j++) {
51429 + gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
51430 + if (gr_shared_page[j] == NULL) {
51431 + panic("Unable to allocate grsecurity shared page");
51432 + return;
51433 + }
51434 + }
51435 +
51436 + /* allocate log buffers */
51437 + gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
51438 + if (!gr_alert_log_fmt) {
51439 + panic("Unable to allocate grsecurity alert log format buffer");
51440 + return;
51441 + }
51442 + gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
51443 + if (!gr_audit_log_fmt) {
51444 + panic("Unable to allocate grsecurity audit log format buffer");
51445 + return;
51446 + }
51447 + gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
51448 + if (!gr_alert_log_buf) {
51449 + panic("Unable to allocate grsecurity alert log buffer");
51450 + return;
51451 + }
51452 + gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
51453 + if (!gr_audit_log_buf) {
51454 + panic("Unable to allocate grsecurity audit log buffer");
51455 + return;
51456 + }
51457 +
51458 + /* allocate memory for authentication structure */
51459 + gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
51460 + gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
51461 + gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
51462 +
51463 + if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
51464 + panic("Unable to allocate grsecurity authentication structure");
51465 + return;
51466 + }
51467 +
51468 +
51469 +#ifdef CONFIG_GRKERNSEC_IO
51470 +#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
51471 + grsec_disable_privio = 1;
51472 +#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
51473 + grsec_disable_privio = 1;
51474 +#else
51475 + grsec_disable_privio = 0;
51476 +#endif
51477 +#endif
51478 +
51479 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
51480 + /* for backward compatibility, tpe_invert always defaults to on if
51481 + enabled in the kernel
51482 + */
51483 + grsec_enable_tpe_invert = 1;
51484 +#endif
51485 +
51486 +#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
51487 +#ifndef CONFIG_GRKERNSEC_SYSCTL
51488 + grsec_lock = 1;
51489 +#endif
51490 +
51491 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
51492 + grsec_enable_audit_textrel = 1;
51493 +#endif
51494 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
51495 + grsec_enable_log_rwxmaps = 1;
51496 +#endif
51497 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
51498 + grsec_enable_group = 1;
51499 + grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
51500 +#endif
51501 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
51502 + grsec_enable_chdir = 1;
51503 +#endif
51504 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
51505 + grsec_enable_harden_ptrace = 1;
51506 +#endif
51507 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
51508 + grsec_enable_mount = 1;
51509 +#endif
51510 +#ifdef CONFIG_GRKERNSEC_LINK
51511 + grsec_enable_link = 1;
51512 +#endif
51513 +#ifdef CONFIG_GRKERNSEC_BRUTE
51514 + grsec_enable_brute = 1;
51515 +#endif
51516 +#ifdef CONFIG_GRKERNSEC_DMESG
51517 + grsec_enable_dmesg = 1;
51518 +#endif
51519 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
51520 + grsec_enable_blackhole = 1;
51521 + grsec_lastack_retries = 4;
51522 +#endif
51523 +#ifdef CONFIG_GRKERNSEC_FIFO
51524 + grsec_enable_fifo = 1;
51525 +#endif
51526 +#ifdef CONFIG_GRKERNSEC_EXECLOG
51527 + grsec_enable_execlog = 1;
51528 +#endif
51529 +#ifdef CONFIG_GRKERNSEC_SIGNAL
51530 + grsec_enable_signal = 1;
51531 +#endif
51532 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
51533 + grsec_enable_forkfail = 1;
51534 +#endif
51535 +#ifdef CONFIG_GRKERNSEC_TIME
51536 + grsec_enable_time = 1;
51537 +#endif
51538 +#ifdef CONFIG_GRKERNSEC_RESLOG
51539 + grsec_resource_logging = 1;
51540 +#endif
51541 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
51542 + grsec_enable_chroot_findtask = 1;
51543 +#endif
51544 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
51545 + grsec_enable_chroot_unix = 1;
51546 +#endif
51547 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
51548 + grsec_enable_chroot_mount = 1;
51549 +#endif
51550 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
51551 + grsec_enable_chroot_fchdir = 1;
51552 +#endif
51553 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
51554 + grsec_enable_chroot_shmat = 1;
51555 +#endif
51556 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
51557 + grsec_enable_audit_ptrace = 1;
51558 +#endif
51559 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
51560 + grsec_enable_chroot_double = 1;
51561 +#endif
51562 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
51563 + grsec_enable_chroot_pivot = 1;
51564 +#endif
51565 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
51566 + grsec_enable_chroot_chdir = 1;
51567 +#endif
51568 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
51569 + grsec_enable_chroot_chmod = 1;
51570 +#endif
51571 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
51572 + grsec_enable_chroot_mknod = 1;
51573 +#endif
51574 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
51575 + grsec_enable_chroot_nice = 1;
51576 +#endif
51577 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
51578 + grsec_enable_chroot_execlog = 1;
51579 +#endif
51580 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
51581 + grsec_enable_chroot_caps = 1;
51582 +#endif
51583 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
51584 + grsec_enable_chroot_sysctl = 1;
51585 +#endif
51586 +#ifdef CONFIG_GRKERNSEC_TPE
51587 + grsec_enable_tpe = 1;
51588 + grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
51589 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
51590 + grsec_enable_tpe_all = 1;
51591 +#endif
51592 +#endif
51593 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
51594 + grsec_enable_socket_all = 1;
51595 + grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
51596 +#endif
51597 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
51598 + grsec_enable_socket_client = 1;
51599 + grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
51600 +#endif
51601 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
51602 + grsec_enable_socket_server = 1;
51603 + grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
51604 +#endif
51605 +#endif
51606 +
51607 + return;
51608 +}
51609 diff -urNp linux-3.0.4/grsecurity/grsec_link.c linux-3.0.4/grsecurity/grsec_link.c
51610 --- linux-3.0.4/grsecurity/grsec_link.c 1969-12-31 19:00:00.000000000 -0500
51611 +++ linux-3.0.4/grsecurity/grsec_link.c 2011-08-23 21:48:14.000000000 -0400
51612 @@ -0,0 +1,43 @@
51613 +#include <linux/kernel.h>
51614 +#include <linux/sched.h>
51615 +#include <linux/fs.h>
51616 +#include <linux/file.h>
51617 +#include <linux/grinternal.h>
51618 +
51619 +int
51620 +gr_handle_follow_link(const struct inode *parent,
51621 + const struct inode *inode,
51622 + const struct dentry *dentry, const struct vfsmount *mnt)
51623 +{
51624 +#ifdef CONFIG_GRKERNSEC_LINK
51625 + const struct cred *cred = current_cred();
51626 +
51627 + if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
51628 + (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
51629 + (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
51630 + gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
51631 + return -EACCES;
51632 + }
51633 +#endif
51634 + return 0;
51635 +}
51636 +
51637 +int
51638 +gr_handle_hardlink(const struct dentry *dentry,
51639 + const struct vfsmount *mnt,
51640 + struct inode *inode, const int mode, const char *to)
51641 +{
51642 +#ifdef CONFIG_GRKERNSEC_LINK
51643 + const struct cred *cred = current_cred();
51644 +
51645 + if (grsec_enable_link && cred->fsuid != inode->i_uid &&
51646 + (!S_ISREG(mode) || (mode & S_ISUID) ||
51647 + ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
51648 + (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
51649 + !capable(CAP_FOWNER) && cred->uid) {
51650 + gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
51651 + return -EPERM;
51652 + }
51653 +#endif
51654 + return 0;
51655 +}
51656 diff -urNp linux-3.0.4/grsecurity/grsec_log.c linux-3.0.4/grsecurity/grsec_log.c
51657 --- linux-3.0.4/grsecurity/grsec_log.c 1969-12-31 19:00:00.000000000 -0500
51658 +++ linux-3.0.4/grsecurity/grsec_log.c 2011-09-14 23:17:55.000000000 -0400
51659 @@ -0,0 +1,313 @@
51660 +#include <linux/kernel.h>
51661 +#include <linux/sched.h>
51662 +#include <linux/file.h>
51663 +#include <linux/tty.h>
51664 +#include <linux/fs.h>
51665 +#include <linux/grinternal.h>
51666 +
51667 +#ifdef CONFIG_TREE_PREEMPT_RCU
51668 +#define DISABLE_PREEMPT() preempt_disable()
51669 +#define ENABLE_PREEMPT() preempt_enable()
51670 +#else
51671 +#define DISABLE_PREEMPT()
51672 +#define ENABLE_PREEMPT()
51673 +#endif
51674 +
51675 +#define BEGIN_LOCKS(x) \
51676 + DISABLE_PREEMPT(); \
51677 + rcu_read_lock(); \
51678 + read_lock(&tasklist_lock); \
51679 + read_lock(&grsec_exec_file_lock); \
51680 + if (x != GR_DO_AUDIT) \
51681 + spin_lock(&grsec_alert_lock); \
51682 + else \
51683 + spin_lock(&grsec_audit_lock)
51684 +
51685 +#define END_LOCKS(x) \
51686 + if (x != GR_DO_AUDIT) \
51687 + spin_unlock(&grsec_alert_lock); \
51688 + else \
51689 + spin_unlock(&grsec_audit_lock); \
51690 + read_unlock(&grsec_exec_file_lock); \
51691 + read_unlock(&tasklist_lock); \
51692 + rcu_read_unlock(); \
51693 + ENABLE_PREEMPT(); \
51694 + if (x == GR_DONT_AUDIT) \
51695 + gr_handle_alertkill(current)
51696 +
51697 +enum {
51698 + FLOODING,
51699 + NO_FLOODING
51700 +};
51701 +
51702 +extern char *gr_alert_log_fmt;
51703 +extern char *gr_audit_log_fmt;
51704 +extern char *gr_alert_log_buf;
51705 +extern char *gr_audit_log_buf;
51706 +
51707 +static int gr_log_start(int audit)
51708 +{
51709 + char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
51710 + char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
51711 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
51712 + unsigned long curr_secs = get_seconds();
51713 +
51714 + if (audit == GR_DO_AUDIT)
51715 + goto set_fmt;
51716 +
51717 + if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
51718 + grsec_alert_wtime = curr_secs;
51719 + grsec_alert_fyet = 0;
51720 + } else if (time_before(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
51721 + if (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST) {
51722 + grsec_alert_fyet++;
51723 + } else if (grsec_alert_fyet && grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
51724 + grsec_alert_wtime = curr_secs;
51725 + grsec_alert_fyet++;
51726 + printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
51727 + return FLOODING;
51728 + }
51729 + } else return FLOODING;
51730 +
51731 +set_fmt:
51732 + memset(buf, 0, PAGE_SIZE);
51733 + if (current->signal->curr_ip && gr_acl_is_enabled()) {
51734 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
51735 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
51736 + } else if (current->signal->curr_ip) {
51737 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
51738 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
51739 + } else if (gr_acl_is_enabled()) {
51740 + sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
51741 + snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
51742 + } else {
51743 + sprintf(fmt, "%s%s", loglevel, "grsec: ");
51744 + strcpy(buf, fmt);
51745 + }
51746 +
51747 + return NO_FLOODING;
51748 +}
51749 +
51750 +static void gr_log_middle(int audit, const char *msg, va_list ap)
51751 + __attribute__ ((format (printf, 2, 0)));
51752 +
51753 +static void gr_log_middle(int audit, const char *msg, va_list ap)
51754 +{
51755 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
51756 + unsigned int len = strlen(buf);
51757 +
51758 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
51759 +
51760 + return;
51761 +}
51762 +
51763 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
51764 + __attribute__ ((format (printf, 2, 3)));
51765 +
51766 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
51767 +{
51768 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
51769 + unsigned int len = strlen(buf);
51770 + va_list ap;
51771 +
51772 + va_start(ap, msg);
51773 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
51774 + va_end(ap);
51775 +
51776 + return;
51777 +}
51778 +
51779 +static void gr_log_end(int audit)
51780 +{
51781 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
51782 + unsigned int len = strlen(buf);
51783 +
51784 + snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
51785 + printk("%s\n", buf);
51786 +
51787 + return;
51788 +}
51789 +
51790 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
51791 +{
51792 + int logtype;
51793 + char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
51794 + char *str1 = NULL, *str2 = NULL, *str3 = NULL;
51795 + void *voidptr = NULL;
51796 + int num1 = 0, num2 = 0;
51797 + unsigned long ulong1 = 0, ulong2 = 0;
51798 + struct dentry *dentry = NULL;
51799 + struct vfsmount *mnt = NULL;
51800 + struct file *file = NULL;
51801 + struct task_struct *task = NULL;
51802 + const struct cred *cred, *pcred;
51803 + va_list ap;
51804 +
51805 + BEGIN_LOCKS(audit);
51806 + logtype = gr_log_start(audit);
51807 + if (logtype == FLOODING) {
51808 + END_LOCKS(audit);
51809 + return;
51810 + }
51811 + va_start(ap, argtypes);
51812 + switch (argtypes) {
51813 + case GR_TTYSNIFF:
51814 + task = va_arg(ap, struct task_struct *);
51815 + gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
51816 + break;
51817 + case GR_SYSCTL_HIDDEN:
51818 + str1 = va_arg(ap, char *);
51819 + gr_log_middle_varargs(audit, msg, result, str1);
51820 + break;
51821 + case GR_RBAC:
51822 + dentry = va_arg(ap, struct dentry *);
51823 + mnt = va_arg(ap, struct vfsmount *);
51824 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
51825 + break;
51826 + case GR_RBAC_STR:
51827 + dentry = va_arg(ap, struct dentry *);
51828 + mnt = va_arg(ap, struct vfsmount *);
51829 + str1 = va_arg(ap, char *);
51830 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
51831 + break;
51832 + case GR_STR_RBAC:
51833 + str1 = va_arg(ap, char *);
51834 + dentry = va_arg(ap, struct dentry *);
51835 + mnt = va_arg(ap, struct vfsmount *);
51836 + gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
51837 + break;
51838 + case GR_RBAC_MODE2:
51839 + dentry = va_arg(ap, struct dentry *);
51840 + mnt = va_arg(ap, struct vfsmount *);
51841 + str1 = va_arg(ap, char *);
51842 + str2 = va_arg(ap, char *);
51843 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
51844 + break;
51845 + case GR_RBAC_MODE3:
51846 + dentry = va_arg(ap, struct dentry *);
51847 + mnt = va_arg(ap, struct vfsmount *);
51848 + str1 = va_arg(ap, char *);
51849 + str2 = va_arg(ap, char *);
51850 + str3 = va_arg(ap, char *);
51851 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
51852 + break;
51853 + case GR_FILENAME:
51854 + dentry = va_arg(ap, struct dentry *);
51855 + mnt = va_arg(ap, struct vfsmount *);
51856 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
51857 + break;
51858 + case GR_STR_FILENAME:
51859 + str1 = va_arg(ap, char *);
51860 + dentry = va_arg(ap, struct dentry *);
51861 + mnt = va_arg(ap, struct vfsmount *);
51862 + gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
51863 + break;
51864 + case GR_FILENAME_STR:
51865 + dentry = va_arg(ap, struct dentry *);
51866 + mnt = va_arg(ap, struct vfsmount *);
51867 + str1 = va_arg(ap, char *);
51868 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
51869 + break;
51870 + case GR_FILENAME_TWO_INT:
51871 + dentry = va_arg(ap, struct dentry *);
51872 + mnt = va_arg(ap, struct vfsmount *);
51873 + num1 = va_arg(ap, int);
51874 + num2 = va_arg(ap, int);
51875 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
51876 + break;
51877 + case GR_FILENAME_TWO_INT_STR:
51878 + dentry = va_arg(ap, struct dentry *);
51879 + mnt = va_arg(ap, struct vfsmount *);
51880 + num1 = va_arg(ap, int);
51881 + num2 = va_arg(ap, int);
51882 + str1 = va_arg(ap, char *);
51883 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
51884 + break;
51885 + case GR_TEXTREL:
51886 + file = va_arg(ap, struct file *);
51887 + ulong1 = va_arg(ap, unsigned long);
51888 + ulong2 = va_arg(ap, unsigned long);
51889 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
51890 + break;
51891 + case GR_PTRACE:
51892 + task = va_arg(ap, struct task_struct *);
51893 + gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
51894 + break;
51895 + case GR_RESOURCE:
51896 + task = va_arg(ap, struct task_struct *);
51897 + cred = __task_cred(task);
51898 + pcred = __task_cred(task->real_parent);
51899 + ulong1 = va_arg(ap, unsigned long);
51900 + str1 = va_arg(ap, char *);
51901 + ulong2 = va_arg(ap, unsigned long);
51902 + gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
51903 + break;
51904 + case GR_CAP:
51905 + task = va_arg(ap, struct task_struct *);
51906 + cred = __task_cred(task);
51907 + pcred = __task_cred(task->real_parent);
51908 + str1 = va_arg(ap, char *);
51909 + gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
51910 + break;
51911 + case GR_SIG:
51912 + str1 = va_arg(ap, char *);
51913 + voidptr = va_arg(ap, void *);
51914 + gr_log_middle_varargs(audit, msg, str1, voidptr);
51915 + break;
51916 + case GR_SIG2:
51917 + task = va_arg(ap, struct task_struct *);
51918 + cred = __task_cred(task);
51919 + pcred = __task_cred(task->real_parent);
51920 + num1 = va_arg(ap, int);
51921 + gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
51922 + break;
51923 + case GR_CRASH1:
51924 + task = va_arg(ap, struct task_struct *);
51925 + cred = __task_cred(task);
51926 + pcred = __task_cred(task->real_parent);
51927 + ulong1 = va_arg(ap, unsigned long);
51928 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
51929 + break;
51930 + case GR_CRASH2:
51931 + task = va_arg(ap, struct task_struct *);
51932 + cred = __task_cred(task);
51933 + pcred = __task_cred(task->real_parent);
51934 + ulong1 = va_arg(ap, unsigned long);
51935 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
51936 + break;
51937 + case GR_RWXMAP:
51938 + file = va_arg(ap, struct file *);
51939 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
51940 + break;
51941 + case GR_PSACCT:
51942 + {
51943 + unsigned int wday, cday;
51944 + __u8 whr, chr;
51945 + __u8 wmin, cmin;
51946 + __u8 wsec, csec;
51947 + char cur_tty[64] = { 0 };
51948 + char parent_tty[64] = { 0 };
51949 +
51950 + task = va_arg(ap, struct task_struct *);
51951 + wday = va_arg(ap, unsigned int);
51952 + cday = va_arg(ap, unsigned int);
51953 + whr = va_arg(ap, int);
51954 + chr = va_arg(ap, int);
51955 + wmin = va_arg(ap, int);
51956 + cmin = va_arg(ap, int);
51957 + wsec = va_arg(ap, int);
51958 + csec = va_arg(ap, int);
51959 + ulong1 = va_arg(ap, unsigned long);
51960 + cred = __task_cred(task);
51961 + pcred = __task_cred(task->real_parent);
51962 +
51963 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
51964 + }
51965 + break;
51966 + default:
51967 + gr_log_middle(audit, msg, ap);
51968 + }
51969 + va_end(ap);
51970 + gr_log_end(audit);
51971 + END_LOCKS(audit);
51972 +}
51973 diff -urNp linux-3.0.4/grsecurity/grsec_mem.c linux-3.0.4/grsecurity/grsec_mem.c
51974 --- linux-3.0.4/grsecurity/grsec_mem.c 1969-12-31 19:00:00.000000000 -0500
51975 +++ linux-3.0.4/grsecurity/grsec_mem.c 2011-08-23 21:48:14.000000000 -0400
51976 @@ -0,0 +1,33 @@
51977 +#include <linux/kernel.h>
51978 +#include <linux/sched.h>
51979 +#include <linux/mm.h>
51980 +#include <linux/mman.h>
51981 +#include <linux/grinternal.h>
51982 +
51983 +void
51984 +gr_handle_ioperm(void)
51985 +{
51986 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
51987 + return;
51988 +}
51989 +
51990 +void
51991 +gr_handle_iopl(void)
51992 +{
51993 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
51994 + return;
51995 +}
51996 +
51997 +void
51998 +gr_handle_mem_readwrite(u64 from, u64 to)
51999 +{
52000 + gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
52001 + return;
52002 +}
52003 +
52004 +void
52005 +gr_handle_vm86(void)
52006 +{
52007 + gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
52008 + return;
52009 +}
52010 diff -urNp linux-3.0.4/grsecurity/grsec_mount.c linux-3.0.4/grsecurity/grsec_mount.c
52011 --- linux-3.0.4/grsecurity/grsec_mount.c 1969-12-31 19:00:00.000000000 -0500
52012 +++ linux-3.0.4/grsecurity/grsec_mount.c 2011-08-23 21:48:14.000000000 -0400
52013 @@ -0,0 +1,62 @@
52014 +#include <linux/kernel.h>
52015 +#include <linux/sched.h>
52016 +#include <linux/mount.h>
52017 +#include <linux/grsecurity.h>
52018 +#include <linux/grinternal.h>
52019 +
52020 +void
52021 +gr_log_remount(const char *devname, const int retval)
52022 +{
52023 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
52024 + if (grsec_enable_mount && (retval >= 0))
52025 + gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
52026 +#endif
52027 + return;
52028 +}
52029 +
52030 +void
52031 +gr_log_unmount(const char *devname, const int retval)
52032 +{
52033 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
52034 + if (grsec_enable_mount && (retval >= 0))
52035 + gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
52036 +#endif
52037 + return;
52038 +}
52039 +
52040 +void
52041 +gr_log_mount(const char *from, const char *to, const int retval)
52042 +{
52043 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
52044 + if (grsec_enable_mount && (retval >= 0))
52045 + gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
52046 +#endif
52047 + return;
52048 +}
52049 +
52050 +int
52051 +gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
52052 +{
52053 +#ifdef CONFIG_GRKERNSEC_ROFS
52054 + if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
52055 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
52056 + return -EPERM;
52057 + } else
52058 + return 0;
52059 +#endif
52060 + return 0;
52061 +}
52062 +
52063 +int
52064 +gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
52065 +{
52066 +#ifdef CONFIG_GRKERNSEC_ROFS
52067 + if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
52068 + dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
52069 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
52070 + return -EPERM;
52071 + } else
52072 + return 0;
52073 +#endif
52074 + return 0;
52075 +}
52076 diff -urNp linux-3.0.4/grsecurity/grsec_pax.c linux-3.0.4/grsecurity/grsec_pax.c
52077 --- linux-3.0.4/grsecurity/grsec_pax.c 1969-12-31 19:00:00.000000000 -0500
52078 +++ linux-3.0.4/grsecurity/grsec_pax.c 2011-08-23 21:48:14.000000000 -0400
52079 @@ -0,0 +1,36 @@
52080 +#include <linux/kernel.h>
52081 +#include <linux/sched.h>
52082 +#include <linux/mm.h>
52083 +#include <linux/file.h>
52084 +#include <linux/grinternal.h>
52085 +#include <linux/grsecurity.h>
52086 +
52087 +void
52088 +gr_log_textrel(struct vm_area_struct * vma)
52089 +{
52090 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
52091 + if (grsec_enable_audit_textrel)
52092 + gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
52093 +#endif
52094 + return;
52095 +}
52096 +
52097 +void
52098 +gr_log_rwxmmap(struct file *file)
52099 +{
52100 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
52101 + if (grsec_enable_log_rwxmaps)
52102 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
52103 +#endif
52104 + return;
52105 +}
52106 +
52107 +void
52108 +gr_log_rwxmprotect(struct file *file)
52109 +{
52110 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
52111 + if (grsec_enable_log_rwxmaps)
52112 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
52113 +#endif
52114 + return;
52115 +}
52116 diff -urNp linux-3.0.4/grsecurity/grsec_ptrace.c linux-3.0.4/grsecurity/grsec_ptrace.c
52117 --- linux-3.0.4/grsecurity/grsec_ptrace.c 1969-12-31 19:00:00.000000000 -0500
52118 +++ linux-3.0.4/grsecurity/grsec_ptrace.c 2011-08-23 21:48:14.000000000 -0400
52119 @@ -0,0 +1,14 @@
52120 +#include <linux/kernel.h>
52121 +#include <linux/sched.h>
52122 +#include <linux/grinternal.h>
52123 +#include <linux/grsecurity.h>
52124 +
52125 +void
52126 +gr_audit_ptrace(struct task_struct *task)
52127 +{
52128 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
52129 + if (grsec_enable_audit_ptrace)
52130 + gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
52131 +#endif
52132 + return;
52133 +}
52134 diff -urNp linux-3.0.4/grsecurity/grsec_sig.c linux-3.0.4/grsecurity/grsec_sig.c
52135 --- linux-3.0.4/grsecurity/grsec_sig.c 1969-12-31 19:00:00.000000000 -0500
52136 +++ linux-3.0.4/grsecurity/grsec_sig.c 2011-08-23 21:48:14.000000000 -0400
52137 @@ -0,0 +1,206 @@
52138 +#include <linux/kernel.h>
52139 +#include <linux/sched.h>
52140 +#include <linux/delay.h>
52141 +#include <linux/grsecurity.h>
52142 +#include <linux/grinternal.h>
52143 +#include <linux/hardirq.h>
52144 +
52145 +char *signames[] = {
52146 + [SIGSEGV] = "Segmentation fault",
52147 + [SIGILL] = "Illegal instruction",
52148 + [SIGABRT] = "Abort",
52149 + [SIGBUS] = "Invalid alignment/Bus error"
52150 +};
52151 +
52152 +void
52153 +gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
52154 +{
52155 +#ifdef CONFIG_GRKERNSEC_SIGNAL
52156 + if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
52157 + (sig == SIGABRT) || (sig == SIGBUS))) {
52158 + if (t->pid == current->pid) {
52159 + gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
52160 + } else {
52161 + gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
52162 + }
52163 + }
52164 +#endif
52165 + return;
52166 +}
52167 +
52168 +int
52169 +gr_handle_signal(const struct task_struct *p, const int sig)
52170 +{
52171 +#ifdef CONFIG_GRKERNSEC
52172 + if (current->pid > 1 && gr_check_protected_task(p)) {
52173 + gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
52174 + return -EPERM;
52175 + } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
52176 + return -EPERM;
52177 + }
52178 +#endif
52179 + return 0;
52180 +}
52181 +
52182 +#ifdef CONFIG_GRKERNSEC
52183 +extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
52184 +
52185 +int gr_fake_force_sig(int sig, struct task_struct *t)
52186 +{
52187 + unsigned long int flags;
52188 + int ret, blocked, ignored;
52189 + struct k_sigaction *action;
52190 +
52191 + spin_lock_irqsave(&t->sighand->siglock, flags);
52192 + action = &t->sighand->action[sig-1];
52193 + ignored = action->sa.sa_handler == SIG_IGN;
52194 + blocked = sigismember(&t->blocked, sig);
52195 + if (blocked || ignored) {
52196 + action->sa.sa_handler = SIG_DFL;
52197 + if (blocked) {
52198 + sigdelset(&t->blocked, sig);
52199 + recalc_sigpending_and_wake(t);
52200 + }
52201 + }
52202 + if (action->sa.sa_handler == SIG_DFL)
52203 + t->signal->flags &= ~SIGNAL_UNKILLABLE;
52204 + ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
52205 +
52206 + spin_unlock_irqrestore(&t->sighand->siglock, flags);
52207 +
52208 + return ret;
52209 +}
52210 +#endif
52211 +
52212 +#ifdef CONFIG_GRKERNSEC_BRUTE
52213 +#define GR_USER_BAN_TIME (15 * 60)
52214 +
52215 +static int __get_dumpable(unsigned long mm_flags)
52216 +{
52217 + int ret;
52218 +
52219 + ret = mm_flags & MMF_DUMPABLE_MASK;
52220 + return (ret >= 2) ? 2 : ret;
52221 +}
52222 +#endif
52223 +
52224 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
52225 +{
52226 +#ifdef CONFIG_GRKERNSEC_BRUTE
52227 + uid_t uid = 0;
52228 +
52229 + if (!grsec_enable_brute)
52230 + return;
52231 +
52232 + rcu_read_lock();
52233 + read_lock(&tasklist_lock);
52234 + read_lock(&grsec_exec_file_lock);
52235 + if (p->real_parent && p->real_parent->exec_file == p->exec_file)
52236 + p->real_parent->brute = 1;
52237 + else {
52238 + const struct cred *cred = __task_cred(p), *cred2;
52239 + struct task_struct *tsk, *tsk2;
52240 +
52241 + if (!__get_dumpable(mm_flags) && cred->uid) {
52242 + struct user_struct *user;
52243 +
52244 + uid = cred->uid;
52245 +
52246 + /* this is put upon execution past expiration */
52247 + user = find_user(uid);
52248 + if (user == NULL)
52249 + goto unlock;
52250 + user->banned = 1;
52251 + user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
52252 + if (user->ban_expires == ~0UL)
52253 + user->ban_expires--;
52254 +
52255 + do_each_thread(tsk2, tsk) {
52256 + cred2 = __task_cred(tsk);
52257 + if (tsk != p && cred2->uid == uid)
52258 + gr_fake_force_sig(SIGKILL, tsk);
52259 + } while_each_thread(tsk2, tsk);
52260 + }
52261 + }
52262 +unlock:
52263 + read_unlock(&grsec_exec_file_lock);
52264 + read_unlock(&tasklist_lock);
52265 + rcu_read_unlock();
52266 +
52267 + if (uid)
52268 + printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
52269 +
52270 +#endif
52271 + return;
52272 +}
52273 +
52274 +void gr_handle_brute_check(void)
52275 +{
52276 +#ifdef CONFIG_GRKERNSEC_BRUTE
52277 + if (current->brute)
52278 + msleep(30 * 1000);
52279 +#endif
52280 + return;
52281 +}
52282 +
52283 +void gr_handle_kernel_exploit(void)
52284 +{
52285 +#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
52286 + const struct cred *cred;
52287 + struct task_struct *tsk, *tsk2;
52288 + struct user_struct *user;
52289 + uid_t uid;
52290 +
52291 + if (in_irq() || in_serving_softirq() || in_nmi())
52292 + panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
52293 +
52294 + uid = current_uid();
52295 +
52296 + if (uid == 0)
52297 + panic("grsec: halting the system due to suspicious kernel crash caused by root");
52298 + else {
52299 + /* kill all the processes of this user, hold a reference
52300 + to their creds struct, and prevent them from creating
52301 + another process until system reset
52302 + */
52303 + printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
52304 + /* we intentionally leak this ref */
52305 + user = get_uid(current->cred->user);
52306 + if (user) {
52307 + user->banned = 1;
52308 + user->ban_expires = ~0UL;
52309 + }
52310 +
52311 + read_lock(&tasklist_lock);
52312 + do_each_thread(tsk2, tsk) {
52313 + cred = __task_cred(tsk);
52314 + if (cred->uid == uid)
52315 + gr_fake_force_sig(SIGKILL, tsk);
52316 + } while_each_thread(tsk2, tsk);
52317 + read_unlock(&tasklist_lock);
52318 + }
52319 +#endif
52320 +}
52321 +
52322 +int __gr_process_user_ban(struct user_struct *user)
52323 +{
52324 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
52325 + if (unlikely(user->banned)) {
52326 + if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
52327 + user->banned = 0;
52328 + user->ban_expires = 0;
52329 + free_uid(user);
52330 + } else
52331 + return -EPERM;
52332 + }
52333 +#endif
52334 + return 0;
52335 +}
52336 +
52337 +int gr_process_user_ban(void)
52338 +{
52339 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
52340 + return __gr_process_user_ban(current->cred->user);
52341 +#endif
52342 + return 0;
52343 +}
52344 diff -urNp linux-3.0.4/grsecurity/grsec_sock.c linux-3.0.4/grsecurity/grsec_sock.c
52345 --- linux-3.0.4/grsecurity/grsec_sock.c 1969-12-31 19:00:00.000000000 -0500
52346 +++ linux-3.0.4/grsecurity/grsec_sock.c 2011-08-23 21:48:14.000000000 -0400
52347 @@ -0,0 +1,244 @@
52348 +#include <linux/kernel.h>
52349 +#include <linux/module.h>
52350 +#include <linux/sched.h>
52351 +#include <linux/file.h>
52352 +#include <linux/net.h>
52353 +#include <linux/in.h>
52354 +#include <linux/ip.h>
52355 +#include <net/sock.h>
52356 +#include <net/inet_sock.h>
52357 +#include <linux/grsecurity.h>
52358 +#include <linux/grinternal.h>
52359 +#include <linux/gracl.h>
52360 +
52361 +extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
52362 +extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
52363 +
52364 +EXPORT_SYMBOL(gr_search_udp_recvmsg);
52365 +EXPORT_SYMBOL(gr_search_udp_sendmsg);
52366 +
52367 +#ifdef CONFIG_UNIX_MODULE
52368 +EXPORT_SYMBOL(gr_acl_handle_unix);
52369 +EXPORT_SYMBOL(gr_acl_handle_mknod);
52370 +EXPORT_SYMBOL(gr_handle_chroot_unix);
52371 +EXPORT_SYMBOL(gr_handle_create);
52372 +#endif
52373 +
52374 +#ifdef CONFIG_GRKERNSEC
52375 +#define gr_conn_table_size 32749
52376 +struct conn_table_entry {
52377 + struct conn_table_entry *next;
52378 + struct signal_struct *sig;
52379 +};
52380 +
52381 +struct conn_table_entry *gr_conn_table[gr_conn_table_size];
52382 +DEFINE_SPINLOCK(gr_conn_table_lock);
52383 +
52384 +extern const char * gr_socktype_to_name(unsigned char type);
52385 +extern const char * gr_proto_to_name(unsigned char proto);
52386 +extern const char * gr_sockfamily_to_name(unsigned char family);
52387 +
52388 +static __inline__ int
52389 +conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
52390 +{
52391 + return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
52392 +}
52393 +
52394 +static __inline__ int
52395 +conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
52396 + __u16 sport, __u16 dport)
52397 +{
52398 + if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
52399 + sig->gr_sport == sport && sig->gr_dport == dport))
52400 + return 1;
52401 + else
52402 + return 0;
52403 +}
52404 +
52405 +static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
52406 +{
52407 + struct conn_table_entry **match;
52408 + unsigned int index;
52409 +
52410 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
52411 + sig->gr_sport, sig->gr_dport,
52412 + gr_conn_table_size);
52413 +
52414 + newent->sig = sig;
52415 +
52416 + match = &gr_conn_table[index];
52417 + newent->next = *match;
52418 + *match = newent;
52419 +
52420 + return;
52421 +}
52422 +
52423 +static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
52424 +{
52425 + struct conn_table_entry *match, *last = NULL;
52426 + unsigned int index;
52427 +
52428 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
52429 + sig->gr_sport, sig->gr_dport,
52430 + gr_conn_table_size);
52431 +
52432 + match = gr_conn_table[index];
52433 + while (match && !conn_match(match->sig,
52434 + sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
52435 + sig->gr_dport)) {
52436 + last = match;
52437 + match = match->next;
52438 + }
52439 +
52440 + if (match) {
52441 + if (last)
52442 + last->next = match->next;
52443 + else
52444 + gr_conn_table[index] = NULL;
52445 + kfree(match);
52446 + }
52447 +
52448 + return;
52449 +}
52450 +
52451 +static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
52452 + __u16 sport, __u16 dport)
52453 +{
52454 + struct conn_table_entry *match;
52455 + unsigned int index;
52456 +
52457 + index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
52458 +
52459 + match = gr_conn_table[index];
52460 + while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
52461 + match = match->next;
52462 +
52463 + if (match)
52464 + return match->sig;
52465 + else
52466 + return NULL;
52467 +}
52468 +
52469 +#endif
52470 +
52471 +void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
52472 +{
52473 +#ifdef CONFIG_GRKERNSEC
52474 + struct signal_struct *sig = task->signal;
52475 + struct conn_table_entry *newent;
52476 +
52477 + newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
52478 + if (newent == NULL)
52479 + return;
52480 + /* no bh lock needed since we are called with bh disabled */
52481 + spin_lock(&gr_conn_table_lock);
52482 + gr_del_task_from_ip_table_nolock(sig);
52483 + sig->gr_saddr = inet->inet_rcv_saddr;
52484 + sig->gr_daddr = inet->inet_daddr;
52485 + sig->gr_sport = inet->inet_sport;
52486 + sig->gr_dport = inet->inet_dport;
52487 + gr_add_to_task_ip_table_nolock(sig, newent);
52488 + spin_unlock(&gr_conn_table_lock);
52489 +#endif
52490 + return;
52491 +}
52492 +
52493 +void gr_del_task_from_ip_table(struct task_struct *task)
52494 +{
52495 +#ifdef CONFIG_GRKERNSEC
52496 + spin_lock_bh(&gr_conn_table_lock);
52497 + gr_del_task_from_ip_table_nolock(task->signal);
52498 + spin_unlock_bh(&gr_conn_table_lock);
52499 +#endif
52500 + return;
52501 +}
52502 +
52503 +void
52504 +gr_attach_curr_ip(const struct sock *sk)
52505 +{
52506 +#ifdef CONFIG_GRKERNSEC
52507 + struct signal_struct *p, *set;
52508 + const struct inet_sock *inet = inet_sk(sk);
52509 +
52510 + if (unlikely(sk->sk_protocol != IPPROTO_TCP))
52511 + return;
52512 +
52513 + set = current->signal;
52514 +
52515 + spin_lock_bh(&gr_conn_table_lock);
52516 + p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
52517 + inet->inet_dport, inet->inet_sport);
52518 + if (unlikely(p != NULL)) {
52519 + set->curr_ip = p->curr_ip;
52520 + set->used_accept = 1;
52521 + gr_del_task_from_ip_table_nolock(p);
52522 + spin_unlock_bh(&gr_conn_table_lock);
52523 + return;
52524 + }
52525 + spin_unlock_bh(&gr_conn_table_lock);
52526 +
52527 + set->curr_ip = inet->inet_daddr;
52528 + set->used_accept = 1;
52529 +#endif
52530 + return;
52531 +}
52532 +
52533 +int
52534 +gr_handle_sock_all(const int family, const int type, const int protocol)
52535 +{
52536 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
52537 + if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
52538 + (family != AF_UNIX)) {
52539 + if (family == AF_INET)
52540 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
52541 + else
52542 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
52543 + return -EACCES;
52544 + }
52545 +#endif
52546 + return 0;
52547 +}
52548 +
52549 +int
52550 +gr_handle_sock_server(const struct sockaddr *sck)
52551 +{
52552 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
52553 + if (grsec_enable_socket_server &&
52554 + in_group_p(grsec_socket_server_gid) &&
52555 + sck && (sck->sa_family != AF_UNIX) &&
52556 + (sck->sa_family != AF_LOCAL)) {
52557 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
52558 + return -EACCES;
52559 + }
52560 +#endif
52561 + return 0;
52562 +}
52563 +
52564 +int
52565 +gr_handle_sock_server_other(const struct sock *sck)
52566 +{
52567 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
52568 + if (grsec_enable_socket_server &&
52569 + in_group_p(grsec_socket_server_gid) &&
52570 + sck && (sck->sk_family != AF_UNIX) &&
52571 + (sck->sk_family != AF_LOCAL)) {
52572 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
52573 + return -EACCES;
52574 + }
52575 +#endif
52576 + return 0;
52577 +}
52578 +
52579 +int
52580 +gr_handle_sock_client(const struct sockaddr *sck)
52581 +{
52582 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
52583 + if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
52584 + sck && (sck->sa_family != AF_UNIX) &&
52585 + (sck->sa_family != AF_LOCAL)) {
52586 + gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
52587 + return -EACCES;
52588 + }
52589 +#endif
52590 + return 0;
52591 +}
52592 diff -urNp linux-3.0.4/grsecurity/grsec_sysctl.c linux-3.0.4/grsecurity/grsec_sysctl.c
52593 --- linux-3.0.4/grsecurity/grsec_sysctl.c 1969-12-31 19:00:00.000000000 -0500
52594 +++ linux-3.0.4/grsecurity/grsec_sysctl.c 2011-08-25 17:26:15.000000000 -0400
52595 @@ -0,0 +1,433 @@
52596 +#include <linux/kernel.h>
52597 +#include <linux/sched.h>
52598 +#include <linux/sysctl.h>
52599 +#include <linux/grsecurity.h>
52600 +#include <linux/grinternal.h>
52601 +
52602 +int
52603 +gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
52604 +{
52605 +#ifdef CONFIG_GRKERNSEC_SYSCTL
52606 + if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
52607 + gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
52608 + return -EACCES;
52609 + }
52610 +#endif
52611 + return 0;
52612 +}
52613 +
52614 +#ifdef CONFIG_GRKERNSEC_ROFS
52615 +static int __maybe_unused one = 1;
52616 +#endif
52617 +
52618 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
52619 +struct ctl_table grsecurity_table[] = {
52620 +#ifdef CONFIG_GRKERNSEC_SYSCTL
52621 +#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
52622 +#ifdef CONFIG_GRKERNSEC_IO
52623 + {
52624 + .procname = "disable_priv_io",
52625 + .data = &grsec_disable_privio,
52626 + .maxlen = sizeof(int),
52627 + .mode = 0600,
52628 + .proc_handler = &proc_dointvec,
52629 + },
52630 +#endif
52631 +#endif
52632 +#ifdef CONFIG_GRKERNSEC_LINK
52633 + {
52634 + .procname = "linking_restrictions",
52635 + .data = &grsec_enable_link,
52636 + .maxlen = sizeof(int),
52637 + .mode = 0600,
52638 + .proc_handler = &proc_dointvec,
52639 + },
52640 +#endif
52641 +#ifdef CONFIG_GRKERNSEC_BRUTE
52642 + {
52643 + .procname = "deter_bruteforce",
52644 + .data = &grsec_enable_brute,
52645 + .maxlen = sizeof(int),
52646 + .mode = 0600,
52647 + .proc_handler = &proc_dointvec,
52648 + },
52649 +#endif
52650 +#ifdef CONFIG_GRKERNSEC_FIFO
52651 + {
52652 + .procname = "fifo_restrictions",
52653 + .data = &grsec_enable_fifo,
52654 + .maxlen = sizeof(int),
52655 + .mode = 0600,
52656 + .proc_handler = &proc_dointvec,
52657 + },
52658 +#endif
52659 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
52660 + {
52661 + .procname = "ip_blackhole",
52662 + .data = &grsec_enable_blackhole,
52663 + .maxlen = sizeof(int),
52664 + .mode = 0600,
52665 + .proc_handler = &proc_dointvec,
52666 + },
52667 + {
52668 + .procname = "lastack_retries",
52669 + .data = &grsec_lastack_retries,
52670 + .maxlen = sizeof(int),
52671 + .mode = 0600,
52672 + .proc_handler = &proc_dointvec,
52673 + },
52674 +#endif
52675 +#ifdef CONFIG_GRKERNSEC_EXECLOG
52676 + {
52677 + .procname = "exec_logging",
52678 + .data = &grsec_enable_execlog,
52679 + .maxlen = sizeof(int),
52680 + .mode = 0600,
52681 + .proc_handler = &proc_dointvec,
52682 + },
52683 +#endif
52684 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
52685 + {
52686 + .procname = "rwxmap_logging",
52687 + .data = &grsec_enable_log_rwxmaps,
52688 + .maxlen = sizeof(int),
52689 + .mode = 0600,
52690 + .proc_handler = &proc_dointvec,
52691 + },
52692 +#endif
52693 +#ifdef CONFIG_GRKERNSEC_SIGNAL
52694 + {
52695 + .procname = "signal_logging",
52696 + .data = &grsec_enable_signal,
52697 + .maxlen = sizeof(int),
52698 + .mode = 0600,
52699 + .proc_handler = &proc_dointvec,
52700 + },
52701 +#endif
52702 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
52703 + {
52704 + .procname = "forkfail_logging",
52705 + .data = &grsec_enable_forkfail,
52706 + .maxlen = sizeof(int),
52707 + .mode = 0600,
52708 + .proc_handler = &proc_dointvec,
52709 + },
52710 +#endif
52711 +#ifdef CONFIG_GRKERNSEC_TIME
52712 + {
52713 + .procname = "timechange_logging",
52714 + .data = &grsec_enable_time,
52715 + .maxlen = sizeof(int),
52716 + .mode = 0600,
52717 + .proc_handler = &proc_dointvec,
52718 + },
52719 +#endif
52720 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
52721 + {
52722 + .procname = "chroot_deny_shmat",
52723 + .data = &grsec_enable_chroot_shmat,
52724 + .maxlen = sizeof(int),
52725 + .mode = 0600,
52726 + .proc_handler = &proc_dointvec,
52727 + },
52728 +#endif
52729 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
52730 + {
52731 + .procname = "chroot_deny_unix",
52732 + .data = &grsec_enable_chroot_unix,
52733 + .maxlen = sizeof(int),
52734 + .mode = 0600,
52735 + .proc_handler = &proc_dointvec,
52736 + },
52737 +#endif
52738 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
52739 + {
52740 + .procname = "chroot_deny_mount",
52741 + .data = &grsec_enable_chroot_mount,
52742 + .maxlen = sizeof(int),
52743 + .mode = 0600,
52744 + .proc_handler = &proc_dointvec,
52745 + },
52746 +#endif
52747 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
52748 + {
52749 + .procname = "chroot_deny_fchdir",
52750 + .data = &grsec_enable_chroot_fchdir,
52751 + .maxlen = sizeof(int),
52752 + .mode = 0600,
52753 + .proc_handler = &proc_dointvec,
52754 + },
52755 +#endif
52756 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
52757 + {
52758 + .procname = "chroot_deny_chroot",
52759 + .data = &grsec_enable_chroot_double,
52760 + .maxlen = sizeof(int),
52761 + .mode = 0600,
52762 + .proc_handler = &proc_dointvec,
52763 + },
52764 +#endif
52765 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
52766 + {
52767 + .procname = "chroot_deny_pivot",
52768 + .data = &grsec_enable_chroot_pivot,
52769 + .maxlen = sizeof(int),
52770 + .mode = 0600,
52771 + .proc_handler = &proc_dointvec,
52772 + },
52773 +#endif
52774 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
52775 + {
52776 + .procname = "chroot_enforce_chdir",
52777 + .data = &grsec_enable_chroot_chdir,
52778 + .maxlen = sizeof(int),
52779 + .mode = 0600,
52780 + .proc_handler = &proc_dointvec,
52781 + },
52782 +#endif
52783 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
52784 + {
52785 + .procname = "chroot_deny_chmod",
52786 + .data = &grsec_enable_chroot_chmod,
52787 + .maxlen = sizeof(int),
52788 + .mode = 0600,
52789 + .proc_handler = &proc_dointvec,
52790 + },
52791 +#endif
52792 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
52793 + {
52794 + .procname = "chroot_deny_mknod",
52795 + .data = &grsec_enable_chroot_mknod,
52796 + .maxlen = sizeof(int),
52797 + .mode = 0600,
52798 + .proc_handler = &proc_dointvec,
52799 + },
52800 +#endif
52801 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
52802 + {
52803 + .procname = "chroot_restrict_nice",
52804 + .data = &grsec_enable_chroot_nice,
52805 + .maxlen = sizeof(int),
52806 + .mode = 0600,
52807 + .proc_handler = &proc_dointvec,
52808 + },
52809 +#endif
52810 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
52811 + {
52812 + .procname = "chroot_execlog",
52813 + .data = &grsec_enable_chroot_execlog,
52814 + .maxlen = sizeof(int),
52815 + .mode = 0600,
52816 + .proc_handler = &proc_dointvec,
52817 + },
52818 +#endif
52819 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
52820 + {
52821 + .procname = "chroot_caps",
52822 + .data = &grsec_enable_chroot_caps,
52823 + .maxlen = sizeof(int),
52824 + .mode = 0600,
52825 + .proc_handler = &proc_dointvec,
52826 + },
52827 +#endif
52828 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
52829 + {
52830 + .procname = "chroot_deny_sysctl",
52831 + .data = &grsec_enable_chroot_sysctl,
52832 + .maxlen = sizeof(int),
52833 + .mode = 0600,
52834 + .proc_handler = &proc_dointvec,
52835 + },
52836 +#endif
52837 +#ifdef CONFIG_GRKERNSEC_TPE
52838 + {
52839 + .procname = "tpe",
52840 + .data = &grsec_enable_tpe,
52841 + .maxlen = sizeof(int),
52842 + .mode = 0600,
52843 + .proc_handler = &proc_dointvec,
52844 + },
52845 + {
52846 + .procname = "tpe_gid",
52847 + .data = &grsec_tpe_gid,
52848 + .maxlen = sizeof(int),
52849 + .mode = 0600,
52850 + .proc_handler = &proc_dointvec,
52851 + },
52852 +#endif
52853 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
52854 + {
52855 + .procname = "tpe_invert",
52856 + .data = &grsec_enable_tpe_invert,
52857 + .maxlen = sizeof(int),
52858 + .mode = 0600,
52859 + .proc_handler = &proc_dointvec,
52860 + },
52861 +#endif
52862 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
52863 + {
52864 + .procname = "tpe_restrict_all",
52865 + .data = &grsec_enable_tpe_all,
52866 + .maxlen = sizeof(int),
52867 + .mode = 0600,
52868 + .proc_handler = &proc_dointvec,
52869 + },
52870 +#endif
52871 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
52872 + {
52873 + .procname = "socket_all",
52874 + .data = &grsec_enable_socket_all,
52875 + .maxlen = sizeof(int),
52876 + .mode = 0600,
52877 + .proc_handler = &proc_dointvec,
52878 + },
52879 + {
52880 + .procname = "socket_all_gid",
52881 + .data = &grsec_socket_all_gid,
52882 + .maxlen = sizeof(int),
52883 + .mode = 0600,
52884 + .proc_handler = &proc_dointvec,
52885 + },
52886 +#endif
52887 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
52888 + {
52889 + .procname = "socket_client",
52890 + .data = &grsec_enable_socket_client,
52891 + .maxlen = sizeof(int),
52892 + .mode = 0600,
52893 + .proc_handler = &proc_dointvec,
52894 + },
52895 + {
52896 + .procname = "socket_client_gid",
52897 + .data = &grsec_socket_client_gid,
52898 + .maxlen = sizeof(int),
52899 + .mode = 0600,
52900 + .proc_handler = &proc_dointvec,
52901 + },
52902 +#endif
52903 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
52904 + {
52905 + .procname = "socket_server",
52906 + .data = &grsec_enable_socket_server,
52907 + .maxlen = sizeof(int),
52908 + .mode = 0600,
52909 + .proc_handler = &proc_dointvec,
52910 + },
52911 + {
52912 + .procname = "socket_server_gid",
52913 + .data = &grsec_socket_server_gid,
52914 + .maxlen = sizeof(int),
52915 + .mode = 0600,
52916 + .proc_handler = &proc_dointvec,
52917 + },
52918 +#endif
52919 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
52920 + {
52921 + .procname = "audit_group",
52922 + .data = &grsec_enable_group,
52923 + .maxlen = sizeof(int),
52924 + .mode = 0600,
52925 + .proc_handler = &proc_dointvec,
52926 + },
52927 + {
52928 + .procname = "audit_gid",
52929 + .data = &grsec_audit_gid,
52930 + .maxlen = sizeof(int),
52931 + .mode = 0600,
52932 + .proc_handler = &proc_dointvec,
52933 + },
52934 +#endif
52935 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
52936 + {
52937 + .procname = "audit_chdir",
52938 + .data = &grsec_enable_chdir,
52939 + .maxlen = sizeof(int),
52940 + .mode = 0600,
52941 + .proc_handler = &proc_dointvec,
52942 + },
52943 +#endif
52944 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
52945 + {
52946 + .procname = "audit_mount",
52947 + .data = &grsec_enable_mount,
52948 + .maxlen = sizeof(int),
52949 + .mode = 0600,
52950 + .proc_handler = &proc_dointvec,
52951 + },
52952 +#endif
52953 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
52954 + {
52955 + .procname = "audit_textrel",
52956 + .data = &grsec_enable_audit_textrel,
52957 + .maxlen = sizeof(int),
52958 + .mode = 0600,
52959 + .proc_handler = &proc_dointvec,
52960 + },
52961 +#endif
52962 +#ifdef CONFIG_GRKERNSEC_DMESG
52963 + {
52964 + .procname = "dmesg",
52965 + .data = &grsec_enable_dmesg,
52966 + .maxlen = sizeof(int),
52967 + .mode = 0600,
52968 + .proc_handler = &proc_dointvec,
52969 + },
52970 +#endif
52971 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
52972 + {
52973 + .procname = "chroot_findtask",
52974 + .data = &grsec_enable_chroot_findtask,
52975 + .maxlen = sizeof(int),
52976 + .mode = 0600,
52977 + .proc_handler = &proc_dointvec,
52978 + },
52979 +#endif
52980 +#ifdef CONFIG_GRKERNSEC_RESLOG
52981 + {
52982 + .procname = "resource_logging",
52983 + .data = &grsec_resource_logging,
52984 + .maxlen = sizeof(int),
52985 + .mode = 0600,
52986 + .proc_handler = &proc_dointvec,
52987 + },
52988 +#endif
52989 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
52990 + {
52991 + .procname = "audit_ptrace",
52992 + .data = &grsec_enable_audit_ptrace,
52993 + .maxlen = sizeof(int),
52994 + .mode = 0600,
52995 + .proc_handler = &proc_dointvec,
52996 + },
52997 +#endif
52998 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
52999 + {
53000 + .procname = "harden_ptrace",
53001 + .data = &grsec_enable_harden_ptrace,
53002 + .maxlen = sizeof(int),
53003 + .mode = 0600,
53004 + .proc_handler = &proc_dointvec,
53005 + },
53006 +#endif
53007 + {
53008 + .procname = "grsec_lock",
53009 + .data = &grsec_lock,
53010 + .maxlen = sizeof(int),
53011 + .mode = 0600,
53012 + .proc_handler = &proc_dointvec,
53013 + },
53014 +#endif
53015 +#ifdef CONFIG_GRKERNSEC_ROFS
53016 + {
53017 + .procname = "romount_protect",
53018 + .data = &grsec_enable_rofs,
53019 + .maxlen = sizeof(int),
53020 + .mode = 0600,
53021 + .proc_handler = &proc_dointvec_minmax,
53022 + .extra1 = &one,
53023 + .extra2 = &one,
53024 + },
53025 +#endif
53026 + { }
53027 +};
53028 +#endif
53029 diff -urNp linux-3.0.4/grsecurity/grsec_time.c linux-3.0.4/grsecurity/grsec_time.c
53030 --- linux-3.0.4/grsecurity/grsec_time.c 1969-12-31 19:00:00.000000000 -0500
53031 +++ linux-3.0.4/grsecurity/grsec_time.c 2011-08-23 21:48:14.000000000 -0400
53032 @@ -0,0 +1,16 @@
53033 +#include <linux/kernel.h>
53034 +#include <linux/sched.h>
53035 +#include <linux/grinternal.h>
53036 +#include <linux/module.h>
53037 +
53038 +void
53039 +gr_log_timechange(void)
53040 +{
53041 +#ifdef CONFIG_GRKERNSEC_TIME
53042 + if (grsec_enable_time)
53043 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
53044 +#endif
53045 + return;
53046 +}
53047 +
53048 +EXPORT_SYMBOL(gr_log_timechange);
53049 diff -urNp linux-3.0.4/grsecurity/grsec_tpe.c linux-3.0.4/grsecurity/grsec_tpe.c
53050 --- linux-3.0.4/grsecurity/grsec_tpe.c 1969-12-31 19:00:00.000000000 -0500
53051 +++ linux-3.0.4/grsecurity/grsec_tpe.c 2011-08-23 21:48:14.000000000 -0400
53052 @@ -0,0 +1,39 @@
53053 +#include <linux/kernel.h>
53054 +#include <linux/sched.h>
53055 +#include <linux/file.h>
53056 +#include <linux/fs.h>
53057 +#include <linux/grinternal.h>
53058 +
53059 +extern int gr_acl_tpe_check(void);
53060 +
53061 +int
53062 +gr_tpe_allow(const struct file *file)
53063 +{
53064 +#ifdef CONFIG_GRKERNSEC
53065 + struct inode *inode = file->f_path.dentry->d_parent->d_inode;
53066 + const struct cred *cred = current_cred();
53067 +
53068 + if (cred->uid && ((grsec_enable_tpe &&
53069 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
53070 + ((grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid)) ||
53071 + (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid)))
53072 +#else
53073 + in_group_p(grsec_tpe_gid)
53074 +#endif
53075 + ) || gr_acl_tpe_check()) &&
53076 + (inode->i_uid || (!inode->i_uid && ((inode->i_mode & S_IWGRP) ||
53077 + (inode->i_mode & S_IWOTH))))) {
53078 + gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
53079 + return 0;
53080 + }
53081 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
53082 + if (cred->uid && grsec_enable_tpe && grsec_enable_tpe_all &&
53083 + ((inode->i_uid && (inode->i_uid != cred->uid)) ||
53084 + (inode->i_mode & S_IWGRP) || (inode->i_mode & S_IWOTH))) {
53085 + gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
53086 + return 0;
53087 + }
53088 +#endif
53089 +#endif
53090 + return 1;
53091 +}
53092 diff -urNp linux-3.0.4/grsecurity/grsum.c linux-3.0.4/grsecurity/grsum.c
53093 --- linux-3.0.4/grsecurity/grsum.c 1969-12-31 19:00:00.000000000 -0500
53094 +++ linux-3.0.4/grsecurity/grsum.c 2011-08-23 21:48:14.000000000 -0400
53095 @@ -0,0 +1,61 @@
53096 +#include <linux/err.h>
53097 +#include <linux/kernel.h>
53098 +#include <linux/sched.h>
53099 +#include <linux/mm.h>
53100 +#include <linux/scatterlist.h>
53101 +#include <linux/crypto.h>
53102 +#include <linux/gracl.h>
53103 +
53104 +
53105 +#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
53106 +#error "crypto and sha256 must be built into the kernel"
53107 +#endif
53108 +
53109 +int
53110 +chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
53111 +{
53112 + char *p;
53113 + struct crypto_hash *tfm;
53114 + struct hash_desc desc;
53115 + struct scatterlist sg;
53116 + unsigned char temp_sum[GR_SHA_LEN];
53117 + volatile int retval = 0;
53118 + volatile int dummy = 0;
53119 + unsigned int i;
53120 +
53121 + sg_init_table(&sg, 1);
53122 +
53123 + tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
53124 + if (IS_ERR(tfm)) {
53125 + /* should never happen, since sha256 should be built in */
53126 + return 1;
53127 + }
53128 +
53129 + desc.tfm = tfm;
53130 + desc.flags = 0;
53131 +
53132 + crypto_hash_init(&desc);
53133 +
53134 + p = salt;
53135 + sg_set_buf(&sg, p, GR_SALT_LEN);
53136 + crypto_hash_update(&desc, &sg, sg.length);
53137 +
53138 + p = entry->pw;
53139 + sg_set_buf(&sg, p, strlen(p));
53140 +
53141 + crypto_hash_update(&desc, &sg, sg.length);
53142 +
53143 + crypto_hash_final(&desc, temp_sum);
53144 +
53145 + memset(entry->pw, 0, GR_PW_LEN);
53146 +
53147 + for (i = 0; i < GR_SHA_LEN; i++)
53148 + if (sum[i] != temp_sum[i])
53149 + retval = 1;
53150 + else
53151 + dummy = 1; // waste a cycle
53152 +
53153 + crypto_free_hash(tfm);
53154 +
53155 + return retval;
53156 +}
53157 diff -urNp linux-3.0.4/grsecurity/Kconfig linux-3.0.4/grsecurity/Kconfig
53158 --- linux-3.0.4/grsecurity/Kconfig 1969-12-31 19:00:00.000000000 -0500
53159 +++ linux-3.0.4/grsecurity/Kconfig 2011-09-15 00:00:57.000000000 -0400
53160 @@ -0,0 +1,1038 @@
53161 +#
53162 +# grecurity configuration
53163 +#
53164 +
53165 +menu "Grsecurity"
53166 +
53167 +config GRKERNSEC
53168 + bool "Grsecurity"
53169 + select CRYPTO
53170 + select CRYPTO_SHA256
53171 + help
53172 + If you say Y here, you will be able to configure many features
53173 + that will enhance the security of your system. It is highly
53174 + recommended that you say Y here and read through the help
53175 + for each option so that you fully understand the features and
53176 + can evaluate their usefulness for your machine.
53177 +
53178 +choice
53179 + prompt "Security Level"
53180 + depends on GRKERNSEC
53181 + default GRKERNSEC_CUSTOM
53182 +
53183 +config GRKERNSEC_LOW
53184 + bool "Low"
53185 + select GRKERNSEC_LINK
53186 + select GRKERNSEC_FIFO
53187 + select GRKERNSEC_RANDNET
53188 + select GRKERNSEC_DMESG
53189 + select GRKERNSEC_CHROOT
53190 + select GRKERNSEC_CHROOT_CHDIR
53191 +
53192 + help
53193 + If you choose this option, several of the grsecurity options will
53194 + be enabled that will give you greater protection against a number
53195 + of attacks, while assuring that none of your software will have any
53196 + conflicts with the additional security measures. If you run a lot
53197 + of unusual software, or you are having problems with the higher
53198 + security levels, you should say Y here. With this option, the
53199 + following features are enabled:
53200 +
53201 + - Linking restrictions
53202 + - FIFO restrictions
53203 + - Restricted dmesg
53204 + - Enforced chdir("/") on chroot
53205 + - Runtime module disabling
53206 +
53207 +config GRKERNSEC_MEDIUM
53208 + bool "Medium"
53209 + select PAX
53210 + select PAX_EI_PAX
53211 + select PAX_PT_PAX_FLAGS
53212 + select PAX_HAVE_ACL_FLAGS
53213 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
53214 + select GRKERNSEC_CHROOT
53215 + select GRKERNSEC_CHROOT_SYSCTL
53216 + select GRKERNSEC_LINK
53217 + select GRKERNSEC_FIFO
53218 + select GRKERNSEC_DMESG
53219 + select GRKERNSEC_RANDNET
53220 + select GRKERNSEC_FORKFAIL
53221 + select GRKERNSEC_TIME
53222 + select GRKERNSEC_SIGNAL
53223 + select GRKERNSEC_CHROOT
53224 + select GRKERNSEC_CHROOT_UNIX
53225 + select GRKERNSEC_CHROOT_MOUNT
53226 + select GRKERNSEC_CHROOT_PIVOT
53227 + select GRKERNSEC_CHROOT_DOUBLE
53228 + select GRKERNSEC_CHROOT_CHDIR
53229 + select GRKERNSEC_CHROOT_MKNOD
53230 + select GRKERNSEC_PROC
53231 + select GRKERNSEC_PROC_USERGROUP
53232 + select PAX_RANDUSTACK
53233 + select PAX_ASLR
53234 + select PAX_RANDMMAP
53235 + select PAX_REFCOUNT if (X86 || SPARC64)
53236 + select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
53237 +
53238 + help
53239 + If you say Y here, several features in addition to those included
53240 + in the low additional security level will be enabled. These
53241 + features provide even more security to your system, though in rare
53242 + cases they may be incompatible with very old or poorly written
53243 + software. If you enable this option, make sure that your auth
53244 + service (identd) is running as gid 1001. With this option,
53245 + the following features (in addition to those provided in the
53246 + low additional security level) will be enabled:
53247 +
53248 + - Failed fork logging
53249 + - Time change logging
53250 + - Signal logging
53251 + - Deny mounts in chroot
53252 + - Deny double chrooting
53253 + - Deny sysctl writes in chroot
53254 + - Deny mknod in chroot
53255 + - Deny access to abstract AF_UNIX sockets out of chroot
53256 + - Deny pivot_root in chroot
53257 + - Denied writes of /dev/kmem, /dev/mem, and /dev/port
53258 + - /proc restrictions with special GID set to 10 (usually wheel)
53259 + - Address Space Layout Randomization (ASLR)
53260 + - Prevent exploitation of most refcount overflows
53261 + - Bounds checking of copying between the kernel and userland
53262 +
53263 +config GRKERNSEC_HIGH
53264 + bool "High"
53265 + select GRKERNSEC_LINK
53266 + select GRKERNSEC_FIFO
53267 + select GRKERNSEC_DMESG
53268 + select GRKERNSEC_FORKFAIL
53269 + select GRKERNSEC_TIME
53270 + select GRKERNSEC_SIGNAL
53271 + select GRKERNSEC_CHROOT
53272 + select GRKERNSEC_CHROOT_SHMAT
53273 + select GRKERNSEC_CHROOT_UNIX
53274 + select GRKERNSEC_CHROOT_MOUNT
53275 + select GRKERNSEC_CHROOT_FCHDIR
53276 + select GRKERNSEC_CHROOT_PIVOT
53277 + select GRKERNSEC_CHROOT_DOUBLE
53278 + select GRKERNSEC_CHROOT_CHDIR
53279 + select GRKERNSEC_CHROOT_MKNOD
53280 + select GRKERNSEC_CHROOT_CAPS
53281 + select GRKERNSEC_CHROOT_SYSCTL
53282 + select GRKERNSEC_CHROOT_FINDTASK
53283 + select GRKERNSEC_SYSFS_RESTRICT
53284 + select GRKERNSEC_PROC
53285 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
53286 + select GRKERNSEC_HIDESYM
53287 + select GRKERNSEC_BRUTE
53288 + select GRKERNSEC_PROC_USERGROUP
53289 + select GRKERNSEC_KMEM
53290 + select GRKERNSEC_RESLOG
53291 + select GRKERNSEC_RANDNET
53292 + select GRKERNSEC_PROC_ADD
53293 + select GRKERNSEC_CHROOT_CHMOD
53294 + select GRKERNSEC_CHROOT_NICE
53295 + select GRKERNSEC_AUDIT_MOUNT
53296 + select GRKERNSEC_MODHARDEN if (MODULES)
53297 + select GRKERNSEC_HARDEN_PTRACE
53298 + select GRKERNSEC_VM86 if (X86_32)
53299 + select GRKERNSEC_KERN_LOCKOUT if (X86 || ARM || PPC || SPARC)
53300 + select PAX
53301 + select PAX_RANDUSTACK
53302 + select PAX_ASLR
53303 + select PAX_RANDMMAP
53304 + select PAX_NOEXEC
53305 + select PAX_MPROTECT
53306 + select PAX_EI_PAX
53307 + select PAX_PT_PAX_FLAGS
53308 + select PAX_HAVE_ACL_FLAGS
53309 + select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
53310 + select PAX_MEMORY_UDEREF if (X86 && !XEN)
53311 + select PAX_RANDKSTACK if (X86_TSC && X86)
53312 + select PAX_SEGMEXEC if (X86_32)
53313 + select PAX_PAGEEXEC
53314 + select PAX_EMUPLT if (ALPHA || PARISC || SPARC)
53315 + select PAX_EMUTRAMP if (PARISC)
53316 + select PAX_EMUSIGRT if (PARISC)
53317 + select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
53318 + select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86))
53319 + select PAX_REFCOUNT if (X86 || SPARC64)
53320 + select PAX_USERCOPY if ((X86 || PPC || SPARC || ARM) && (SLAB || SLUB || SLOB))
53321 + help
53322 + If you say Y here, many of the features of grsecurity will be
53323 + enabled, which will protect you against many kinds of attacks
53324 + against your system. The heightened security comes at a cost
53325 + of an increased chance of incompatibilities with rare software
53326 + on your machine. Since this security level enables PaX, you should
53327 + view <http://pax.grsecurity.net> and read about the PaX
53328 + project. While you are there, download chpax and run it on
53329 + binaries that cause problems with PaX. Also remember that
53330 + since the /proc restrictions are enabled, you must run your
53331 + identd as gid 1001. This security level enables the following
53332 + features in addition to those listed in the low and medium
53333 + security levels:
53334 +
53335 + - Additional /proc restrictions
53336 + - Chmod restrictions in chroot
53337 + - No signals, ptrace, or viewing of processes outside of chroot
53338 + - Capability restrictions in chroot
53339 + - Deny fchdir out of chroot
53340 + - Priority restrictions in chroot
53341 + - Segmentation-based implementation of PaX
53342 + - Mprotect restrictions
53343 + - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
53344 + - Kernel stack randomization
53345 + - Mount/unmount/remount logging
53346 + - Kernel symbol hiding
53347 + - Prevention of memory exhaustion-based exploits
53348 + - Hardening of module auto-loading
53349 + - Ptrace restrictions
53350 + - Restricted vm86 mode
53351 + - Restricted sysfs/debugfs
53352 + - Active kernel exploit response
53353 +
53354 +config GRKERNSEC_CUSTOM
53355 + bool "Custom"
53356 + help
53357 + If you say Y here, you will be able to configure every grsecurity
53358 + option, which allows you to enable many more features that aren't
53359 + covered in the basic security levels. These additional features
53360 + include TPE, socket restrictions, and the sysctl system for
53361 + grsecurity. It is advised that you read through the help for
53362 + each option to determine its usefulness in your situation.
53363 +
53364 +endchoice
53365 +
53366 +menu "Address Space Protection"
53367 +depends on GRKERNSEC
53368 +
53369 +config GRKERNSEC_KMEM
53370 + bool "Deny writing to /dev/kmem, /dev/mem, and /dev/port"
53371 + select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
53372 + help
53373 + If you say Y here, /dev/kmem and /dev/mem won't be allowed to
53374 + be written to via mmap or otherwise to modify the running kernel.
53375 + /dev/port will also not be allowed to be opened. If you have module
53376 + support disabled, enabling this will close up four ways that are
53377 + currently used to insert malicious code into the running kernel.
53378 + Even with all these features enabled, we still highly recommend that
53379 + you use the RBAC system, as it is still possible for an attacker to
53380 + modify the running kernel through privileged I/O granted by ioperm/iopl.
53381 + If you are not using XFree86, you may be able to stop this additional
53382 + case by enabling the 'Disable privileged I/O' option. Though nothing
53383 + legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
53384 + but only to video memory, which is the only writing we allow in this
53385 + case. If /dev/kmem or /dev/mem are mmaped without PROT_WRITE, they will
53386 + not be allowed to mprotect it with PROT_WRITE later.
53387 + It is highly recommended that you say Y here if you meet all the
53388 + conditions above.
53389 +
53390 +config GRKERNSEC_VM86
53391 + bool "Restrict VM86 mode"
53392 + depends on X86_32
53393 +
53394 + help
53395 + If you say Y here, only processes with CAP_SYS_RAWIO will be able to
53396 + make use of a special execution mode on 32bit x86 processors called
53397 + Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
53398 + video cards and will still work with this option enabled. The purpose
53399 + of the option is to prevent exploitation of emulation errors in
53400 + virtualization of vm86 mode like the one discovered in VMWare in 2009.
53401 + Nearly all users should be able to enable this option.
53402 +
53403 +config GRKERNSEC_IO
53404 + bool "Disable privileged I/O"
53405 + depends on X86
53406 + select RTC_CLASS
53407 + select RTC_INTF_DEV
53408 + select RTC_DRV_CMOS
53409 +
53410 + help
53411 + If you say Y here, all ioperm and iopl calls will return an error.
53412 + Ioperm and iopl can be used to modify the running kernel.
53413 + Unfortunately, some programs need this access to operate properly,
53414 + the most notable of which are XFree86 and hwclock. hwclock can be
53415 + remedied by having RTC support in the kernel, so real-time
53416 + clock support is enabled if this option is enabled, to ensure
53417 + that hwclock operates correctly. XFree86 still will not
53418 + operate correctly with this option enabled, so DO NOT CHOOSE Y
53419 + IF YOU USE XFree86. If you use XFree86 and you still want to
53420 + protect your kernel against modification, use the RBAC system.
53421 +
53422 +config GRKERNSEC_PROC_MEMMAP
53423 + bool "Remove addresses from /proc/<pid>/[smaps|maps|stat]"
53424 + default y if (PAX_NOEXEC || PAX_ASLR)
53425 + depends on PAX_NOEXEC || PAX_ASLR
53426 + help
53427 + If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
53428 + give no information about the addresses of its mappings if
53429 + PaX features that rely on random addresses are enabled on the task.
53430 + If you use PaX it is greatly recommended that you say Y here as it
53431 + closes up a hole that makes the full ASLR useless for suid
53432 + binaries.
53433 +
53434 +config GRKERNSEC_BRUTE
53435 + bool "Deter exploit bruteforcing"
53436 + help
53437 + If you say Y here, attempts to bruteforce exploits against forking
53438 + daemons such as apache or sshd, as well as against suid/sgid binaries
53439 + will be deterred. When a child of a forking daemon is killed by PaX
53440 + or crashes due to an illegal instruction or other suspicious signal,
53441 + the parent process will be delayed 30 seconds upon every subsequent
53442 + fork until the administrator is able to assess the situation and
53443 + restart the daemon.
53444 + In the suid/sgid case, the attempt is logged, the user has all their
53445 + processes terminated, and they are prevented from executing any further
53446 + processes for 15 minutes.
53447 + It is recommended that you also enable signal logging in the auditing
53448 + section so that logs are generated when a process triggers a suspicious
53449 + signal.
53450 + If the sysctl option is enabled, a sysctl option with name
53451 + "deter_bruteforce" is created.
53452 +
53453 +
53454 +config GRKERNSEC_MODHARDEN
53455 + bool "Harden module auto-loading"
53456 + depends on MODULES
53457 + help
53458 + If you say Y here, module auto-loading in response to use of some
53459 + feature implemented by an unloaded module will be restricted to
53460 + root users. Enabling this option helps defend against attacks
53461 + by unprivileged users who abuse the auto-loading behavior to
53462 + cause a vulnerable module to load that is then exploited.
53463 +
53464 + If this option prevents a legitimate use of auto-loading for a
53465 + non-root user, the administrator can execute modprobe manually
53466 + with the exact name of the module mentioned in the alert log.
53467 + Alternatively, the administrator can add the module to the list
53468 + of modules loaded at boot by modifying init scripts.
53469 +
53470 + Modification of init scripts will most likely be needed on
53471 + Ubuntu servers with encrypted home directory support enabled,
53472 + as the first non-root user logging in will cause the ecb(aes),
53473 + ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
53474 +
53475 +config GRKERNSEC_HIDESYM
53476 + bool "Hide kernel symbols"
53477 + help
53478 + If you say Y here, getting information on loaded modules, and
53479 + displaying all kernel symbols through a syscall will be restricted
53480 + to users with CAP_SYS_MODULE. For software compatibility reasons,
53481 + /proc/kallsyms will be restricted to the root user. The RBAC
53482 + system can hide that entry even from root.
53483 +
53484 + This option also prevents leaking of kernel addresses through
53485 + several /proc entries.
53486 +
53487 + Note that this option is only effective provided the following
53488 + conditions are met:
53489 + 1) The kernel using grsecurity is not precompiled by some distribution
53490 + 2) You have also enabled GRKERNSEC_DMESG
53491 + 3) You are using the RBAC system and hiding other files such as your
53492 + kernel image and System.map. Alternatively, enabling this option
53493 + causes the permissions on /boot, /lib/modules, and the kernel
53494 + source directory to change at compile time to prevent
53495 + reading by non-root users.
53496 + If the above conditions are met, this option will aid in providing a
53497 + useful protection against local kernel exploitation of overflows
53498 + and arbitrary read/write vulnerabilities.
53499 +
53500 +config GRKERNSEC_KERN_LOCKOUT
53501 + bool "Active kernel exploit response"
53502 + depends on X86 || ARM || PPC || SPARC
53503 + help
53504 + If you say Y here, when a PaX alert is triggered due to suspicious
53505 + activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
53506 + or an OOPs occurs due to bad memory accesses, instead of just
53507 + terminating the offending process (and potentially allowing
53508 + a subsequent exploit from the same user), we will take one of two
53509 + actions:
53510 + If the user was root, we will panic the system
53511 + If the user was non-root, we will log the attempt, terminate
53512 + all processes owned by the user, then prevent them from creating
53513 + any new processes until the system is restarted
53514 + This deters repeated kernel exploitation/bruteforcing attempts
53515 + and is useful for later forensics.
53516 +
53517 +endmenu
53518 +menu "Role Based Access Control Options"
53519 +depends on GRKERNSEC
53520 +
53521 +config GRKERNSEC_RBAC_DEBUG
53522 + bool
53523 +
53524 +config GRKERNSEC_NO_RBAC
53525 + bool "Disable RBAC system"
53526 + help
53527 + If you say Y here, the /dev/grsec device will be removed from the kernel,
53528 + preventing the RBAC system from being enabled. You should only say Y
53529 + here if you have no intention of using the RBAC system, so as to prevent
53530 + an attacker with root access from misusing the RBAC system to hide files
53531 + and processes when loadable module support and /dev/[k]mem have been
53532 + locked down.
53533 +
53534 +config GRKERNSEC_ACL_HIDEKERN
53535 + bool "Hide kernel processes"
53536 + help
53537 + If you say Y here, all kernel threads will be hidden to all
53538 + processes but those whose subject has the "view hidden processes"
53539 + flag.
53540 +
53541 +config GRKERNSEC_ACL_MAXTRIES
53542 + int "Maximum tries before password lockout"
53543 + default 3
53544 + help
53545 + This option enforces the maximum number of times a user can attempt
53546 + to authorize themselves with the grsecurity RBAC system before being
53547 + denied the ability to attempt authorization again for a specified time.
53548 + The lower the number, the harder it will be to brute-force a password.
53549 +
53550 +config GRKERNSEC_ACL_TIMEOUT
53551 + int "Time to wait after max password tries, in seconds"
53552 + default 30
53553 + help
53554 + This option specifies the time the user must wait after attempting to
53555 + authorize to the RBAC system with the maximum number of invalid
53556 + passwords. The higher the number, the harder it will be to brute-force
53557 + a password.
53558 +
53559 +endmenu
53560 +menu "Filesystem Protections"
53561 +depends on GRKERNSEC
53562 +
53563 +config GRKERNSEC_PROC
53564 + bool "Proc restrictions"
53565 + help
53566 + If you say Y here, the permissions of the /proc filesystem
53567 + will be altered to enhance system security and privacy. You MUST
53568 + choose either a user only restriction or a user and group restriction.
53569 + Depending upon the option you choose, you can either restrict users to
53570 + see only the processes they themselves run, or choose a group that can
53571 + view all processes and files normally restricted to root if you choose
53572 + the "restrict to user only" option. NOTE: If you're running identd as
53573 + a non-root user, you will have to run it as the group you specify here.
53574 +
53575 +config GRKERNSEC_PROC_USER
53576 + bool "Restrict /proc to user only"
53577 + depends on GRKERNSEC_PROC
53578 + help
53579 + If you say Y here, non-root users will only be able to view their own
53580 + processes, and restricts them from viewing network-related information,
53581 + and viewing kernel symbol and module information.
53582 +
53583 +config GRKERNSEC_PROC_USERGROUP
53584 + bool "Allow special group"
53585 + depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
53586 + help
53587 + If you say Y here, you will be able to select a group that will be
53588 + able to view all processes and network-related information. If you've
53589 + enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
53590 + remain hidden. This option is useful if you want to run identd as
53591 + a non-root user.
53592 +
53593 +config GRKERNSEC_PROC_GID
53594 + int "GID for special group"
53595 + depends on GRKERNSEC_PROC_USERGROUP
53596 + default 1001
53597 +
53598 +config GRKERNSEC_PROC_ADD
53599 + bool "Additional restrictions"
53600 + depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
53601 + help
53602 + If you say Y here, additional restrictions will be placed on
53603 + /proc that keep normal users from viewing device information and
53604 + slabinfo information that could be useful for exploits.
53605 +
53606 +config GRKERNSEC_LINK
53607 + bool "Linking restrictions"
53608 + help
53609 + If you say Y here, /tmp race exploits will be prevented, since users
53610 + will no longer be able to follow symlinks owned by other users in
53611 + world-writable +t directories (e.g. /tmp), unless the owner of the
53612 + symlink is the owner of the directory. users will also not be
53613 + able to hardlink to files they do not own. If the sysctl option is
53614 + enabled, a sysctl option with name "linking_restrictions" is created.
53615 +
53616 +config GRKERNSEC_FIFO
53617 + bool "FIFO restrictions"
53618 + help
53619 + If you say Y here, users will not be able to write to FIFOs they don't
53620 + own in world-writable +t directories (e.g. /tmp), unless the owner of
53621 + the FIFO is the same owner of the directory it's held in. If the sysctl
53622 + option is enabled, a sysctl option with name "fifo_restrictions" is
53623 + created.
53624 +
53625 +config GRKERNSEC_SYSFS_RESTRICT
53626 + bool "Sysfs/debugfs restriction"
53627 + depends on SYSFS
53628 + help
53629 + If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
53630 + any filesystem normally mounted under it (e.g. debugfs) will only
53631 + be accessible by root. These filesystems generally provide access
53632 + to hardware and debug information that isn't appropriate for unprivileged
53633 + users of the system. Sysfs and debugfs have also become a large source
53634 + of new vulnerabilities, ranging from infoleaks to local compromise.
53635 + There has been very little oversight with an eye toward security involved
53636 + in adding new exporters of information to these filesystems, so their
53637 + use is discouraged.
53638 + This option is equivalent to a chmod 0700 of the mount paths.
53639 +
53640 +config GRKERNSEC_ROFS
53641 + bool "Runtime read-only mount protection"
53642 + help
53643 + If you say Y here, a sysctl option with name "romount_protect" will
53644 + be created. By setting this option to 1 at runtime, filesystems
53645 + will be protected in the following ways:
53646 + * No new writable mounts will be allowed
53647 + * Existing read-only mounts won't be able to be remounted read/write
53648 + * Write operations will be denied on all block devices
53649 + This option acts independently of grsec_lock: once it is set to 1,
53650 + it cannot be turned off. Therefore, please be mindful of the resulting
53651 + behavior if this option is enabled in an init script on a read-only
53652 + filesystem. This feature is mainly intended for secure embedded systems.
53653 +
53654 +config GRKERNSEC_CHROOT
53655 + bool "Chroot jail restrictions"
53656 + help
53657 + If you say Y here, you will be able to choose several options that will
53658 + make breaking out of a chrooted jail much more difficult. If you
53659 + encounter no software incompatibilities with the following options, it
53660 + is recommended that you enable each one.
53661 +
53662 +config GRKERNSEC_CHROOT_MOUNT
53663 + bool "Deny mounts"
53664 + depends on GRKERNSEC_CHROOT
53665 + help
53666 + If you say Y here, processes inside a chroot will not be able to
53667 + mount or remount filesystems. If the sysctl option is enabled, a
53668 + sysctl option with name "chroot_deny_mount" is created.
53669 +
53670 +config GRKERNSEC_CHROOT_DOUBLE
53671 + bool "Deny double-chroots"
53672 + depends on GRKERNSEC_CHROOT
53673 + help
53674 + If you say Y here, processes inside a chroot will not be able to chroot
53675 + again outside the chroot. This is a widely used method of breaking
53676 + out of a chroot jail and should not be allowed. If the sysctl
53677 + option is enabled, a sysctl option with name
53678 + "chroot_deny_chroot" is created.
53679 +
53680 +config GRKERNSEC_CHROOT_PIVOT
53681 + bool "Deny pivot_root in chroot"
53682 + depends on GRKERNSEC_CHROOT
53683 + help
53684 + If you say Y here, processes inside a chroot will not be able to use
53685 + a function called pivot_root() that was introduced in Linux 2.3.41. It
53686 + works similar to chroot in that it changes the root filesystem. This
53687 + function could be misused in a chrooted process to attempt to break out
53688 + of the chroot, and therefore should not be allowed. If the sysctl
53689 + option is enabled, a sysctl option with name "chroot_deny_pivot" is
53690 + created.
53691 +
53692 +config GRKERNSEC_CHROOT_CHDIR
53693 + bool "Enforce chdir(\"/\") on all chroots"
53694 + depends on GRKERNSEC_CHROOT
53695 + help
53696 + If you say Y here, the current working directory of all newly-chrooted
53697 + applications will be set to the the root directory of the chroot.
53698 + The man page on chroot(2) states:
53699 + Note that this call does not change the current working
53700 + directory, so that `.' can be outside the tree rooted at
53701 + `/'. In particular, the super-user can escape from a
53702 + `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
53703 +
53704 + It is recommended that you say Y here, since it's not known to break
53705 + any software. If the sysctl option is enabled, a sysctl option with
53706 + name "chroot_enforce_chdir" is created.
53707 +
53708 +config GRKERNSEC_CHROOT_CHMOD
53709 + bool "Deny (f)chmod +s"
53710 + depends on GRKERNSEC_CHROOT
53711 + help
53712 + If you say Y here, processes inside a chroot will not be able to chmod
53713 + or fchmod files to make them have suid or sgid bits. This protects
53714 + against another published method of breaking a chroot. If the sysctl
53715 + option is enabled, a sysctl option with name "chroot_deny_chmod" is
53716 + created.
53717 +
53718 +config GRKERNSEC_CHROOT_FCHDIR
53719 + bool "Deny fchdir out of chroot"
53720 + depends on GRKERNSEC_CHROOT
53721 + help
53722 + If you say Y here, a well-known method of breaking chroots by fchdir'ing
53723 + to a file descriptor of the chrooting process that points to a directory
53724 + outside the filesystem will be stopped. If the sysctl option
53725 + is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
53726 +
53727 +config GRKERNSEC_CHROOT_MKNOD
53728 + bool "Deny mknod"
53729 + depends on GRKERNSEC_CHROOT
53730 + help
53731 + If you say Y here, processes inside a chroot will not be allowed to
53732 + mknod. The problem with using mknod inside a chroot is that it
53733 + would allow an attacker to create a device entry that is the same
53734 + as one on the physical root of your system, which could range from
53735 + anything from the console device to a device for your harddrive (which
53736 + they could then use to wipe the drive or steal data). It is recommended
53737 + that you say Y here, unless you run into software incompatibilities.
53738 + If the sysctl option is enabled, a sysctl option with name
53739 + "chroot_deny_mknod" is created.
53740 +
53741 +config GRKERNSEC_CHROOT_SHMAT
53742 + bool "Deny shmat() out of chroot"
53743 + depends on GRKERNSEC_CHROOT
53744 + help
53745 + If you say Y here, processes inside a chroot will not be able to attach
53746 + to shared memory segments that were created outside of the chroot jail.
53747 + It is recommended that you say Y here. If the sysctl option is enabled,
53748 + a sysctl option with name "chroot_deny_shmat" is created.
53749 +
53750 +config GRKERNSEC_CHROOT_UNIX
53751 + bool "Deny access to abstract AF_UNIX sockets out of chroot"
53752 + depends on GRKERNSEC_CHROOT
53753 + help
53754 + If you say Y here, processes inside a chroot will not be able to
53755 + connect to abstract (meaning not belonging to a filesystem) Unix
53756 + domain sockets that were bound outside of a chroot. It is recommended
53757 + that you say Y here. If the sysctl option is enabled, a sysctl option
53758 + with name "chroot_deny_unix" is created.
53759 +
53760 +config GRKERNSEC_CHROOT_FINDTASK
53761 + bool "Protect outside processes"
53762 + depends on GRKERNSEC_CHROOT
53763 + help
53764 + If you say Y here, processes inside a chroot will not be able to
53765 + kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
53766 + getsid, or view any process outside of the chroot. If the sysctl
53767 + option is enabled, a sysctl option with name "chroot_findtask" is
53768 + created.
53769 +
53770 +config GRKERNSEC_CHROOT_NICE
53771 + bool "Restrict priority changes"
53772 + depends on GRKERNSEC_CHROOT
53773 + help
53774 + If you say Y here, processes inside a chroot will not be able to raise
53775 + the priority of processes in the chroot, or alter the priority of
53776 + processes outside the chroot. This provides more security than simply
53777 + removing CAP_SYS_NICE from the process' capability set. If the
53778 + sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
53779 + is created.
53780 +
53781 +config GRKERNSEC_CHROOT_SYSCTL
53782 + bool "Deny sysctl writes"
53783 + depends on GRKERNSEC_CHROOT
53784 + help
53785 + If you say Y here, an attacker in a chroot will not be able to
53786 + write to sysctl entries, either by sysctl(2) or through a /proc
53787 + interface. It is strongly recommended that you say Y here. If the
53788 + sysctl option is enabled, a sysctl option with name
53789 + "chroot_deny_sysctl" is created.
53790 +
53791 +config GRKERNSEC_CHROOT_CAPS
53792 + bool "Capability restrictions"
53793 + depends on GRKERNSEC_CHROOT
53794 + help
53795 + If you say Y here, the capabilities on all processes within a
53796 + chroot jail will be lowered to stop module insertion, raw i/o,
53797 + system and net admin tasks, rebooting the system, modifying immutable
53798 + files, modifying IPC owned by another, and changing the system time.
53799 + This is left an option because it can break some apps. Disable this
53800 + if your chrooted apps are having problems performing those kinds of
53801 + tasks. If the sysctl option is enabled, a sysctl option with
53802 + name "chroot_caps" is created.
53803 +
53804 +endmenu
53805 +menu "Kernel Auditing"
53806 +depends on GRKERNSEC
53807 +
53808 +config GRKERNSEC_AUDIT_GROUP
53809 + bool "Single group for auditing"
53810 + help
53811 + If you say Y here, the exec, chdir, and (un)mount logging features
53812 + will only operate on a group you specify. This option is recommended
53813 + if you only want to watch certain users instead of having a large
53814 + amount of logs from the entire system. If the sysctl option is enabled,
53815 + a sysctl option with name "audit_group" is created.
53816 +
53817 +config GRKERNSEC_AUDIT_GID
53818 + int "GID for auditing"
53819 + depends on GRKERNSEC_AUDIT_GROUP
53820 + default 1007
53821 +
53822 +config GRKERNSEC_EXECLOG
53823 + bool "Exec logging"
53824 + help
53825 + If you say Y here, all execve() calls will be logged (since the
53826 + other exec*() calls are frontends to execve(), all execution
53827 + will be logged). Useful for shell-servers that like to keep track
53828 + of their users. If the sysctl option is enabled, a sysctl option with
53829 + name "exec_logging" is created.
53830 + WARNING: This option when enabled will produce a LOT of logs, especially
53831 + on an active system.
53832 +
53833 +config GRKERNSEC_RESLOG
53834 + bool "Resource logging"
53835 + help
53836 + If you say Y here, all attempts to overstep resource limits will
53837 + be logged with the resource name, the requested size, and the current
53838 + limit. It is highly recommended that you say Y here. If the sysctl
53839 + option is enabled, a sysctl option with name "resource_logging" is
53840 + created. If the RBAC system is enabled, the sysctl value is ignored.
53841 +
53842 +config GRKERNSEC_CHROOT_EXECLOG
53843 + bool "Log execs within chroot"
53844 + help
53845 + If you say Y here, all executions inside a chroot jail will be logged
53846 + to syslog. This can cause a large amount of logs if certain
53847 + applications (eg. djb's daemontools) are installed on the system, and
53848 + is therefore left as an option. If the sysctl option is enabled, a
53849 + sysctl option with name "chroot_execlog" is created.
53850 +
53851 +config GRKERNSEC_AUDIT_PTRACE
53852 + bool "Ptrace logging"
53853 + help
53854 + If you say Y here, all attempts to attach to a process via ptrace
53855 + will be logged. If the sysctl option is enabled, a sysctl option
53856 + with name "audit_ptrace" is created.
53857 +
53858 +config GRKERNSEC_AUDIT_CHDIR
53859 + bool "Chdir logging"
53860 + help
53861 + If you say Y here, all chdir() calls will be logged. If the sysctl
53862 + option is enabled, a sysctl option with name "audit_chdir" is created.
53863 +
53864 +config GRKERNSEC_AUDIT_MOUNT
53865 + bool "(Un)Mount logging"
53866 + help
53867 + If you say Y here, all mounts and unmounts will be logged. If the
53868 + sysctl option is enabled, a sysctl option with name "audit_mount" is
53869 + created.
53870 +
53871 +config GRKERNSEC_SIGNAL
53872 + bool "Signal logging"
53873 + help
53874 + If you say Y here, certain important signals will be logged, such as
53875 + SIGSEGV, which will as a result inform you of when an error in a program
53876 + occurred, which in some cases could mean a possible exploit attempt.
53877 + If the sysctl option is enabled, a sysctl option with name
53878 + "signal_logging" is created.
53879 +
53880 +config GRKERNSEC_FORKFAIL
53881 + bool "Fork failure logging"
53882 + help
53883 + If you say Y here, all failed fork() attempts will be logged.
53884 + This could suggest a fork bomb, or someone attempting to overstep
53885 + their process limit. If the sysctl option is enabled, a sysctl option
53886 + with name "forkfail_logging" is created.
53887 +
53888 +config GRKERNSEC_TIME
53889 + bool "Time change logging"
53890 + help
53891 + If you say Y here, any changes of the system clock will be logged.
53892 + If the sysctl option is enabled, a sysctl option with name
53893 + "timechange_logging" is created.
53894 +
53895 +config GRKERNSEC_PROC_IPADDR
53896 + bool "/proc/<pid>/ipaddr support"
53897 + help
53898 + If you say Y here, a new entry will be added to each /proc/<pid>
53899 + directory that contains the IP address of the person using the task.
53900 + The IP is carried across local TCP and AF_UNIX stream sockets.
53901 + This information can be useful for IDS/IPSes to perform remote response
53902 + to a local attack. The entry is readable by only the owner of the
53903 + process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
53904 + the RBAC system), and thus does not create privacy concerns.
53905 +
53906 +config GRKERNSEC_RWXMAP_LOG
53907 + bool 'Denied RWX mmap/mprotect logging'
53908 + depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
53909 + help
53910 + If you say Y here, calls to mmap() and mprotect() with explicit
53911 + usage of PROT_WRITE and PROT_EXEC together will be logged when
53912 + denied by the PAX_MPROTECT feature. If the sysctl option is
53913 + enabled, a sysctl option with name "rwxmap_logging" is created.
53914 +
53915 +config GRKERNSEC_AUDIT_TEXTREL
53916 + bool 'ELF text relocations logging (READ HELP)'
53917 + depends on PAX_MPROTECT
53918 + help
53919 + If you say Y here, text relocations will be logged with the filename
53920 + of the offending library or binary. The purpose of the feature is
53921 + to help Linux distribution developers get rid of libraries and
53922 + binaries that need text relocations which hinder the future progress
53923 + of PaX. Only Linux distribution developers should say Y here, and
53924 + never on a production machine, as this option creates an information
53925 + leak that could aid an attacker in defeating the randomization of
53926 + a single memory region. If the sysctl option is enabled, a sysctl
53927 + option with name "audit_textrel" is created.
53928 +
53929 +endmenu
53930 +
53931 +menu "Executable Protections"
53932 +depends on GRKERNSEC
53933 +
53934 +config GRKERNSEC_DMESG
53935 + bool "Dmesg(8) restriction"
53936 + help
53937 + If you say Y here, non-root users will not be able to use dmesg(8)
53938 + to view up to the last 4kb of messages in the kernel's log buffer.
53939 + The kernel's log buffer often contains kernel addresses and other
53940 + identifying information useful to an attacker in fingerprinting a
53941 + system for a targeted exploit.
53942 + If the sysctl option is enabled, a sysctl option with name "dmesg" is
53943 + created.
53944 +
53945 +config GRKERNSEC_HARDEN_PTRACE
53946 + bool "Deter ptrace-based process snooping"
53947 + help
53948 + If you say Y here, TTY sniffers and other malicious monitoring
53949 + programs implemented through ptrace will be defeated. If you
53950 + have been using the RBAC system, this option has already been
53951 + enabled for several years for all users, with the ability to make
53952 + fine-grained exceptions.
53953 +
53954 + This option only affects the ability of non-root users to ptrace
53955 + processes that are not a descendant of the ptracing process.
53956 + This means that strace ./binary and gdb ./binary will still work,
53957 + but attaching to arbitrary processes will not. If the sysctl
53958 + option is enabled, a sysctl option with name "harden_ptrace" is
53959 + created.
53960 +
53961 +config GRKERNSEC_TPE
53962 + bool "Trusted Path Execution (TPE)"
53963 + help
53964 + If you say Y here, you will be able to choose a gid to add to the
53965 + supplementary groups of users you want to mark as "untrusted."
53966 + These users will not be able to execute any files that are not in
53967 + root-owned directories writable only by root. If the sysctl option
53968 + is enabled, a sysctl option with name "tpe" is created.
53969 +
53970 +config GRKERNSEC_TPE_ALL
53971 + bool "Partially restrict all non-root users"
53972 + depends on GRKERNSEC_TPE
53973 + help
53974 + If you say Y here, all non-root users will be covered under
53975 + a weaker TPE restriction. This is separate from, and in addition to,
53976 + the main TPE options that you have selected elsewhere. Thus, if a
53977 + "trusted" GID is chosen, this restriction applies to even that GID.
53978 + Under this restriction, all non-root users will only be allowed to
53979 + execute files in directories they own that are not group or
53980 + world-writable, or in directories owned by root and writable only by
53981 + root. If the sysctl option is enabled, a sysctl option with name
53982 + "tpe_restrict_all" is created.
53983 +
53984 +config GRKERNSEC_TPE_INVERT
53985 + bool "Invert GID option"
53986 + depends on GRKERNSEC_TPE
53987 + help
53988 + If you say Y here, the group you specify in the TPE configuration will
53989 + decide what group TPE restrictions will be *disabled* for. This
53990 + option is useful if you want TPE restrictions to be applied to most
53991 + users on the system. If the sysctl option is enabled, a sysctl option
53992 + with name "tpe_invert" is created. Unlike other sysctl options, this
53993 + entry will default to on for backward-compatibility.
53994 +
53995 +config GRKERNSEC_TPE_GID
53996 + int "GID for untrusted users"
53997 + depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
53998 + default 1005
53999 + help
54000 + Setting this GID determines what group TPE restrictions will be
54001 + *enabled* for. If the sysctl option is enabled, a sysctl option
54002 + with name "tpe_gid" is created.
54003 +
54004 +config GRKERNSEC_TPE_GID
54005 + int "GID for trusted users"
54006 + depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
54007 + default 1005
54008 + help
54009 + Setting this GID determines what group TPE restrictions will be
54010 + *disabled* for. If the sysctl option is enabled, a sysctl option
54011 + with name "tpe_gid" is created.
54012 +
54013 +endmenu
54014 +menu "Network Protections"
54015 +depends on GRKERNSEC
54016 +
54017 +config GRKERNSEC_RANDNET
54018 + bool "Larger entropy pools"
54019 + help
54020 + If you say Y here, the entropy pools used for many features of Linux
54021 + and grsecurity will be doubled in size. Since several grsecurity
54022 + features use additional randomness, it is recommended that you say Y
54023 + here. Saying Y here has a similar effect as modifying
54024 + /proc/sys/kernel/random/poolsize.
54025 +
54026 +config GRKERNSEC_BLACKHOLE
54027 + bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
54028 + depends on NET
54029 + help
54030 + If you say Y here, neither TCP resets nor ICMP
54031 + destination-unreachable packets will be sent in response to packets
54032 + sent to ports for which no associated listening process exists.
54033 + This feature supports both IPV4 and IPV6 and exempts the
54034 + loopback interface from blackholing. Enabling this feature
54035 + makes a host more resilient to DoS attacks and reduces network
54036 + visibility against scanners.
54037 +
54038 + The blackhole feature as-implemented is equivalent to the FreeBSD
54039 + blackhole feature, as it prevents RST responses to all packets, not
54040 + just SYNs. Under most application behavior this causes no
54041 + problems, but applications (like haproxy) may not close certain
54042 + connections in a way that cleanly terminates them on the remote
54043 + end, leaving the remote host in LAST_ACK state. Because of this
54044 + side-effect and to prevent intentional LAST_ACK DoSes, this
54045 + feature also adds automatic mitigation against such attacks.
54046 + The mitigation drastically reduces the amount of time a socket
54047 + can spend in LAST_ACK state. If you're using haproxy and not
54048 + all servers it connects to have this option enabled, consider
54049 + disabling this feature on the haproxy host.
54050 +
54051 + If the sysctl option is enabled, two sysctl options with names
54052 + "ip_blackhole" and "lastack_retries" will be created.
54053 + While "ip_blackhole" takes the standard zero/non-zero on/off
54054 + toggle, "lastack_retries" uses the same kinds of values as
54055 + "tcp_retries1" and "tcp_retries2". The default value of 4
54056 + prevents a socket from lasting more than 45 seconds in LAST_ACK
54057 + state.
54058 +
54059 +config GRKERNSEC_SOCKET
54060 + bool "Socket restrictions"
54061 + depends on NET
54062 + help
54063 + If you say Y here, you will be able to choose from several options.
54064 + If you assign a GID on your system and add it to the supplementary
54065 + groups of users you want to restrict socket access to, this patch
54066 + will perform up to three things, based on the option(s) you choose.
54067 +
54068 +config GRKERNSEC_SOCKET_ALL
54069 + bool "Deny any sockets to group"
54070 + depends on GRKERNSEC_SOCKET
54071 + help
54072 + If you say Y here, you will be able to choose a GID whose users will
54073 + be unable to connect to other hosts from your machine or run server
54074 + applications from your machine. If the sysctl option is enabled, a
54075 + sysctl option with name "socket_all" is created.
54076 +
54077 +config GRKERNSEC_SOCKET_ALL_GID
54078 + int "GID to deny all sockets for"
54079 + depends on GRKERNSEC_SOCKET_ALL
54080 + default 1004
54081 + help
54082 + Here you can choose the GID to disable socket access for. Remember to
54083 + add the users you want socket access disabled for to the GID
54084 + specified here. If the sysctl option is enabled, a sysctl option
54085 + with name "socket_all_gid" is created.
54086 +
54087 +config GRKERNSEC_SOCKET_CLIENT
54088 + bool "Deny client sockets to group"
54089 + depends on GRKERNSEC_SOCKET
54090 + help
54091 + If you say Y here, you will be able to choose a GID whose users will
54092 + be unable to connect to other hosts from your machine, but will be
54093 + able to run servers. If this option is enabled, all users in the group
54094 + you specify will have to use passive mode when initiating ftp transfers
54095 + from the shell on your machine. If the sysctl option is enabled, a
54096 + sysctl option with name "socket_client" is created.
54097 +
54098 +config GRKERNSEC_SOCKET_CLIENT_GID
54099 + int "GID to deny client sockets for"
54100 + depends on GRKERNSEC_SOCKET_CLIENT
54101 + default 1003
54102 + help
54103 + Here you can choose the GID to disable client socket access for.
54104 + Remember to add the users you want client socket access disabled for to
54105 + the GID specified here. If the sysctl option is enabled, a sysctl
54106 + option with name "socket_client_gid" is created.
54107 +
54108 +config GRKERNSEC_SOCKET_SERVER
54109 + bool "Deny server sockets to group"
54110 + depends on GRKERNSEC_SOCKET
54111 + help
54112 + If you say Y here, you will be able to choose a GID whose users will
54113 + be unable to run server applications from your machine. If the sysctl
54114 + option is enabled, a sysctl option with name "socket_server" is created.
54115 +
54116 +config GRKERNSEC_SOCKET_SERVER_GID
54117 + int "GID to deny server sockets for"
54118 + depends on GRKERNSEC_SOCKET_SERVER
54119 + default 1002
54120 + help
54121 + Here you can choose the GID to disable server socket access for.
54122 + Remember to add the users you want server socket access disabled for to
54123 + the GID specified here. If the sysctl option is enabled, a sysctl
54124 + option with name "socket_server_gid" is created.
54125 +
54126 +endmenu
54127 +menu "Sysctl support"
54128 +depends on GRKERNSEC && SYSCTL
54129 +
54130 +config GRKERNSEC_SYSCTL
54131 + bool "Sysctl support"
54132 + help
54133 + If you say Y here, you will be able to change the options that
54134 + grsecurity runs with at bootup, without having to recompile your
54135 + kernel. You can echo values to files in /proc/sys/kernel/grsecurity
54136 + to enable (1) or disable (0) various features. All the sysctl entries
54137 + are mutable until the "grsec_lock" entry is set to a non-zero value.
54138 + All features enabled in the kernel configuration are disabled at boot
54139 + if you do not say Y to the "Turn on features by default" option.
54140 + All options should be set at startup, and the grsec_lock entry should
54141 + be set to a non-zero value after all the options are set.
54142 + *THIS IS EXTREMELY IMPORTANT*
54143 +
54144 +config GRKERNSEC_SYSCTL_DISTRO
54145 + bool "Extra sysctl support for distro makers (READ HELP)"
54146 + depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
54147 + help
54148 + If you say Y here, additional sysctl options will be created
54149 + for features that affect processes running as root. Therefore,
54150 + it is critical when using this option that the grsec_lock entry be
54151 + enabled after boot. Only distros with prebuilt kernel packages
54152 + with this option enabled that can ensure grsec_lock is enabled
54153 + after boot should use this option.
54154 + *Failure to set grsec_lock after boot makes all grsec features
54155 + this option covers useless*
54156 +
54157 + Currently this option creates the following sysctl entries:
54158 + "Disable Privileged I/O": "disable_priv_io"
54159 +
54160 +config GRKERNSEC_SYSCTL_ON
54161 + bool "Turn on features by default"
54162 + depends on GRKERNSEC_SYSCTL
54163 + help
54164 + If you say Y here, instead of having all features enabled in the
54165 + kernel configuration disabled at boot time, the features will be
54166 + enabled at boot time. It is recommended you say Y here unless
54167 + there is some reason you would want all sysctl-tunable features to
54168 + be disabled by default. As mentioned elsewhere, it is important
54169 + to enable the grsec_lock entry once you have finished modifying
54170 + the sysctl entries.
54171 +
54172 +endmenu
54173 +menu "Logging Options"
54174 +depends on GRKERNSEC
54175 +
54176 +config GRKERNSEC_FLOODTIME
54177 + int "Seconds in between log messages (minimum)"
54178 + default 10
54179 + help
54180 + This option allows you to enforce the number of seconds between
54181 + grsecurity log messages. The default should be suitable for most
54182 + people, however, if you choose to change it, choose a value small enough
54183 + to allow informative logs to be produced, but large enough to
54184 + prevent flooding.
54185 +
54186 +config GRKERNSEC_FLOODBURST
54187 + int "Number of messages in a burst (maximum)"
54188 + default 6
54189 + help
54190 + This option allows you to choose the maximum number of messages allowed
54191 + within the flood time interval you chose in a separate option. The
54192 + default should be suitable for most people, however if you find that
54193 + many of your logs are being interpreted as flooding, you may want to
54194 + raise this value.
54195 +
54196 +endmenu
54197 +
54198 +endmenu
54199 diff -urNp linux-3.0.4/grsecurity/Makefile linux-3.0.4/grsecurity/Makefile
54200 --- linux-3.0.4/grsecurity/Makefile 1969-12-31 19:00:00.000000000 -0500
54201 +++ linux-3.0.4/grsecurity/Makefile 2011-09-14 23:29:56.000000000 -0400
54202 @@ -0,0 +1,35 @@
54203 +# grsecurity's ACL system was originally written in 2001 by Michael Dalton
54204 +# during 2001-2009 it has been completely redesigned by Brad Spengler
54205 +# into an RBAC system
54206 +#
54207 +# All code in this directory and various hooks inserted throughout the kernel
54208 +# are copyright Brad Spengler - Open Source Security, Inc., and released
54209 +# under the GPL v2 or higher
54210 +
54211 +obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
54212 + grsec_mount.o grsec_sig.o grsec_sysctl.o \
54213 + grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
54214 +
54215 +obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
54216 + gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
54217 + gracl_learn.o grsec_log.o
54218 +obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
54219 +
54220 +ifdef CONFIG_NET
54221 +obj-y += grsec_sock.o
54222 +obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
54223 +endif
54224 +
54225 +ifndef CONFIG_GRKERNSEC
54226 +obj-y += grsec_disabled.o
54227 +endif
54228 +
54229 +ifdef CONFIG_GRKERNSEC_HIDESYM
54230 +extra-y := grsec_hidesym.o
54231 +$(obj)/grsec_hidesym.o:
54232 + @-chmod -f 500 /boot
54233 + @-chmod -f 500 /lib/modules
54234 + @-chmod -f 500 /lib64/modules
54235 + @-chmod -f 700 .
54236 + @echo ' grsec: protected kernel image paths'
54237 +endif
54238 diff -urNp linux-3.0.4/include/acpi/acpi_bus.h linux-3.0.4/include/acpi/acpi_bus.h
54239 --- linux-3.0.4/include/acpi/acpi_bus.h 2011-07-21 22:17:23.000000000 -0400
54240 +++ linux-3.0.4/include/acpi/acpi_bus.h 2011-08-23 21:47:56.000000000 -0400
54241 @@ -107,7 +107,7 @@ struct acpi_device_ops {
54242 acpi_op_bind bind;
54243 acpi_op_unbind unbind;
54244 acpi_op_notify notify;
54245 -};
54246 +} __no_const;
54247
54248 #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */
54249
54250 diff -urNp linux-3.0.4/include/asm-generic/atomic-long.h linux-3.0.4/include/asm-generic/atomic-long.h
54251 --- linux-3.0.4/include/asm-generic/atomic-long.h 2011-07-21 22:17:23.000000000 -0400
54252 +++ linux-3.0.4/include/asm-generic/atomic-long.h 2011-08-23 21:47:56.000000000 -0400
54253 @@ -22,6 +22,12 @@
54254
54255 typedef atomic64_t atomic_long_t;
54256
54257 +#ifdef CONFIG_PAX_REFCOUNT
54258 +typedef atomic64_unchecked_t atomic_long_unchecked_t;
54259 +#else
54260 +typedef atomic64_t atomic_long_unchecked_t;
54261 +#endif
54262 +
54263 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
54264
54265 static inline long atomic_long_read(atomic_long_t *l)
54266 @@ -31,6 +37,15 @@ static inline long atomic_long_read(atom
54267 return (long)atomic64_read(v);
54268 }
54269
54270 +#ifdef CONFIG_PAX_REFCOUNT
54271 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
54272 +{
54273 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
54274 +
54275 + return (long)atomic64_read_unchecked(v);
54276 +}
54277 +#endif
54278 +
54279 static inline void atomic_long_set(atomic_long_t *l, long i)
54280 {
54281 atomic64_t *v = (atomic64_t *)l;
54282 @@ -38,6 +53,15 @@ static inline void atomic_long_set(atomi
54283 atomic64_set(v, i);
54284 }
54285
54286 +#ifdef CONFIG_PAX_REFCOUNT
54287 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
54288 +{
54289 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
54290 +
54291 + atomic64_set_unchecked(v, i);
54292 +}
54293 +#endif
54294 +
54295 static inline void atomic_long_inc(atomic_long_t *l)
54296 {
54297 atomic64_t *v = (atomic64_t *)l;
54298 @@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomi
54299 atomic64_inc(v);
54300 }
54301
54302 +#ifdef CONFIG_PAX_REFCOUNT
54303 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
54304 +{
54305 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
54306 +
54307 + atomic64_inc_unchecked(v);
54308 +}
54309 +#endif
54310 +
54311 static inline void atomic_long_dec(atomic_long_t *l)
54312 {
54313 atomic64_t *v = (atomic64_t *)l;
54314 @@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomi
54315 atomic64_dec(v);
54316 }
54317
54318 +#ifdef CONFIG_PAX_REFCOUNT
54319 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
54320 +{
54321 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
54322 +
54323 + atomic64_dec_unchecked(v);
54324 +}
54325 +#endif
54326 +
54327 static inline void atomic_long_add(long i, atomic_long_t *l)
54328 {
54329 atomic64_t *v = (atomic64_t *)l;
54330 @@ -59,6 +101,15 @@ static inline void atomic_long_add(long
54331 atomic64_add(i, v);
54332 }
54333
54334 +#ifdef CONFIG_PAX_REFCOUNT
54335 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
54336 +{
54337 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
54338 +
54339 + atomic64_add_unchecked(i, v);
54340 +}
54341 +#endif
54342 +
54343 static inline void atomic_long_sub(long i, atomic_long_t *l)
54344 {
54345 atomic64_t *v = (atomic64_t *)l;
54346 @@ -66,6 +117,15 @@ static inline void atomic_long_sub(long
54347 atomic64_sub(i, v);
54348 }
54349
54350 +#ifdef CONFIG_PAX_REFCOUNT
54351 +static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
54352 +{
54353 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
54354 +
54355 + atomic64_sub_unchecked(i, v);
54356 +}
54357 +#endif
54358 +
54359 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
54360 {
54361 atomic64_t *v = (atomic64_t *)l;
54362 @@ -115,6 +175,15 @@ static inline long atomic_long_inc_retur
54363 return (long)atomic64_inc_return(v);
54364 }
54365
54366 +#ifdef CONFIG_PAX_REFCOUNT
54367 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
54368 +{
54369 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
54370 +
54371 + return (long)atomic64_inc_return_unchecked(v);
54372 +}
54373 +#endif
54374 +
54375 static inline long atomic_long_dec_return(atomic_long_t *l)
54376 {
54377 atomic64_t *v = (atomic64_t *)l;
54378 @@ -140,6 +209,12 @@ static inline long atomic_long_add_unles
54379
54380 typedef atomic_t atomic_long_t;
54381
54382 +#ifdef CONFIG_PAX_REFCOUNT
54383 +typedef atomic_unchecked_t atomic_long_unchecked_t;
54384 +#else
54385 +typedef atomic_t atomic_long_unchecked_t;
54386 +#endif
54387 +
54388 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
54389 static inline long atomic_long_read(atomic_long_t *l)
54390 {
54391 @@ -148,6 +223,15 @@ static inline long atomic_long_read(atom
54392 return (long)atomic_read(v);
54393 }
54394
54395 +#ifdef CONFIG_PAX_REFCOUNT
54396 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
54397 +{
54398 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
54399 +
54400 + return (long)atomic_read_unchecked(v);
54401 +}
54402 +#endif
54403 +
54404 static inline void atomic_long_set(atomic_long_t *l, long i)
54405 {
54406 atomic_t *v = (atomic_t *)l;
54407 @@ -155,6 +239,15 @@ static inline void atomic_long_set(atomi
54408 atomic_set(v, i);
54409 }
54410
54411 +#ifdef CONFIG_PAX_REFCOUNT
54412 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
54413 +{
54414 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
54415 +
54416 + atomic_set_unchecked(v, i);
54417 +}
54418 +#endif
54419 +
54420 static inline void atomic_long_inc(atomic_long_t *l)
54421 {
54422 atomic_t *v = (atomic_t *)l;
54423 @@ -162,6 +255,15 @@ static inline void atomic_long_inc(atomi
54424 atomic_inc(v);
54425 }
54426
54427 +#ifdef CONFIG_PAX_REFCOUNT
54428 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
54429 +{
54430 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
54431 +
54432 + atomic_inc_unchecked(v);
54433 +}
54434 +#endif
54435 +
54436 static inline void atomic_long_dec(atomic_long_t *l)
54437 {
54438 atomic_t *v = (atomic_t *)l;
54439 @@ -169,6 +271,15 @@ static inline void atomic_long_dec(atomi
54440 atomic_dec(v);
54441 }
54442
54443 +#ifdef CONFIG_PAX_REFCOUNT
54444 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
54445 +{
54446 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
54447 +
54448 + atomic_dec_unchecked(v);
54449 +}
54450 +#endif
54451 +
54452 static inline void atomic_long_add(long i, atomic_long_t *l)
54453 {
54454 atomic_t *v = (atomic_t *)l;
54455 @@ -176,6 +287,15 @@ static inline void atomic_long_add(long
54456 atomic_add(i, v);
54457 }
54458
54459 +#ifdef CONFIG_PAX_REFCOUNT
54460 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
54461 +{
54462 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
54463 +
54464 + atomic_add_unchecked(i, v);
54465 +}
54466 +#endif
54467 +
54468 static inline void atomic_long_sub(long i, atomic_long_t *l)
54469 {
54470 atomic_t *v = (atomic_t *)l;
54471 @@ -183,6 +303,15 @@ static inline void atomic_long_sub(long
54472 atomic_sub(i, v);
54473 }
54474
54475 +#ifdef CONFIG_PAX_REFCOUNT
54476 +static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
54477 +{
54478 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
54479 +
54480 + atomic_sub_unchecked(i, v);
54481 +}
54482 +#endif
54483 +
54484 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
54485 {
54486 atomic_t *v = (atomic_t *)l;
54487 @@ -232,6 +361,15 @@ static inline long atomic_long_inc_retur
54488 return (long)atomic_inc_return(v);
54489 }
54490
54491 +#ifdef CONFIG_PAX_REFCOUNT
54492 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
54493 +{
54494 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
54495 +
54496 + return (long)atomic_inc_return_unchecked(v);
54497 +}
54498 +#endif
54499 +
54500 static inline long atomic_long_dec_return(atomic_long_t *l)
54501 {
54502 atomic_t *v = (atomic_t *)l;
54503 @@ -255,4 +393,49 @@ static inline long atomic_long_add_unles
54504
54505 #endif /* BITS_PER_LONG == 64 */
54506
54507 +#ifdef CONFIG_PAX_REFCOUNT
54508 +static inline void pax_refcount_needs_these_functions(void)
54509 +{
54510 + atomic_read_unchecked((atomic_unchecked_t *)NULL);
54511 + atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
54512 + atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
54513 + atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
54514 + atomic_inc_unchecked((atomic_unchecked_t *)NULL);
54515 + (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
54516 + atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
54517 + atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
54518 + atomic_dec_unchecked((atomic_unchecked_t *)NULL);
54519 + atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
54520 + (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
54521 +
54522 + atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
54523 + atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
54524 + atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
54525 + atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
54526 + atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
54527 + atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
54528 + atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
54529 +}
54530 +#else
54531 +#define atomic_read_unchecked(v) atomic_read(v)
54532 +#define atomic_set_unchecked(v, i) atomic_set((v), (i))
54533 +#define atomic_add_unchecked(i, v) atomic_add((i), (v))
54534 +#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
54535 +#define atomic_inc_unchecked(v) atomic_inc(v)
54536 +#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
54537 +#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
54538 +#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
54539 +#define atomic_dec_unchecked(v) atomic_dec(v)
54540 +#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
54541 +#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
54542 +
54543 +#define atomic_long_read_unchecked(v) atomic_long_read(v)
54544 +#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
54545 +#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
54546 +#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
54547 +#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
54548 +#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
54549 +#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
54550 +#endif
54551 +
54552 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
54553 diff -urNp linux-3.0.4/include/asm-generic/cache.h linux-3.0.4/include/asm-generic/cache.h
54554 --- linux-3.0.4/include/asm-generic/cache.h 2011-07-21 22:17:23.000000000 -0400
54555 +++ linux-3.0.4/include/asm-generic/cache.h 2011-08-23 21:47:56.000000000 -0400
54556 @@ -6,7 +6,7 @@
54557 * cache lines need to provide their own cache.h.
54558 */
54559
54560 -#define L1_CACHE_SHIFT 5
54561 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
54562 +#define L1_CACHE_SHIFT 5UL
54563 +#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
54564
54565 #endif /* __ASM_GENERIC_CACHE_H */
54566 diff -urNp linux-3.0.4/include/asm-generic/int-l64.h linux-3.0.4/include/asm-generic/int-l64.h
54567 --- linux-3.0.4/include/asm-generic/int-l64.h 2011-07-21 22:17:23.000000000 -0400
54568 +++ linux-3.0.4/include/asm-generic/int-l64.h 2011-08-23 21:47:56.000000000 -0400
54569 @@ -46,6 +46,8 @@ typedef unsigned int u32;
54570 typedef signed long s64;
54571 typedef unsigned long u64;
54572
54573 +typedef unsigned int intoverflow_t __attribute__ ((mode(TI)));
54574 +
54575 #define S8_C(x) x
54576 #define U8_C(x) x ## U
54577 #define S16_C(x) x
54578 diff -urNp linux-3.0.4/include/asm-generic/int-ll64.h linux-3.0.4/include/asm-generic/int-ll64.h
54579 --- linux-3.0.4/include/asm-generic/int-ll64.h 2011-07-21 22:17:23.000000000 -0400
54580 +++ linux-3.0.4/include/asm-generic/int-ll64.h 2011-08-23 21:47:56.000000000 -0400
54581 @@ -51,6 +51,8 @@ typedef unsigned int u32;
54582 typedef signed long long s64;
54583 typedef unsigned long long u64;
54584
54585 +typedef unsigned long long intoverflow_t;
54586 +
54587 #define S8_C(x) x
54588 #define U8_C(x) x ## U
54589 #define S16_C(x) x
54590 diff -urNp linux-3.0.4/include/asm-generic/kmap_types.h linux-3.0.4/include/asm-generic/kmap_types.h
54591 --- linux-3.0.4/include/asm-generic/kmap_types.h 2011-07-21 22:17:23.000000000 -0400
54592 +++ linux-3.0.4/include/asm-generic/kmap_types.h 2011-08-23 21:47:56.000000000 -0400
54593 @@ -29,10 +29,11 @@ KMAP_D(16) KM_IRQ_PTE,
54594 KMAP_D(17) KM_NMI,
54595 KMAP_D(18) KM_NMI_PTE,
54596 KMAP_D(19) KM_KDB,
54597 +KMAP_D(20) KM_CLEARPAGE,
54598 /*
54599 * Remember to update debug_kmap_atomic() when adding new kmap types!
54600 */
54601 -KMAP_D(20) KM_TYPE_NR
54602 +KMAP_D(21) KM_TYPE_NR
54603 };
54604
54605 #undef KMAP_D
54606 diff -urNp linux-3.0.4/include/asm-generic/pgtable.h linux-3.0.4/include/asm-generic/pgtable.h
54607 --- linux-3.0.4/include/asm-generic/pgtable.h 2011-07-21 22:17:23.000000000 -0400
54608 +++ linux-3.0.4/include/asm-generic/pgtable.h 2011-08-23 21:47:56.000000000 -0400
54609 @@ -443,6 +443,14 @@ static inline int pmd_write(pmd_t pmd)
54610 #endif /* __HAVE_ARCH_PMD_WRITE */
54611 #endif
54612
54613 +#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
54614 +static inline unsigned long pax_open_kernel(void) { return 0; }
54615 +#endif
54616 +
54617 +#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
54618 +static inline unsigned long pax_close_kernel(void) { return 0; }
54619 +#endif
54620 +
54621 #endif /* !__ASSEMBLY__ */
54622
54623 #endif /* _ASM_GENERIC_PGTABLE_H */
54624 diff -urNp linux-3.0.4/include/asm-generic/pgtable-nopmd.h linux-3.0.4/include/asm-generic/pgtable-nopmd.h
54625 --- linux-3.0.4/include/asm-generic/pgtable-nopmd.h 2011-07-21 22:17:23.000000000 -0400
54626 +++ linux-3.0.4/include/asm-generic/pgtable-nopmd.h 2011-08-23 21:47:56.000000000 -0400
54627 @@ -1,14 +1,19 @@
54628 #ifndef _PGTABLE_NOPMD_H
54629 #define _PGTABLE_NOPMD_H
54630
54631 -#ifndef __ASSEMBLY__
54632 -
54633 #include <asm-generic/pgtable-nopud.h>
54634
54635 -struct mm_struct;
54636 -
54637 #define __PAGETABLE_PMD_FOLDED
54638
54639 +#define PMD_SHIFT PUD_SHIFT
54640 +#define PTRS_PER_PMD 1
54641 +#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
54642 +#define PMD_MASK (~(PMD_SIZE-1))
54643 +
54644 +#ifndef __ASSEMBLY__
54645 +
54646 +struct mm_struct;
54647 +
54648 /*
54649 * Having the pmd type consist of a pud gets the size right, and allows
54650 * us to conceptually access the pud entry that this pmd is folded into
54651 @@ -16,11 +21,6 @@ struct mm_struct;
54652 */
54653 typedef struct { pud_t pud; } pmd_t;
54654
54655 -#define PMD_SHIFT PUD_SHIFT
54656 -#define PTRS_PER_PMD 1
54657 -#define PMD_SIZE (1UL << PMD_SHIFT)
54658 -#define PMD_MASK (~(PMD_SIZE-1))
54659 -
54660 /*
54661 * The "pud_xxx()" functions here are trivial for a folded two-level
54662 * setup: the pmd is never bad, and a pmd always exists (as it's folded
54663 diff -urNp linux-3.0.4/include/asm-generic/pgtable-nopud.h linux-3.0.4/include/asm-generic/pgtable-nopud.h
54664 --- linux-3.0.4/include/asm-generic/pgtable-nopud.h 2011-07-21 22:17:23.000000000 -0400
54665 +++ linux-3.0.4/include/asm-generic/pgtable-nopud.h 2011-08-23 21:47:56.000000000 -0400
54666 @@ -1,10 +1,15 @@
54667 #ifndef _PGTABLE_NOPUD_H
54668 #define _PGTABLE_NOPUD_H
54669
54670 -#ifndef __ASSEMBLY__
54671 -
54672 #define __PAGETABLE_PUD_FOLDED
54673
54674 +#define PUD_SHIFT PGDIR_SHIFT
54675 +#define PTRS_PER_PUD 1
54676 +#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
54677 +#define PUD_MASK (~(PUD_SIZE-1))
54678 +
54679 +#ifndef __ASSEMBLY__
54680 +
54681 /*
54682 * Having the pud type consist of a pgd gets the size right, and allows
54683 * us to conceptually access the pgd entry that this pud is folded into
54684 @@ -12,11 +17,6 @@
54685 */
54686 typedef struct { pgd_t pgd; } pud_t;
54687
54688 -#define PUD_SHIFT PGDIR_SHIFT
54689 -#define PTRS_PER_PUD 1
54690 -#define PUD_SIZE (1UL << PUD_SHIFT)
54691 -#define PUD_MASK (~(PUD_SIZE-1))
54692 -
54693 /*
54694 * The "pgd_xxx()" functions here are trivial for a folded two-level
54695 * setup: the pud is never bad, and a pud always exists (as it's folded
54696 diff -urNp linux-3.0.4/include/asm-generic/vmlinux.lds.h linux-3.0.4/include/asm-generic/vmlinux.lds.h
54697 --- linux-3.0.4/include/asm-generic/vmlinux.lds.h 2011-07-21 22:17:23.000000000 -0400
54698 +++ linux-3.0.4/include/asm-generic/vmlinux.lds.h 2011-08-23 21:47:56.000000000 -0400
54699 @@ -217,6 +217,7 @@
54700 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
54701 VMLINUX_SYMBOL(__start_rodata) = .; \
54702 *(.rodata) *(.rodata.*) \
54703 + *(.data..read_only) \
54704 *(__vermagic) /* Kernel version magic */ \
54705 . = ALIGN(8); \
54706 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
54707 @@ -723,17 +724,18 @@
54708 * section in the linker script will go there too. @phdr should have
54709 * a leading colon.
54710 *
54711 - * Note that this macros defines __per_cpu_load as an absolute symbol.
54712 + * Note that this macros defines per_cpu_load as an absolute symbol.
54713 * If there is no need to put the percpu section at a predetermined
54714 * address, use PERCPU_SECTION.
54715 */
54716 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
54717 - VMLINUX_SYMBOL(__per_cpu_load) = .; \
54718 - .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
54719 + per_cpu_load = .; \
54720 + .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
54721 - LOAD_OFFSET) { \
54722 + VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
54723 PERCPU_INPUT(cacheline) \
54724 } phdr \
54725 - . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
54726 + . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
54727
54728 /**
54729 * PERCPU_SECTION - define output section for percpu area, simple version
54730 diff -urNp linux-3.0.4/include/drm/drm_crtc_helper.h linux-3.0.4/include/drm/drm_crtc_helper.h
54731 --- linux-3.0.4/include/drm/drm_crtc_helper.h 2011-07-21 22:17:23.000000000 -0400
54732 +++ linux-3.0.4/include/drm/drm_crtc_helper.h 2011-08-23 21:47:56.000000000 -0400
54733 @@ -74,7 +74,7 @@ struct drm_crtc_helper_funcs {
54734
54735 /* disable crtc when not in use - more explicit than dpms off */
54736 void (*disable)(struct drm_crtc *crtc);
54737 -};
54738 +} __no_const;
54739
54740 struct drm_encoder_helper_funcs {
54741 void (*dpms)(struct drm_encoder *encoder, int mode);
54742 @@ -95,7 +95,7 @@ struct drm_encoder_helper_funcs {
54743 struct drm_connector *connector);
54744 /* disable encoder when not in use - more explicit than dpms off */
54745 void (*disable)(struct drm_encoder *encoder);
54746 -};
54747 +} __no_const;
54748
54749 struct drm_connector_helper_funcs {
54750 int (*get_modes)(struct drm_connector *connector);
54751 diff -urNp linux-3.0.4/include/drm/drmP.h linux-3.0.4/include/drm/drmP.h
54752 --- linux-3.0.4/include/drm/drmP.h 2011-07-21 22:17:23.000000000 -0400
54753 +++ linux-3.0.4/include/drm/drmP.h 2011-08-23 21:47:56.000000000 -0400
54754 @@ -73,6 +73,7 @@
54755 #include <linux/workqueue.h>
54756 #include <linux/poll.h>
54757 #include <asm/pgalloc.h>
54758 +#include <asm/local.h>
54759 #include "drm.h"
54760
54761 #include <linux/idr.h>
54762 @@ -1033,7 +1034,7 @@ struct drm_device {
54763
54764 /** \name Usage Counters */
54765 /*@{ */
54766 - int open_count; /**< Outstanding files open */
54767 + local_t open_count; /**< Outstanding files open */
54768 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
54769 atomic_t vma_count; /**< Outstanding vma areas open */
54770 int buf_use; /**< Buffers in use -- cannot alloc */
54771 @@ -1044,7 +1045,7 @@ struct drm_device {
54772 /*@{ */
54773 unsigned long counters;
54774 enum drm_stat_type types[15];
54775 - atomic_t counts[15];
54776 + atomic_unchecked_t counts[15];
54777 /*@} */
54778
54779 struct list_head filelist;
54780 diff -urNp linux-3.0.4/include/drm/ttm/ttm_memory.h linux-3.0.4/include/drm/ttm/ttm_memory.h
54781 --- linux-3.0.4/include/drm/ttm/ttm_memory.h 2011-07-21 22:17:23.000000000 -0400
54782 +++ linux-3.0.4/include/drm/ttm/ttm_memory.h 2011-08-23 21:47:56.000000000 -0400
54783 @@ -47,7 +47,7 @@
54784
54785 struct ttm_mem_shrink {
54786 int (*do_shrink) (struct ttm_mem_shrink *);
54787 -};
54788 +} __no_const;
54789
54790 /**
54791 * struct ttm_mem_global - Global memory accounting structure.
54792 diff -urNp linux-3.0.4/include/linux/a.out.h linux-3.0.4/include/linux/a.out.h
54793 --- linux-3.0.4/include/linux/a.out.h 2011-07-21 22:17:23.000000000 -0400
54794 +++ linux-3.0.4/include/linux/a.out.h 2011-08-23 21:47:56.000000000 -0400
54795 @@ -39,6 +39,14 @@ enum machine_type {
54796 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
54797 };
54798
54799 +/* Constants for the N_FLAGS field */
54800 +#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
54801 +#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
54802 +#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
54803 +#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
54804 +/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
54805 +#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
54806 +
54807 #if !defined (N_MAGIC)
54808 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
54809 #endif
54810 diff -urNp linux-3.0.4/include/linux/atmdev.h linux-3.0.4/include/linux/atmdev.h
54811 --- linux-3.0.4/include/linux/atmdev.h 2011-07-21 22:17:23.000000000 -0400
54812 +++ linux-3.0.4/include/linux/atmdev.h 2011-08-23 21:47:56.000000000 -0400
54813 @@ -237,7 +237,7 @@ struct compat_atm_iobuf {
54814 #endif
54815
54816 struct k_atm_aal_stats {
54817 -#define __HANDLE_ITEM(i) atomic_t i
54818 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
54819 __AAL_STAT_ITEMS
54820 #undef __HANDLE_ITEM
54821 };
54822 diff -urNp linux-3.0.4/include/linux/binfmts.h linux-3.0.4/include/linux/binfmts.h
54823 --- linux-3.0.4/include/linux/binfmts.h 2011-07-21 22:17:23.000000000 -0400
54824 +++ linux-3.0.4/include/linux/binfmts.h 2011-08-23 21:47:56.000000000 -0400
54825 @@ -88,6 +88,7 @@ struct linux_binfmt {
54826 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
54827 int (*load_shlib)(struct file *);
54828 int (*core_dump)(struct coredump_params *cprm);
54829 + void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
54830 unsigned long min_coredump; /* minimal dump size */
54831 };
54832
54833 diff -urNp linux-3.0.4/include/linux/blkdev.h linux-3.0.4/include/linux/blkdev.h
54834 --- linux-3.0.4/include/linux/blkdev.h 2011-07-21 22:17:23.000000000 -0400
54835 +++ linux-3.0.4/include/linux/blkdev.h 2011-08-26 19:49:56.000000000 -0400
54836 @@ -1308,7 +1308,7 @@ struct block_device_operations {
54837 /* this callback is with swap_lock and sometimes page table lock held */
54838 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
54839 struct module *owner;
54840 -};
54841 +} __do_const;
54842
54843 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
54844 unsigned long);
54845 diff -urNp linux-3.0.4/include/linux/blktrace_api.h linux-3.0.4/include/linux/blktrace_api.h
54846 --- linux-3.0.4/include/linux/blktrace_api.h 2011-07-21 22:17:23.000000000 -0400
54847 +++ linux-3.0.4/include/linux/blktrace_api.h 2011-08-23 21:47:56.000000000 -0400
54848 @@ -161,7 +161,7 @@ struct blk_trace {
54849 struct dentry *dir;
54850 struct dentry *dropped_file;
54851 struct dentry *msg_file;
54852 - atomic_t dropped;
54853 + atomic_unchecked_t dropped;
54854 };
54855
54856 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
54857 diff -urNp linux-3.0.4/include/linux/byteorder/little_endian.h linux-3.0.4/include/linux/byteorder/little_endian.h
54858 --- linux-3.0.4/include/linux/byteorder/little_endian.h 2011-07-21 22:17:23.000000000 -0400
54859 +++ linux-3.0.4/include/linux/byteorder/little_endian.h 2011-08-23 21:47:56.000000000 -0400
54860 @@ -42,51 +42,51 @@
54861
54862 static inline __le64 __cpu_to_le64p(const __u64 *p)
54863 {
54864 - return (__force __le64)*p;
54865 + return (__force const __le64)*p;
54866 }
54867 static inline __u64 __le64_to_cpup(const __le64 *p)
54868 {
54869 - return (__force __u64)*p;
54870 + return (__force const __u64)*p;
54871 }
54872 static inline __le32 __cpu_to_le32p(const __u32 *p)
54873 {
54874 - return (__force __le32)*p;
54875 + return (__force const __le32)*p;
54876 }
54877 static inline __u32 __le32_to_cpup(const __le32 *p)
54878 {
54879 - return (__force __u32)*p;
54880 + return (__force const __u32)*p;
54881 }
54882 static inline __le16 __cpu_to_le16p(const __u16 *p)
54883 {
54884 - return (__force __le16)*p;
54885 + return (__force const __le16)*p;
54886 }
54887 static inline __u16 __le16_to_cpup(const __le16 *p)
54888 {
54889 - return (__force __u16)*p;
54890 + return (__force const __u16)*p;
54891 }
54892 static inline __be64 __cpu_to_be64p(const __u64 *p)
54893 {
54894 - return (__force __be64)__swab64p(p);
54895 + return (__force const __be64)__swab64p(p);
54896 }
54897 static inline __u64 __be64_to_cpup(const __be64 *p)
54898 {
54899 - return __swab64p((__u64 *)p);
54900 + return __swab64p((const __u64 *)p);
54901 }
54902 static inline __be32 __cpu_to_be32p(const __u32 *p)
54903 {
54904 - return (__force __be32)__swab32p(p);
54905 + return (__force const __be32)__swab32p(p);
54906 }
54907 static inline __u32 __be32_to_cpup(const __be32 *p)
54908 {
54909 - return __swab32p((__u32 *)p);
54910 + return __swab32p((const __u32 *)p);
54911 }
54912 static inline __be16 __cpu_to_be16p(const __u16 *p)
54913 {
54914 - return (__force __be16)__swab16p(p);
54915 + return (__force const __be16)__swab16p(p);
54916 }
54917 static inline __u16 __be16_to_cpup(const __be16 *p)
54918 {
54919 - return __swab16p((__u16 *)p);
54920 + return __swab16p((const __u16 *)p);
54921 }
54922 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
54923 #define __le64_to_cpus(x) do { (void)(x); } while (0)
54924 diff -urNp linux-3.0.4/include/linux/cache.h linux-3.0.4/include/linux/cache.h
54925 --- linux-3.0.4/include/linux/cache.h 2011-07-21 22:17:23.000000000 -0400
54926 +++ linux-3.0.4/include/linux/cache.h 2011-08-23 21:47:56.000000000 -0400
54927 @@ -16,6 +16,10 @@
54928 #define __read_mostly
54929 #endif
54930
54931 +#ifndef __read_only
54932 +#define __read_only __read_mostly
54933 +#endif
54934 +
54935 #ifndef ____cacheline_aligned
54936 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
54937 #endif
54938 diff -urNp linux-3.0.4/include/linux/capability.h linux-3.0.4/include/linux/capability.h
54939 --- linux-3.0.4/include/linux/capability.h 2011-07-21 22:17:23.000000000 -0400
54940 +++ linux-3.0.4/include/linux/capability.h 2011-08-23 21:48:14.000000000 -0400
54941 @@ -547,6 +547,9 @@ extern bool capable(int cap);
54942 extern bool ns_capable(struct user_namespace *ns, int cap);
54943 extern bool task_ns_capable(struct task_struct *t, int cap);
54944 extern bool nsown_capable(int cap);
54945 +extern bool task_ns_capable_nolog(struct task_struct *t, int cap);
54946 +extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
54947 +extern bool capable_nolog(int cap);
54948
54949 /* audit system wants to get cap info from files as well */
54950 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
54951 diff -urNp linux-3.0.4/include/linux/cleancache.h linux-3.0.4/include/linux/cleancache.h
54952 --- linux-3.0.4/include/linux/cleancache.h 2011-07-21 22:17:23.000000000 -0400
54953 +++ linux-3.0.4/include/linux/cleancache.h 2011-08-23 21:47:56.000000000 -0400
54954 @@ -31,7 +31,7 @@ struct cleancache_ops {
54955 void (*flush_page)(int, struct cleancache_filekey, pgoff_t);
54956 void (*flush_inode)(int, struct cleancache_filekey);
54957 void (*flush_fs)(int);
54958 -};
54959 +} __no_const;
54960
54961 extern struct cleancache_ops
54962 cleancache_register_ops(struct cleancache_ops *ops);
54963 diff -urNp linux-3.0.4/include/linux/compiler-gcc4.h linux-3.0.4/include/linux/compiler-gcc4.h
54964 --- linux-3.0.4/include/linux/compiler-gcc4.h 2011-07-21 22:17:23.000000000 -0400
54965 +++ linux-3.0.4/include/linux/compiler-gcc4.h 2011-08-26 19:49:56.000000000 -0400
54966 @@ -31,6 +31,12 @@
54967
54968
54969 #if __GNUC_MINOR__ >= 5
54970 +
54971 +#ifdef CONSTIFY_PLUGIN
54972 +#define __no_const __attribute__((no_const))
54973 +#define __do_const __attribute__((do_const))
54974 +#endif
54975 +
54976 /*
54977 * Mark a position in code as unreachable. This can be used to
54978 * suppress control flow warnings after asm blocks that transfer
54979 @@ -46,6 +52,11 @@
54980 #define __noclone __attribute__((__noclone__))
54981
54982 #endif
54983 +
54984 +#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
54985 +#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
54986 +#define __bos0(ptr) __bos((ptr), 0)
54987 +#define __bos1(ptr) __bos((ptr), 1)
54988 #endif
54989
54990 #if __GNUC_MINOR__ > 0
54991 diff -urNp linux-3.0.4/include/linux/compiler.h linux-3.0.4/include/linux/compiler.h
54992 --- linux-3.0.4/include/linux/compiler.h 2011-07-21 22:17:23.000000000 -0400
54993 +++ linux-3.0.4/include/linux/compiler.h 2011-08-26 19:49:56.000000000 -0400
54994 @@ -264,6 +264,14 @@ void ftrace_likely_update(struct ftrace_
54995 # define __attribute_const__ /* unimplemented */
54996 #endif
54997
54998 +#ifndef __no_const
54999 +# define __no_const
55000 +#endif
55001 +
55002 +#ifndef __do_const
55003 +# define __do_const
55004 +#endif
55005 +
55006 /*
55007 * Tell gcc if a function is cold. The compiler will assume any path
55008 * directly leading to the call is unlikely.
55009 @@ -273,6 +281,22 @@ void ftrace_likely_update(struct ftrace_
55010 #define __cold
55011 #endif
55012
55013 +#ifndef __alloc_size
55014 +#define __alloc_size(...)
55015 +#endif
55016 +
55017 +#ifndef __bos
55018 +#define __bos(ptr, arg)
55019 +#endif
55020 +
55021 +#ifndef __bos0
55022 +#define __bos0(ptr)
55023 +#endif
55024 +
55025 +#ifndef __bos1
55026 +#define __bos1(ptr)
55027 +#endif
55028 +
55029 /* Simple shorthand for a section definition */
55030 #ifndef __section
55031 # define __section(S) __attribute__ ((__section__(#S)))
55032 @@ -306,6 +330,7 @@ void ftrace_likely_update(struct ftrace_
55033 * use is to mediate communication between process-level code and irq/NMI
55034 * handlers, all running on the same CPU.
55035 */
55036 -#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
55037 +#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
55038 +#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
55039
55040 #endif /* __LINUX_COMPILER_H */
55041 diff -urNp linux-3.0.4/include/linux/cpuset.h linux-3.0.4/include/linux/cpuset.h
55042 --- linux-3.0.4/include/linux/cpuset.h 2011-07-21 22:17:23.000000000 -0400
55043 +++ linux-3.0.4/include/linux/cpuset.h 2011-08-23 21:47:56.000000000 -0400
55044 @@ -118,7 +118,7 @@ static inline void put_mems_allowed(void
55045 * nodemask.
55046 */
55047 smp_mb();
55048 - --ACCESS_ONCE(current->mems_allowed_change_disable);
55049 + --ACCESS_ONCE_RW(current->mems_allowed_change_disable);
55050 }
55051
55052 static inline void set_mems_allowed(nodemask_t nodemask)
55053 diff -urNp linux-3.0.4/include/linux/crypto.h linux-3.0.4/include/linux/crypto.h
55054 --- linux-3.0.4/include/linux/crypto.h 2011-07-21 22:17:23.000000000 -0400
55055 +++ linux-3.0.4/include/linux/crypto.h 2011-08-23 21:47:56.000000000 -0400
55056 @@ -361,7 +361,7 @@ struct cipher_tfm {
55057 const u8 *key, unsigned int keylen);
55058 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
55059 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
55060 -};
55061 +} __no_const;
55062
55063 struct hash_tfm {
55064 int (*init)(struct hash_desc *desc);
55065 @@ -382,13 +382,13 @@ struct compress_tfm {
55066 int (*cot_decompress)(struct crypto_tfm *tfm,
55067 const u8 *src, unsigned int slen,
55068 u8 *dst, unsigned int *dlen);
55069 -};
55070 +} __no_const;
55071
55072 struct rng_tfm {
55073 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
55074 unsigned int dlen);
55075 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
55076 -};
55077 +} __no_const;
55078
55079 #define crt_ablkcipher crt_u.ablkcipher
55080 #define crt_aead crt_u.aead
55081 diff -urNp linux-3.0.4/include/linux/decompress/mm.h linux-3.0.4/include/linux/decompress/mm.h
55082 --- linux-3.0.4/include/linux/decompress/mm.h 2011-07-21 22:17:23.000000000 -0400
55083 +++ linux-3.0.4/include/linux/decompress/mm.h 2011-08-23 21:47:56.000000000 -0400
55084 @@ -77,7 +77,7 @@ static void free(void *where)
55085 * warnings when not needed (indeed large_malloc / large_free are not
55086 * needed by inflate */
55087
55088 -#define malloc(a) kmalloc(a, GFP_KERNEL)
55089 +#define malloc(a) kmalloc((a), GFP_KERNEL)
55090 #define free(a) kfree(a)
55091
55092 #define large_malloc(a) vmalloc(a)
55093 diff -urNp linux-3.0.4/include/linux/dma-mapping.h linux-3.0.4/include/linux/dma-mapping.h
55094 --- linux-3.0.4/include/linux/dma-mapping.h 2011-07-21 22:17:23.000000000 -0400
55095 +++ linux-3.0.4/include/linux/dma-mapping.h 2011-08-26 19:49:56.000000000 -0400
55096 @@ -50,7 +50,7 @@ struct dma_map_ops {
55097 int (*dma_supported)(struct device *dev, u64 mask);
55098 int (*set_dma_mask)(struct device *dev, u64 mask);
55099 int is_phys;
55100 -};
55101 +} __do_const;
55102
55103 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
55104
55105 diff -urNp linux-3.0.4/include/linux/efi.h linux-3.0.4/include/linux/efi.h
55106 --- linux-3.0.4/include/linux/efi.h 2011-07-21 22:17:23.000000000 -0400
55107 +++ linux-3.0.4/include/linux/efi.h 2011-08-23 21:47:56.000000000 -0400
55108 @@ -410,7 +410,7 @@ struct efivar_operations {
55109 efi_get_variable_t *get_variable;
55110 efi_get_next_variable_t *get_next_variable;
55111 efi_set_variable_t *set_variable;
55112 -};
55113 +} __no_const;
55114
55115 struct efivars {
55116 /*
55117 diff -urNp linux-3.0.4/include/linux/elf.h linux-3.0.4/include/linux/elf.h
55118 --- linux-3.0.4/include/linux/elf.h 2011-07-21 22:17:23.000000000 -0400
55119 +++ linux-3.0.4/include/linux/elf.h 2011-08-23 21:47:56.000000000 -0400
55120 @@ -49,6 +49,17 @@ typedef __s64 Elf64_Sxword;
55121 #define PT_GNU_EH_FRAME 0x6474e550
55122
55123 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
55124 +#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
55125 +
55126 +#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
55127 +
55128 +/* Constants for the e_flags field */
55129 +#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
55130 +#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
55131 +#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
55132 +#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
55133 +/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
55134 +#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
55135
55136 /*
55137 * Extended Numbering
55138 @@ -106,6 +117,8 @@ typedef __s64 Elf64_Sxword;
55139 #define DT_DEBUG 21
55140 #define DT_TEXTREL 22
55141 #define DT_JMPREL 23
55142 +#define DT_FLAGS 30
55143 + #define DF_TEXTREL 0x00000004
55144 #define DT_ENCODING 32
55145 #define OLD_DT_LOOS 0x60000000
55146 #define DT_LOOS 0x6000000d
55147 @@ -252,6 +265,19 @@ typedef struct elf64_hdr {
55148 #define PF_W 0x2
55149 #define PF_X 0x1
55150
55151 +#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
55152 +#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
55153 +#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
55154 +#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
55155 +#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
55156 +#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
55157 +/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
55158 +/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
55159 +#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
55160 +#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
55161 +#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
55162 +#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
55163 +
55164 typedef struct elf32_phdr{
55165 Elf32_Word p_type;
55166 Elf32_Off p_offset;
55167 @@ -344,6 +370,8 @@ typedef struct elf64_shdr {
55168 #define EI_OSABI 7
55169 #define EI_PAD 8
55170
55171 +#define EI_PAX 14
55172 +
55173 #define ELFMAG0 0x7f /* EI_MAG */
55174 #define ELFMAG1 'E'
55175 #define ELFMAG2 'L'
55176 @@ -422,6 +450,7 @@ extern Elf32_Dyn _DYNAMIC [];
55177 #define elf_note elf32_note
55178 #define elf_addr_t Elf32_Off
55179 #define Elf_Half Elf32_Half
55180 +#define elf_dyn Elf32_Dyn
55181
55182 #else
55183
55184 @@ -432,6 +461,7 @@ extern Elf64_Dyn _DYNAMIC [];
55185 #define elf_note elf64_note
55186 #define elf_addr_t Elf64_Off
55187 #define Elf_Half Elf64_Half
55188 +#define elf_dyn Elf64_Dyn
55189
55190 #endif
55191
55192 diff -urNp linux-3.0.4/include/linux/firewire.h linux-3.0.4/include/linux/firewire.h
55193 --- linux-3.0.4/include/linux/firewire.h 2011-07-21 22:17:23.000000000 -0400
55194 +++ linux-3.0.4/include/linux/firewire.h 2011-08-23 21:47:56.000000000 -0400
55195 @@ -428,7 +428,7 @@ struct fw_iso_context {
55196 union {
55197 fw_iso_callback_t sc;
55198 fw_iso_mc_callback_t mc;
55199 - } callback;
55200 + } __no_const callback;
55201 void *callback_data;
55202 };
55203
55204 diff -urNp linux-3.0.4/include/linux/fscache-cache.h linux-3.0.4/include/linux/fscache-cache.h
55205 --- linux-3.0.4/include/linux/fscache-cache.h 2011-07-21 22:17:23.000000000 -0400
55206 +++ linux-3.0.4/include/linux/fscache-cache.h 2011-08-23 21:47:56.000000000 -0400
55207 @@ -102,7 +102,7 @@ struct fscache_operation {
55208 fscache_operation_release_t release;
55209 };
55210
55211 -extern atomic_t fscache_op_debug_id;
55212 +extern atomic_unchecked_t fscache_op_debug_id;
55213 extern void fscache_op_work_func(struct work_struct *work);
55214
55215 extern void fscache_enqueue_operation(struct fscache_operation *);
55216 @@ -122,7 +122,7 @@ static inline void fscache_operation_ini
55217 {
55218 INIT_WORK(&op->work, fscache_op_work_func);
55219 atomic_set(&op->usage, 1);
55220 - op->debug_id = atomic_inc_return(&fscache_op_debug_id);
55221 + op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
55222 op->processor = processor;
55223 op->release = release;
55224 INIT_LIST_HEAD(&op->pend_link);
55225 diff -urNp linux-3.0.4/include/linux/fs.h linux-3.0.4/include/linux/fs.h
55226 --- linux-3.0.4/include/linux/fs.h 2011-07-21 22:17:23.000000000 -0400
55227 +++ linux-3.0.4/include/linux/fs.h 2011-08-26 19:49:56.000000000 -0400
55228 @@ -109,6 +109,11 @@ struct inodes_stat_t {
55229 /* File was opened by fanotify and shouldn't generate fanotify events */
55230 #define FMODE_NONOTIFY ((__force fmode_t)0x1000000)
55231
55232 +/* Hack for grsec so as not to require read permission simply to execute
55233 + * a binary
55234 + */
55235 +#define FMODE_GREXEC ((__force fmode_t)0x2000000)
55236 +
55237 /*
55238 * The below are the various read and write types that we support. Some of
55239 * them include behavioral modifiers that send information down to the
55240 @@ -1571,7 +1576,8 @@ struct file_operations {
55241 int (*setlease)(struct file *, long, struct file_lock **);
55242 long (*fallocate)(struct file *file, int mode, loff_t offset,
55243 loff_t len);
55244 -};
55245 +} __do_const;
55246 +typedef struct file_operations __no_const file_operations_no_const;
55247
55248 #define IPERM_FLAG_RCU 0x0001
55249
55250 diff -urNp linux-3.0.4/include/linux/fsnotify.h linux-3.0.4/include/linux/fsnotify.h
55251 --- linux-3.0.4/include/linux/fsnotify.h 2011-07-21 22:17:23.000000000 -0400
55252 +++ linux-3.0.4/include/linux/fsnotify.h 2011-08-24 18:10:29.000000000 -0400
55253 @@ -314,7 +314,7 @@ static inline void fsnotify_change(struc
55254 */
55255 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
55256 {
55257 - return kstrdup(name, GFP_KERNEL);
55258 + return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
55259 }
55260
55261 /*
55262 diff -urNp linux-3.0.4/include/linux/fs_struct.h linux-3.0.4/include/linux/fs_struct.h
55263 --- linux-3.0.4/include/linux/fs_struct.h 2011-07-21 22:17:23.000000000 -0400
55264 +++ linux-3.0.4/include/linux/fs_struct.h 2011-08-23 21:47:56.000000000 -0400
55265 @@ -6,7 +6,7 @@
55266 #include <linux/seqlock.h>
55267
55268 struct fs_struct {
55269 - int users;
55270 + atomic_t users;
55271 spinlock_t lock;
55272 seqcount_t seq;
55273 int umask;
55274 diff -urNp linux-3.0.4/include/linux/ftrace_event.h linux-3.0.4/include/linux/ftrace_event.h
55275 --- linux-3.0.4/include/linux/ftrace_event.h 2011-07-21 22:17:23.000000000 -0400
55276 +++ linux-3.0.4/include/linux/ftrace_event.h 2011-08-23 21:47:56.000000000 -0400
55277 @@ -96,7 +96,7 @@ struct trace_event_functions {
55278 trace_print_func raw;
55279 trace_print_func hex;
55280 trace_print_func binary;
55281 -};
55282 +} __no_const;
55283
55284 struct trace_event {
55285 struct hlist_node node;
55286 @@ -247,7 +247,7 @@ extern int trace_define_field(struct ftr
55287 extern int trace_add_event_call(struct ftrace_event_call *call);
55288 extern void trace_remove_event_call(struct ftrace_event_call *call);
55289
55290 -#define is_signed_type(type) (((type)(-1)) < 0)
55291 +#define is_signed_type(type) (((type)(-1)) < (type)1)
55292
55293 int trace_set_clr_event(const char *system, const char *event, int set);
55294
55295 diff -urNp linux-3.0.4/include/linux/genhd.h linux-3.0.4/include/linux/genhd.h
55296 --- linux-3.0.4/include/linux/genhd.h 2011-07-21 22:17:23.000000000 -0400
55297 +++ linux-3.0.4/include/linux/genhd.h 2011-08-23 21:47:56.000000000 -0400
55298 @@ -184,7 +184,7 @@ struct gendisk {
55299 struct kobject *slave_dir;
55300
55301 struct timer_rand_state *random;
55302 - atomic_t sync_io; /* RAID */
55303 + atomic_unchecked_t sync_io; /* RAID */
55304 struct disk_events *ev;
55305 #ifdef CONFIG_BLK_DEV_INTEGRITY
55306 struct blk_integrity *integrity;
55307 diff -urNp linux-3.0.4/include/linux/gracl.h linux-3.0.4/include/linux/gracl.h
55308 --- linux-3.0.4/include/linux/gracl.h 1969-12-31 19:00:00.000000000 -0500
55309 +++ linux-3.0.4/include/linux/gracl.h 2011-08-23 21:48:14.000000000 -0400
55310 @@ -0,0 +1,317 @@
55311 +#ifndef GR_ACL_H
55312 +#define GR_ACL_H
55313 +
55314 +#include <linux/grdefs.h>
55315 +#include <linux/resource.h>
55316 +#include <linux/capability.h>
55317 +#include <linux/dcache.h>
55318 +#include <asm/resource.h>
55319 +
55320 +/* Major status information */
55321 +
55322 +#define GR_VERSION "grsecurity 2.2.2"
55323 +#define GRSECURITY_VERSION 0x2202
55324 +
55325 +enum {
55326 + GR_SHUTDOWN = 0,
55327 + GR_ENABLE = 1,
55328 + GR_SPROLE = 2,
55329 + GR_RELOAD = 3,
55330 + GR_SEGVMOD = 4,
55331 + GR_STATUS = 5,
55332 + GR_UNSPROLE = 6,
55333 + GR_PASSSET = 7,
55334 + GR_SPROLEPAM = 8,
55335 +};
55336 +
55337 +/* Password setup definitions
55338 + * kernel/grhash.c */
55339 +enum {
55340 + GR_PW_LEN = 128,
55341 + GR_SALT_LEN = 16,
55342 + GR_SHA_LEN = 32,
55343 +};
55344 +
55345 +enum {
55346 + GR_SPROLE_LEN = 64,
55347 +};
55348 +
55349 +enum {
55350 + GR_NO_GLOB = 0,
55351 + GR_REG_GLOB,
55352 + GR_CREATE_GLOB
55353 +};
55354 +
55355 +#define GR_NLIMITS 32
55356 +
55357 +/* Begin Data Structures */
55358 +
55359 +struct sprole_pw {
55360 + unsigned char *rolename;
55361 + unsigned char salt[GR_SALT_LEN];
55362 + unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
55363 +};
55364 +
55365 +struct name_entry {
55366 + __u32 key;
55367 + ino_t inode;
55368 + dev_t device;
55369 + char *name;
55370 + __u16 len;
55371 + __u8 deleted;
55372 + struct name_entry *prev;
55373 + struct name_entry *next;
55374 +};
55375 +
55376 +struct inodev_entry {
55377 + struct name_entry *nentry;
55378 + struct inodev_entry *prev;
55379 + struct inodev_entry *next;
55380 +};
55381 +
55382 +struct acl_role_db {
55383 + struct acl_role_label **r_hash;
55384 + __u32 r_size;
55385 +};
55386 +
55387 +struct inodev_db {
55388 + struct inodev_entry **i_hash;
55389 + __u32 i_size;
55390 +};
55391 +
55392 +struct name_db {
55393 + struct name_entry **n_hash;
55394 + __u32 n_size;
55395 +};
55396 +
55397 +struct crash_uid {
55398 + uid_t uid;
55399 + unsigned long expires;
55400 +};
55401 +
55402 +struct gr_hash_struct {
55403 + void **table;
55404 + void **nametable;
55405 + void *first;
55406 + __u32 table_size;
55407 + __u32 used_size;
55408 + int type;
55409 +};
55410 +
55411 +/* Userspace Grsecurity ACL data structures */
55412 +
55413 +struct acl_subject_label {
55414 + char *filename;
55415 + ino_t inode;
55416 + dev_t device;
55417 + __u32 mode;
55418 + kernel_cap_t cap_mask;
55419 + kernel_cap_t cap_lower;
55420 + kernel_cap_t cap_invert_audit;
55421 +
55422 + struct rlimit res[GR_NLIMITS];
55423 + __u32 resmask;
55424 +
55425 + __u8 user_trans_type;
55426 + __u8 group_trans_type;
55427 + uid_t *user_transitions;
55428 + gid_t *group_transitions;
55429 + __u16 user_trans_num;
55430 + __u16 group_trans_num;
55431 +
55432 + __u32 sock_families[2];
55433 + __u32 ip_proto[8];
55434 + __u32 ip_type;
55435 + struct acl_ip_label **ips;
55436 + __u32 ip_num;
55437 + __u32 inaddr_any_override;
55438 +
55439 + __u32 crashes;
55440 + unsigned long expires;
55441 +
55442 + struct acl_subject_label *parent_subject;
55443 + struct gr_hash_struct *hash;
55444 + struct acl_subject_label *prev;
55445 + struct acl_subject_label *next;
55446 +
55447 + struct acl_object_label **obj_hash;
55448 + __u32 obj_hash_size;
55449 + __u16 pax_flags;
55450 +};
55451 +
55452 +struct role_allowed_ip {
55453 + __u32 addr;
55454 + __u32 netmask;
55455 +
55456 + struct role_allowed_ip *prev;
55457 + struct role_allowed_ip *next;
55458 +};
55459 +
55460 +struct role_transition {
55461 + char *rolename;
55462 +
55463 + struct role_transition *prev;
55464 + struct role_transition *next;
55465 +};
55466 +
55467 +struct acl_role_label {
55468 + char *rolename;
55469 + uid_t uidgid;
55470 + __u16 roletype;
55471 +
55472 + __u16 auth_attempts;
55473 + unsigned long expires;
55474 +
55475 + struct acl_subject_label *root_label;
55476 + struct gr_hash_struct *hash;
55477 +
55478 + struct acl_role_label *prev;
55479 + struct acl_role_label *next;
55480 +
55481 + struct role_transition *transitions;
55482 + struct role_allowed_ip *allowed_ips;
55483 + uid_t *domain_children;
55484 + __u16 domain_child_num;
55485 +
55486 + struct acl_subject_label **subj_hash;
55487 + __u32 subj_hash_size;
55488 +};
55489 +
55490 +struct user_acl_role_db {
55491 + struct acl_role_label **r_table;
55492 + __u32 num_pointers; /* Number of allocations to track */
55493 + __u32 num_roles; /* Number of roles */
55494 + __u32 num_domain_children; /* Number of domain children */
55495 + __u32 num_subjects; /* Number of subjects */
55496 + __u32 num_objects; /* Number of objects */
55497 +};
55498 +
55499 +struct acl_object_label {
55500 + char *filename;
55501 + ino_t inode;
55502 + dev_t device;
55503 + __u32 mode;
55504 +
55505 + struct acl_subject_label *nested;
55506 + struct acl_object_label *globbed;
55507 +
55508 + /* next two structures not used */
55509 +
55510 + struct acl_object_label *prev;
55511 + struct acl_object_label *next;
55512 +};
55513 +
55514 +struct acl_ip_label {
55515 + char *iface;
55516 + __u32 addr;
55517 + __u32 netmask;
55518 + __u16 low, high;
55519 + __u8 mode;
55520 + __u32 type;
55521 + __u32 proto[8];
55522 +
55523 + /* next two structures not used */
55524 +
55525 + struct acl_ip_label *prev;
55526 + struct acl_ip_label *next;
55527 +};
55528 +
55529 +struct gr_arg {
55530 + struct user_acl_role_db role_db;
55531 + unsigned char pw[GR_PW_LEN];
55532 + unsigned char salt[GR_SALT_LEN];
55533 + unsigned char sum[GR_SHA_LEN];
55534 + unsigned char sp_role[GR_SPROLE_LEN];
55535 + struct sprole_pw *sprole_pws;
55536 + dev_t segv_device;
55537 + ino_t segv_inode;
55538 + uid_t segv_uid;
55539 + __u16 num_sprole_pws;
55540 + __u16 mode;
55541 +};
55542 +
55543 +struct gr_arg_wrapper {
55544 + struct gr_arg *arg;
55545 + __u32 version;
55546 + __u32 size;
55547 +};
55548 +
55549 +struct subject_map {
55550 + struct acl_subject_label *user;
55551 + struct acl_subject_label *kernel;
55552 + struct subject_map *prev;
55553 + struct subject_map *next;
55554 +};
55555 +
55556 +struct acl_subj_map_db {
55557 + struct subject_map **s_hash;
55558 + __u32 s_size;
55559 +};
55560 +
55561 +/* End Data Structures Section */
55562 +
55563 +/* Hash functions generated by empirical testing by Brad Spengler
55564 + Makes good use of the low bits of the inode. Generally 0-1 times
55565 + in loop for successful match. 0-3 for unsuccessful match.
55566 + Shift/add algorithm with modulus of table size and an XOR*/
55567 +
55568 +static __inline__ unsigned int
55569 +rhash(const uid_t uid, const __u16 type, const unsigned int sz)
55570 +{
55571 + return ((((uid + type) << (16 + type)) ^ uid) % sz);
55572 +}
55573 +
55574 + static __inline__ unsigned int
55575 +shash(const struct acl_subject_label *userp, const unsigned int sz)
55576 +{
55577 + return ((const unsigned long)userp % sz);
55578 +}
55579 +
55580 +static __inline__ unsigned int
55581 +fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
55582 +{
55583 + return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
55584 +}
55585 +
55586 +static __inline__ unsigned int
55587 +nhash(const char *name, const __u16 len, const unsigned int sz)
55588 +{
55589 + return full_name_hash((const unsigned char *)name, len) % sz;
55590 +}
55591 +
55592 +#define FOR_EACH_ROLE_START(role) \
55593 + role = role_list; \
55594 + while (role) {
55595 +
55596 +#define FOR_EACH_ROLE_END(role) \
55597 + role = role->prev; \
55598 + }
55599 +
55600 +#define FOR_EACH_SUBJECT_START(role,subj,iter) \
55601 + subj = NULL; \
55602 + iter = 0; \
55603 + while (iter < role->subj_hash_size) { \
55604 + if (subj == NULL) \
55605 + subj = role->subj_hash[iter]; \
55606 + if (subj == NULL) { \
55607 + iter++; \
55608 + continue; \
55609 + }
55610 +
55611 +#define FOR_EACH_SUBJECT_END(subj,iter) \
55612 + subj = subj->next; \
55613 + if (subj == NULL) \
55614 + iter++; \
55615 + }
55616 +
55617 +
55618 +#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
55619 + subj = role->hash->first; \
55620 + while (subj != NULL) {
55621 +
55622 +#define FOR_EACH_NESTED_SUBJECT_END(subj) \
55623 + subj = subj->next; \
55624 + }
55625 +
55626 +#endif
55627 +
55628 diff -urNp linux-3.0.4/include/linux/gralloc.h linux-3.0.4/include/linux/gralloc.h
55629 --- linux-3.0.4/include/linux/gralloc.h 1969-12-31 19:00:00.000000000 -0500
55630 +++ linux-3.0.4/include/linux/gralloc.h 2011-08-23 21:48:14.000000000 -0400
55631 @@ -0,0 +1,9 @@
55632 +#ifndef __GRALLOC_H
55633 +#define __GRALLOC_H
55634 +
55635 +void acl_free_all(void);
55636 +int acl_alloc_stack_init(unsigned long size);
55637 +void *acl_alloc(unsigned long len);
55638 +void *acl_alloc_num(unsigned long num, unsigned long len);
55639 +
55640 +#endif
55641 diff -urNp linux-3.0.4/include/linux/grdefs.h linux-3.0.4/include/linux/grdefs.h
55642 --- linux-3.0.4/include/linux/grdefs.h 1969-12-31 19:00:00.000000000 -0500
55643 +++ linux-3.0.4/include/linux/grdefs.h 2011-08-23 21:48:14.000000000 -0400
55644 @@ -0,0 +1,140 @@
55645 +#ifndef GRDEFS_H
55646 +#define GRDEFS_H
55647 +
55648 +/* Begin grsecurity status declarations */
55649 +
55650 +enum {
55651 + GR_READY = 0x01,
55652 + GR_STATUS_INIT = 0x00 // disabled state
55653 +};
55654 +
55655 +/* Begin ACL declarations */
55656 +
55657 +/* Role flags */
55658 +
55659 +enum {
55660 + GR_ROLE_USER = 0x0001,
55661 + GR_ROLE_GROUP = 0x0002,
55662 + GR_ROLE_DEFAULT = 0x0004,
55663 + GR_ROLE_SPECIAL = 0x0008,
55664 + GR_ROLE_AUTH = 0x0010,
55665 + GR_ROLE_NOPW = 0x0020,
55666 + GR_ROLE_GOD = 0x0040,
55667 + GR_ROLE_LEARN = 0x0080,
55668 + GR_ROLE_TPE = 0x0100,
55669 + GR_ROLE_DOMAIN = 0x0200,
55670 + GR_ROLE_PAM = 0x0400,
55671 + GR_ROLE_PERSIST = 0x0800
55672 +};
55673 +
55674 +/* ACL Subject and Object mode flags */
55675 +enum {
55676 + GR_DELETED = 0x80000000
55677 +};
55678 +
55679 +/* ACL Object-only mode flags */
55680 +enum {
55681 + GR_READ = 0x00000001,
55682 + GR_APPEND = 0x00000002,
55683 + GR_WRITE = 0x00000004,
55684 + GR_EXEC = 0x00000008,
55685 + GR_FIND = 0x00000010,
55686 + GR_INHERIT = 0x00000020,
55687 + GR_SETID = 0x00000040,
55688 + GR_CREATE = 0x00000080,
55689 + GR_DELETE = 0x00000100,
55690 + GR_LINK = 0x00000200,
55691 + GR_AUDIT_READ = 0x00000400,
55692 + GR_AUDIT_APPEND = 0x00000800,
55693 + GR_AUDIT_WRITE = 0x00001000,
55694 + GR_AUDIT_EXEC = 0x00002000,
55695 + GR_AUDIT_FIND = 0x00004000,
55696 + GR_AUDIT_INHERIT= 0x00008000,
55697 + GR_AUDIT_SETID = 0x00010000,
55698 + GR_AUDIT_CREATE = 0x00020000,
55699 + GR_AUDIT_DELETE = 0x00040000,
55700 + GR_AUDIT_LINK = 0x00080000,
55701 + GR_PTRACERD = 0x00100000,
55702 + GR_NOPTRACE = 0x00200000,
55703 + GR_SUPPRESS = 0x00400000,
55704 + GR_NOLEARN = 0x00800000,
55705 + GR_INIT_TRANSFER= 0x01000000
55706 +};
55707 +
55708 +#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
55709 + GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
55710 + GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
55711 +
55712 +/* ACL subject-only mode flags */
55713 +enum {
55714 + GR_KILL = 0x00000001,
55715 + GR_VIEW = 0x00000002,
55716 + GR_PROTECTED = 0x00000004,
55717 + GR_LEARN = 0x00000008,
55718 + GR_OVERRIDE = 0x00000010,
55719 + /* just a placeholder, this mode is only used in userspace */
55720 + GR_DUMMY = 0x00000020,
55721 + GR_PROTSHM = 0x00000040,
55722 + GR_KILLPROC = 0x00000080,
55723 + GR_KILLIPPROC = 0x00000100,
55724 + /* just a placeholder, this mode is only used in userspace */
55725 + GR_NOTROJAN = 0x00000200,
55726 + GR_PROTPROCFD = 0x00000400,
55727 + GR_PROCACCT = 0x00000800,
55728 + GR_RELAXPTRACE = 0x00001000,
55729 + GR_NESTED = 0x00002000,
55730 + GR_INHERITLEARN = 0x00004000,
55731 + GR_PROCFIND = 0x00008000,
55732 + GR_POVERRIDE = 0x00010000,
55733 + GR_KERNELAUTH = 0x00020000,
55734 + GR_ATSECURE = 0x00040000,
55735 + GR_SHMEXEC = 0x00080000
55736 +};
55737 +
55738 +enum {
55739 + GR_PAX_ENABLE_SEGMEXEC = 0x0001,
55740 + GR_PAX_ENABLE_PAGEEXEC = 0x0002,
55741 + GR_PAX_ENABLE_MPROTECT = 0x0004,
55742 + GR_PAX_ENABLE_RANDMMAP = 0x0008,
55743 + GR_PAX_ENABLE_EMUTRAMP = 0x0010,
55744 + GR_PAX_DISABLE_SEGMEXEC = 0x0100,
55745 + GR_PAX_DISABLE_PAGEEXEC = 0x0200,
55746 + GR_PAX_DISABLE_MPROTECT = 0x0400,
55747 + GR_PAX_DISABLE_RANDMMAP = 0x0800,
55748 + GR_PAX_DISABLE_EMUTRAMP = 0x1000,
55749 +};
55750 +
55751 +enum {
55752 + GR_ID_USER = 0x01,
55753 + GR_ID_GROUP = 0x02,
55754 +};
55755 +
55756 +enum {
55757 + GR_ID_ALLOW = 0x01,
55758 + GR_ID_DENY = 0x02,
55759 +};
55760 +
55761 +#define GR_CRASH_RES 31
55762 +#define GR_UIDTABLE_MAX 500
55763 +
55764 +/* begin resource learning section */
55765 +enum {
55766 + GR_RLIM_CPU_BUMP = 60,
55767 + GR_RLIM_FSIZE_BUMP = 50000,
55768 + GR_RLIM_DATA_BUMP = 10000,
55769 + GR_RLIM_STACK_BUMP = 1000,
55770 + GR_RLIM_CORE_BUMP = 10000,
55771 + GR_RLIM_RSS_BUMP = 500000,
55772 + GR_RLIM_NPROC_BUMP = 1,
55773 + GR_RLIM_NOFILE_BUMP = 5,
55774 + GR_RLIM_MEMLOCK_BUMP = 50000,
55775 + GR_RLIM_AS_BUMP = 500000,
55776 + GR_RLIM_LOCKS_BUMP = 2,
55777 + GR_RLIM_SIGPENDING_BUMP = 5,
55778 + GR_RLIM_MSGQUEUE_BUMP = 10000,
55779 + GR_RLIM_NICE_BUMP = 1,
55780 + GR_RLIM_RTPRIO_BUMP = 1,
55781 + GR_RLIM_RTTIME_BUMP = 1000000
55782 +};
55783 +
55784 +#endif
55785 diff -urNp linux-3.0.4/include/linux/grinternal.h linux-3.0.4/include/linux/grinternal.h
55786 --- linux-3.0.4/include/linux/grinternal.h 1969-12-31 19:00:00.000000000 -0500
55787 +++ linux-3.0.4/include/linux/grinternal.h 2011-09-24 08:43:45.000000000 -0400
55788 @@ -0,0 +1,219 @@
55789 +#ifndef __GRINTERNAL_H
55790 +#define __GRINTERNAL_H
55791 +
55792 +#ifdef CONFIG_GRKERNSEC
55793 +
55794 +#include <linux/fs.h>
55795 +#include <linux/mnt_namespace.h>
55796 +#include <linux/nsproxy.h>
55797 +#include <linux/gracl.h>
55798 +#include <linux/grdefs.h>
55799 +#include <linux/grmsg.h>
55800 +
55801 +void gr_add_learn_entry(const char *fmt, ...)
55802 + __attribute__ ((format (printf, 1, 2)));
55803 +__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
55804 + const struct vfsmount *mnt);
55805 +__u32 gr_check_create(const struct dentry *new_dentry,
55806 + const struct dentry *parent,
55807 + const struct vfsmount *mnt, const __u32 mode);
55808 +int gr_check_protected_task(const struct task_struct *task);
55809 +__u32 to_gr_audit(const __u32 reqmode);
55810 +int gr_set_acls(const int type);
55811 +int gr_apply_subject_to_task(struct task_struct *task);
55812 +int gr_acl_is_enabled(void);
55813 +char gr_roletype_to_char(void);
55814 +
55815 +void gr_handle_alertkill(struct task_struct *task);
55816 +char *gr_to_filename(const struct dentry *dentry,
55817 + const struct vfsmount *mnt);
55818 +char *gr_to_filename1(const struct dentry *dentry,
55819 + const struct vfsmount *mnt);
55820 +char *gr_to_filename2(const struct dentry *dentry,
55821 + const struct vfsmount *mnt);
55822 +char *gr_to_filename3(const struct dentry *dentry,
55823 + const struct vfsmount *mnt);
55824 +
55825 +extern int grsec_enable_harden_ptrace;
55826 +extern int grsec_enable_link;
55827 +extern int grsec_enable_fifo;
55828 +extern int grsec_enable_execve;
55829 +extern int grsec_enable_shm;
55830 +extern int grsec_enable_execlog;
55831 +extern int grsec_enable_signal;
55832 +extern int grsec_enable_audit_ptrace;
55833 +extern int grsec_enable_forkfail;
55834 +extern int grsec_enable_time;
55835 +extern int grsec_enable_rofs;
55836 +extern int grsec_enable_chroot_shmat;
55837 +extern int grsec_enable_chroot_mount;
55838 +extern int grsec_enable_chroot_double;
55839 +extern int grsec_enable_chroot_pivot;
55840 +extern int grsec_enable_chroot_chdir;
55841 +extern int grsec_enable_chroot_chmod;
55842 +extern int grsec_enable_chroot_mknod;
55843 +extern int grsec_enable_chroot_fchdir;
55844 +extern int grsec_enable_chroot_nice;
55845 +extern int grsec_enable_chroot_execlog;
55846 +extern int grsec_enable_chroot_caps;
55847 +extern int grsec_enable_chroot_sysctl;
55848 +extern int grsec_enable_chroot_unix;
55849 +extern int grsec_enable_tpe;
55850 +extern int grsec_tpe_gid;
55851 +extern int grsec_enable_tpe_all;
55852 +extern int grsec_enable_tpe_invert;
55853 +extern int grsec_enable_socket_all;
55854 +extern int grsec_socket_all_gid;
55855 +extern int grsec_enable_socket_client;
55856 +extern int grsec_socket_client_gid;
55857 +extern int grsec_enable_socket_server;
55858 +extern int grsec_socket_server_gid;
55859 +extern int grsec_audit_gid;
55860 +extern int grsec_enable_group;
55861 +extern int grsec_enable_audit_textrel;
55862 +extern int grsec_enable_log_rwxmaps;
55863 +extern int grsec_enable_mount;
55864 +extern int grsec_enable_chdir;
55865 +extern int grsec_resource_logging;
55866 +extern int grsec_enable_blackhole;
55867 +extern int grsec_lastack_retries;
55868 +extern int grsec_enable_brute;
55869 +extern int grsec_lock;
55870 +
55871 +extern spinlock_t grsec_alert_lock;
55872 +extern unsigned long grsec_alert_wtime;
55873 +extern unsigned long grsec_alert_fyet;
55874 +
55875 +extern spinlock_t grsec_audit_lock;
55876 +
55877 +extern rwlock_t grsec_exec_file_lock;
55878 +
55879 +#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
55880 + gr_to_filename2((tsk)->exec_file->f_path.dentry, \
55881 + (tsk)->exec_file->f_vfsmnt) : "/")
55882 +
55883 +#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
55884 + gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
55885 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
55886 +
55887 +#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
55888 + gr_to_filename((tsk)->exec_file->f_path.dentry, \
55889 + (tsk)->exec_file->f_vfsmnt) : "/")
55890 +
55891 +#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
55892 + gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
55893 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
55894 +
55895 +#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
55896 +
55897 +#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
55898 +
55899 +#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
55900 + (task)->pid, (cred)->uid, \
55901 + (cred)->euid, (cred)->gid, (cred)->egid, \
55902 + gr_parent_task_fullpath(task), \
55903 + (task)->real_parent->comm, (task)->real_parent->pid, \
55904 + (pcred)->uid, (pcred)->euid, \
55905 + (pcred)->gid, (pcred)->egid
55906 +
55907 +#define GR_CHROOT_CAPS {{ \
55908 + CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
55909 + CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
55910 + CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
55911 + CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
55912 + CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
55913 + CAP_TO_MASK(CAP_IPC_OWNER) , CAP_TO_MASK(CAP_SYSLOG) }}
55914 +
55915 +#define security_learn(normal_msg,args...) \
55916 +({ \
55917 + read_lock(&grsec_exec_file_lock); \
55918 + gr_add_learn_entry(normal_msg "\n", ## args); \
55919 + read_unlock(&grsec_exec_file_lock); \
55920 +})
55921 +
55922 +enum {
55923 + GR_DO_AUDIT,
55924 + GR_DONT_AUDIT,
55925 + /* used for non-audit messages that we shouldn't kill the task on */
55926 + GR_DONT_AUDIT_GOOD
55927 +};
55928 +
55929 +enum {
55930 + GR_TTYSNIFF,
55931 + GR_RBAC,
55932 + GR_RBAC_STR,
55933 + GR_STR_RBAC,
55934 + GR_RBAC_MODE2,
55935 + GR_RBAC_MODE3,
55936 + GR_FILENAME,
55937 + GR_SYSCTL_HIDDEN,
55938 + GR_NOARGS,
55939 + GR_ONE_INT,
55940 + GR_ONE_INT_TWO_STR,
55941 + GR_ONE_STR,
55942 + GR_STR_INT,
55943 + GR_TWO_STR_INT,
55944 + GR_TWO_INT,
55945 + GR_TWO_U64,
55946 + GR_THREE_INT,
55947 + GR_FIVE_INT_TWO_STR,
55948 + GR_TWO_STR,
55949 + GR_THREE_STR,
55950 + GR_FOUR_STR,
55951 + GR_STR_FILENAME,
55952 + GR_FILENAME_STR,
55953 + GR_FILENAME_TWO_INT,
55954 + GR_FILENAME_TWO_INT_STR,
55955 + GR_TEXTREL,
55956 + GR_PTRACE,
55957 + GR_RESOURCE,
55958 + GR_CAP,
55959 + GR_SIG,
55960 + GR_SIG2,
55961 + GR_CRASH1,
55962 + GR_CRASH2,
55963 + GR_PSACCT,
55964 + GR_RWXMAP
55965 +};
55966 +
55967 +#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
55968 +#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
55969 +#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
55970 +#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
55971 +#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
55972 +#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
55973 +#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
55974 +#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
55975 +#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
55976 +#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
55977 +#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
55978 +#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
55979 +#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
55980 +#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
55981 +#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
55982 +#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
55983 +#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
55984 +#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
55985 +#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
55986 +#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
55987 +#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
55988 +#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
55989 +#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
55990 +#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
55991 +#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
55992 +#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
55993 +#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
55994 +#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
55995 +#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
55996 +#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
55997 +#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
55998 +#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
55999 +#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
56000 +#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
56001 +#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
56002 +
56003 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
56004 +
56005 +#endif
56006 +
56007 +#endif
56008 diff -urNp linux-3.0.4/include/linux/grmsg.h linux-3.0.4/include/linux/grmsg.h
56009 --- linux-3.0.4/include/linux/grmsg.h 1969-12-31 19:00:00.000000000 -0500
56010 +++ linux-3.0.4/include/linux/grmsg.h 2011-09-14 09:16:54.000000000 -0400
56011 @@ -0,0 +1,108 @@
56012 +#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
56013 +#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
56014 +#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
56015 +#define GR_STOPMOD_MSG "denied modification of module state by "
56016 +#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
56017 +#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
56018 +#define GR_IOPERM_MSG "denied use of ioperm() by "
56019 +#define GR_IOPL_MSG "denied use of iopl() by "
56020 +#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
56021 +#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
56022 +#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
56023 +#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
56024 +#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
56025 +#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
56026 +#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
56027 +#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
56028 +#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
56029 +#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
56030 +#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
56031 +#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
56032 +#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
56033 +#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
56034 +#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
56035 +#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
56036 +#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
56037 +#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
56038 +#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
56039 +#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
56040 +#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
56041 +#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
56042 +#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
56043 +#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
56044 +#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
56045 +#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
56046 +#define GR_EXEC_TPE_MSG "denied untrusted exec of %.950s by "
56047 +#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
56048 +#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
56049 +#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
56050 +#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
56051 +#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
56052 +#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
56053 +#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
56054 +#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
56055 +#define GR_FCHMOD_ACL_MSG "%s fchmod of %.950s by "
56056 +#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
56057 +#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
56058 +#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
56059 +#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
56060 +#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
56061 +#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
56062 +#define GR_INITF_ACL_MSG "init_variables() failed %s by "
56063 +#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
56064 +#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbaged by "
56065 +#define GR_SHUTS_ACL_MSG "shutdown auth success for "
56066 +#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
56067 +#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
56068 +#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
56069 +#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
56070 +#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
56071 +#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
56072 +#define GR_ENABLEF_ACL_MSG "unable to load %s for "
56073 +#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
56074 +#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
56075 +#define GR_RELOADF_ACL_MSG "failed reload of %s for "
56076 +#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
56077 +#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
56078 +#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
56079 +#define GR_SPROLEF_ACL_MSG "special role %s failure for "
56080 +#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
56081 +#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
56082 +#define GR_INVMODE_ACL_MSG "invalid mode %d by "
56083 +#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
56084 +#define GR_FAILFORK_MSG "failed fork with errno %s by "
56085 +#define GR_NICE_CHROOT_MSG "denied priority change by "
56086 +#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
56087 +#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
56088 +#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
56089 +#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
56090 +#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
56091 +#define GR_TIME_MSG "time set by "
56092 +#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
56093 +#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
56094 +#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
56095 +#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
56096 +#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
56097 +#define GR_BIND_MSG "denied bind() by "
56098 +#define GR_CONNECT_MSG "denied connect() by "
56099 +#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
56100 +#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
56101 +#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
56102 +#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
56103 +#define GR_CAP_ACL_MSG "use of %s denied for "
56104 +#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
56105 +#define GR_CAP_ACL_MSG2 "use of %s permitted for "
56106 +#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
56107 +#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
56108 +#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
56109 +#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
56110 +#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
56111 +#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
56112 +#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
56113 +#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
56114 +#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
56115 +#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
56116 +#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
56117 +#define GR_VM86_MSG "denied use of vm86 by "
56118 +#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
56119 +#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
56120 diff -urNp linux-3.0.4/include/linux/grsecurity.h linux-3.0.4/include/linux/grsecurity.h
56121 --- linux-3.0.4/include/linux/grsecurity.h 1969-12-31 19:00:00.000000000 -0500
56122 +++ linux-3.0.4/include/linux/grsecurity.h 2011-09-14 09:16:54.000000000 -0400
56123 @@ -0,0 +1,226 @@
56124 +#ifndef GR_SECURITY_H
56125 +#define GR_SECURITY_H
56126 +#include <linux/fs.h>
56127 +#include <linux/fs_struct.h>
56128 +#include <linux/binfmts.h>
56129 +#include <linux/gracl.h>
56130 +
56131 +/* notify of brain-dead configs */
56132 +#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
56133 +#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
56134 +#endif
56135 +#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
56136 +#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
56137 +#endif
56138 +#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
56139 +#error "CONFIG_PAX_NOEXEC enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
56140 +#endif
56141 +#if defined(CONFIG_PAX_ASLR) && (defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
56142 +#error "CONFIG_PAX_ASLR enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
56143 +#endif
56144 +#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
56145 +#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
56146 +#endif
56147 +#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
56148 +#error "CONFIG_PAX enabled, but no PaX options are enabled."
56149 +#endif
56150 +
56151 +#include <linux/compat.h>
56152 +
56153 +struct user_arg_ptr {
56154 +#ifdef CONFIG_COMPAT
56155 + bool is_compat;
56156 +#endif
56157 + union {
56158 + const char __user *const __user *native;
56159 +#ifdef CONFIG_COMPAT
56160 + compat_uptr_t __user *compat;
56161 +#endif
56162 + } ptr;
56163 +};
56164 +
56165 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
56166 +void gr_handle_brute_check(void);
56167 +void gr_handle_kernel_exploit(void);
56168 +int gr_process_user_ban(void);
56169 +
56170 +char gr_roletype_to_char(void);
56171 +
56172 +int gr_acl_enable_at_secure(void);
56173 +
56174 +int gr_check_user_change(int real, int effective, int fs);
56175 +int gr_check_group_change(int real, int effective, int fs);
56176 +
56177 +void gr_del_task_from_ip_table(struct task_struct *p);
56178 +
56179 +int gr_pid_is_chrooted(struct task_struct *p);
56180 +int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
56181 +int gr_handle_chroot_nice(void);
56182 +int gr_handle_chroot_sysctl(const int op);
56183 +int gr_handle_chroot_setpriority(struct task_struct *p,
56184 + const int niceval);
56185 +int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
56186 +int gr_handle_chroot_chroot(const struct dentry *dentry,
56187 + const struct vfsmount *mnt);
56188 +void gr_handle_chroot_chdir(struct path *path);
56189 +int gr_handle_chroot_chmod(const struct dentry *dentry,
56190 + const struct vfsmount *mnt, const int mode);
56191 +int gr_handle_chroot_mknod(const struct dentry *dentry,
56192 + const struct vfsmount *mnt, const int mode);
56193 +int gr_handle_chroot_mount(const struct dentry *dentry,
56194 + const struct vfsmount *mnt,
56195 + const char *dev_name);
56196 +int gr_handle_chroot_pivot(void);
56197 +int gr_handle_chroot_unix(const pid_t pid);
56198 +
56199 +int gr_handle_rawio(const struct inode *inode);
56200 +
56201 +void gr_handle_ioperm(void);
56202 +void gr_handle_iopl(void);
56203 +
56204 +int gr_tpe_allow(const struct file *file);
56205 +
56206 +void gr_set_chroot_entries(struct task_struct *task, struct path *path);
56207 +void gr_clear_chroot_entries(struct task_struct *task);
56208 +
56209 +void gr_log_forkfail(const int retval);
56210 +void gr_log_timechange(void);
56211 +void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
56212 +void gr_log_chdir(const struct dentry *dentry,
56213 + const struct vfsmount *mnt);
56214 +void gr_log_chroot_exec(const struct dentry *dentry,
56215 + const struct vfsmount *mnt);
56216 +void gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv);
56217 +void gr_log_remount(const char *devname, const int retval);
56218 +void gr_log_unmount(const char *devname, const int retval);
56219 +void gr_log_mount(const char *from, const char *to, const int retval);
56220 +void gr_log_textrel(struct vm_area_struct *vma);
56221 +void gr_log_rwxmmap(struct file *file);
56222 +void gr_log_rwxmprotect(struct file *file);
56223 +
56224 +int gr_handle_follow_link(const struct inode *parent,
56225 + const struct inode *inode,
56226 + const struct dentry *dentry,
56227 + const struct vfsmount *mnt);
56228 +int gr_handle_fifo(const struct dentry *dentry,
56229 + const struct vfsmount *mnt,
56230 + const struct dentry *dir, const int flag,
56231 + const int acc_mode);
56232 +int gr_handle_hardlink(const struct dentry *dentry,
56233 + const struct vfsmount *mnt,
56234 + struct inode *inode,
56235 + const int mode, const char *to);
56236 +
56237 +int gr_is_capable(const int cap);
56238 +int gr_is_capable_nolog(const int cap);
56239 +void gr_learn_resource(const struct task_struct *task, const int limit,
56240 + const unsigned long wanted, const int gt);
56241 +void gr_copy_label(struct task_struct *tsk);
56242 +void gr_handle_crash(struct task_struct *task, const int sig);
56243 +int gr_handle_signal(const struct task_struct *p, const int sig);
56244 +int gr_check_crash_uid(const uid_t uid);
56245 +int gr_check_protected_task(const struct task_struct *task);
56246 +int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
56247 +int gr_acl_handle_mmap(const struct file *file,
56248 + const unsigned long prot);
56249 +int gr_acl_handle_mprotect(const struct file *file,
56250 + const unsigned long prot);
56251 +int gr_check_hidden_task(const struct task_struct *tsk);
56252 +__u32 gr_acl_handle_truncate(const struct dentry *dentry,
56253 + const struct vfsmount *mnt);
56254 +__u32 gr_acl_handle_utime(const struct dentry *dentry,
56255 + const struct vfsmount *mnt);
56256 +__u32 gr_acl_handle_access(const struct dentry *dentry,
56257 + const struct vfsmount *mnt, const int fmode);
56258 +__u32 gr_acl_handle_fchmod(const struct dentry *dentry,
56259 + const struct vfsmount *mnt, mode_t mode);
56260 +__u32 gr_acl_handle_chmod(const struct dentry *dentry,
56261 + const struct vfsmount *mnt, mode_t mode);
56262 +__u32 gr_acl_handle_chown(const struct dentry *dentry,
56263 + const struct vfsmount *mnt);
56264 +__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
56265 + const struct vfsmount *mnt);
56266 +int gr_handle_ptrace(struct task_struct *task, const long request);
56267 +int gr_handle_proc_ptrace(struct task_struct *task);
56268 +__u32 gr_acl_handle_execve(const struct dentry *dentry,
56269 + const struct vfsmount *mnt);
56270 +int gr_check_crash_exec(const struct file *filp);
56271 +int gr_acl_is_enabled(void);
56272 +void gr_set_kernel_label(struct task_struct *task);
56273 +void gr_set_role_label(struct task_struct *task, const uid_t uid,
56274 + const gid_t gid);
56275 +int gr_set_proc_label(const struct dentry *dentry,
56276 + const struct vfsmount *mnt,
56277 + const int unsafe_share);
56278 +__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
56279 + const struct vfsmount *mnt);
56280 +__u32 gr_acl_handle_open(const struct dentry *dentry,
56281 + const struct vfsmount *mnt, const int fmode);
56282 +__u32 gr_acl_handle_creat(const struct dentry *dentry,
56283 + const struct dentry *p_dentry,
56284 + const struct vfsmount *p_mnt, const int fmode,
56285 + const int imode);
56286 +void gr_handle_create(const struct dentry *dentry,
56287 + const struct vfsmount *mnt);
56288 +__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
56289 + const struct dentry *parent_dentry,
56290 + const struct vfsmount *parent_mnt,
56291 + const int mode);
56292 +__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
56293 + const struct dentry *parent_dentry,
56294 + const struct vfsmount *parent_mnt);
56295 +__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
56296 + const struct vfsmount *mnt);
56297 +void gr_handle_delete(const ino_t ino, const dev_t dev);
56298 +__u32 gr_acl_handle_unlink(const struct dentry *dentry,
56299 + const struct vfsmount *mnt);
56300 +__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
56301 + const struct dentry *parent_dentry,
56302 + const struct vfsmount *parent_mnt,
56303 + const char *from);
56304 +__u32 gr_acl_handle_link(const struct dentry *new_dentry,
56305 + const struct dentry *parent_dentry,
56306 + const struct vfsmount *parent_mnt,
56307 + const struct dentry *old_dentry,
56308 + const struct vfsmount *old_mnt, const char *to);
56309 +int gr_acl_handle_rename(struct dentry *new_dentry,
56310 + struct dentry *parent_dentry,
56311 + const struct vfsmount *parent_mnt,
56312 + struct dentry *old_dentry,
56313 + struct inode *old_parent_inode,
56314 + struct vfsmount *old_mnt, const char *newname);
56315 +void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
56316 + struct dentry *old_dentry,
56317 + struct dentry *new_dentry,
56318 + struct vfsmount *mnt, const __u8 replace);
56319 +__u32 gr_check_link(const struct dentry *new_dentry,
56320 + const struct dentry *parent_dentry,
56321 + const struct vfsmount *parent_mnt,
56322 + const struct dentry *old_dentry,
56323 + const struct vfsmount *old_mnt);
56324 +int gr_acl_handle_filldir(const struct file *file, const char *name,
56325 + const unsigned int namelen, const ino_t ino);
56326 +
56327 +__u32 gr_acl_handle_unix(const struct dentry *dentry,
56328 + const struct vfsmount *mnt);
56329 +void gr_acl_handle_exit(void);
56330 +void gr_acl_handle_psacct(struct task_struct *task, const long code);
56331 +int gr_acl_handle_procpidmem(const struct task_struct *task);
56332 +int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
56333 +int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
56334 +void gr_audit_ptrace(struct task_struct *task);
56335 +dev_t gr_get_dev_from_dentry(struct dentry *dentry);
56336 +
56337 +#ifdef CONFIG_GRKERNSEC
56338 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
56339 +void gr_handle_vm86(void);
56340 +void gr_handle_mem_readwrite(u64 from, u64 to);
56341 +
56342 +extern int grsec_enable_dmesg;
56343 +extern int grsec_disable_privio;
56344 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
56345 +extern int grsec_enable_chroot_findtask;
56346 +#endif
56347 +#endif
56348 +
56349 +#endif
56350 diff -urNp linux-3.0.4/include/linux/grsock.h linux-3.0.4/include/linux/grsock.h
56351 --- linux-3.0.4/include/linux/grsock.h 1969-12-31 19:00:00.000000000 -0500
56352 +++ linux-3.0.4/include/linux/grsock.h 2011-08-23 21:48:14.000000000 -0400
56353 @@ -0,0 +1,19 @@
56354 +#ifndef __GRSOCK_H
56355 +#define __GRSOCK_H
56356 +
56357 +extern void gr_attach_curr_ip(const struct sock *sk);
56358 +extern int gr_handle_sock_all(const int family, const int type,
56359 + const int protocol);
56360 +extern int gr_handle_sock_server(const struct sockaddr *sck);
56361 +extern int gr_handle_sock_server_other(const struct sock *sck);
56362 +extern int gr_handle_sock_client(const struct sockaddr *sck);
56363 +extern int gr_search_connect(struct socket * sock,
56364 + struct sockaddr_in * addr);
56365 +extern int gr_search_bind(struct socket * sock,
56366 + struct sockaddr_in * addr);
56367 +extern int gr_search_listen(struct socket * sock);
56368 +extern int gr_search_accept(struct socket * sock);
56369 +extern int gr_search_socket(const int domain, const int type,
56370 + const int protocol);
56371 +
56372 +#endif
56373 diff -urNp linux-3.0.4/include/linux/hid.h linux-3.0.4/include/linux/hid.h
56374 --- linux-3.0.4/include/linux/hid.h 2011-07-21 22:17:23.000000000 -0400
56375 +++ linux-3.0.4/include/linux/hid.h 2011-08-23 21:47:56.000000000 -0400
56376 @@ -675,7 +675,7 @@ struct hid_ll_driver {
56377 unsigned int code, int value);
56378
56379 int (*parse)(struct hid_device *hdev);
56380 -};
56381 +} __no_const;
56382
56383 #define PM_HINT_FULLON 1<<5
56384 #define PM_HINT_NORMAL 1<<1
56385 diff -urNp linux-3.0.4/include/linux/highmem.h linux-3.0.4/include/linux/highmem.h
56386 --- linux-3.0.4/include/linux/highmem.h 2011-07-21 22:17:23.000000000 -0400
56387 +++ linux-3.0.4/include/linux/highmem.h 2011-08-23 21:47:56.000000000 -0400
56388 @@ -185,6 +185,18 @@ static inline void clear_highpage(struct
56389 kunmap_atomic(kaddr, KM_USER0);
56390 }
56391
56392 +static inline void sanitize_highpage(struct page *page)
56393 +{
56394 + void *kaddr;
56395 + unsigned long flags;
56396 +
56397 + local_irq_save(flags);
56398 + kaddr = kmap_atomic(page, KM_CLEARPAGE);
56399 + clear_page(kaddr);
56400 + kunmap_atomic(kaddr, KM_CLEARPAGE);
56401 + local_irq_restore(flags);
56402 +}
56403 +
56404 static inline void zero_user_segments(struct page *page,
56405 unsigned start1, unsigned end1,
56406 unsigned start2, unsigned end2)
56407 diff -urNp linux-3.0.4/include/linux/i2c.h linux-3.0.4/include/linux/i2c.h
56408 --- linux-3.0.4/include/linux/i2c.h 2011-07-21 22:17:23.000000000 -0400
56409 +++ linux-3.0.4/include/linux/i2c.h 2011-08-23 21:47:56.000000000 -0400
56410 @@ -346,6 +346,7 @@ struct i2c_algorithm {
56411 /* To determine what the adapter supports */
56412 u32 (*functionality) (struct i2c_adapter *);
56413 };
56414 +typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
56415
56416 /*
56417 * i2c_adapter is the structure used to identify a physical i2c bus along
56418 diff -urNp linux-3.0.4/include/linux/i2o.h linux-3.0.4/include/linux/i2o.h
56419 --- linux-3.0.4/include/linux/i2o.h 2011-07-21 22:17:23.000000000 -0400
56420 +++ linux-3.0.4/include/linux/i2o.h 2011-08-23 21:47:56.000000000 -0400
56421 @@ -564,7 +564,7 @@ struct i2o_controller {
56422 struct i2o_device *exec; /* Executive */
56423 #if BITS_PER_LONG == 64
56424 spinlock_t context_list_lock; /* lock for context_list */
56425 - atomic_t context_list_counter; /* needed for unique contexts */
56426 + atomic_unchecked_t context_list_counter; /* needed for unique contexts */
56427 struct list_head context_list; /* list of context id's
56428 and pointers */
56429 #endif
56430 diff -urNp linux-3.0.4/include/linux/init.h linux-3.0.4/include/linux/init.h
56431 --- linux-3.0.4/include/linux/init.h 2011-07-21 22:17:23.000000000 -0400
56432 +++ linux-3.0.4/include/linux/init.h 2011-08-23 21:47:56.000000000 -0400
56433 @@ -293,13 +293,13 @@ void __init parse_early_options(char *cm
56434
56435 /* Each module must use one module_init(). */
56436 #define module_init(initfn) \
56437 - static inline initcall_t __inittest(void) \
56438 + static inline __used initcall_t __inittest(void) \
56439 { return initfn; } \
56440 int init_module(void) __attribute__((alias(#initfn)));
56441
56442 /* This is only required if you want to be unloadable. */
56443 #define module_exit(exitfn) \
56444 - static inline exitcall_t __exittest(void) \
56445 + static inline __used exitcall_t __exittest(void) \
56446 { return exitfn; } \
56447 void cleanup_module(void) __attribute__((alias(#exitfn)));
56448
56449 diff -urNp linux-3.0.4/include/linux/init_task.h linux-3.0.4/include/linux/init_task.h
56450 --- linux-3.0.4/include/linux/init_task.h 2011-07-21 22:17:23.000000000 -0400
56451 +++ linux-3.0.4/include/linux/init_task.h 2011-08-23 21:47:56.000000000 -0400
56452 @@ -126,6 +126,12 @@ extern struct cred init_cred;
56453 # define INIT_PERF_EVENTS(tsk)
56454 #endif
56455
56456 +#ifdef CONFIG_X86
56457 +#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
56458 +#else
56459 +#define INIT_TASK_THREAD_INFO
56460 +#endif
56461 +
56462 /*
56463 * INIT_TASK is used to set up the first task table, touch at
56464 * your own risk!. Base=0, limit=0x1fffff (=2MB)
56465 @@ -164,6 +170,7 @@ extern struct cred init_cred;
56466 RCU_INIT_POINTER(.cred, &init_cred), \
56467 .comm = "swapper", \
56468 .thread = INIT_THREAD, \
56469 + INIT_TASK_THREAD_INFO \
56470 .fs = &init_fs, \
56471 .files = &init_files, \
56472 .signal = &init_signals, \
56473 diff -urNp linux-3.0.4/include/linux/intel-iommu.h linux-3.0.4/include/linux/intel-iommu.h
56474 --- linux-3.0.4/include/linux/intel-iommu.h 2011-07-21 22:17:23.000000000 -0400
56475 +++ linux-3.0.4/include/linux/intel-iommu.h 2011-08-23 21:47:56.000000000 -0400
56476 @@ -296,7 +296,7 @@ struct iommu_flush {
56477 u8 fm, u64 type);
56478 void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
56479 unsigned int size_order, u64 type);
56480 -};
56481 +} __no_const;
56482
56483 enum {
56484 SR_DMAR_FECTL_REG,
56485 diff -urNp linux-3.0.4/include/linux/interrupt.h linux-3.0.4/include/linux/interrupt.h
56486 --- linux-3.0.4/include/linux/interrupt.h 2011-07-21 22:17:23.000000000 -0400
56487 +++ linux-3.0.4/include/linux/interrupt.h 2011-08-23 21:47:56.000000000 -0400
56488 @@ -422,7 +422,7 @@ enum
56489 /* map softirq index to softirq name. update 'softirq_to_name' in
56490 * kernel/softirq.c when adding a new softirq.
56491 */
56492 -extern char *softirq_to_name[NR_SOFTIRQS];
56493 +extern const char * const softirq_to_name[NR_SOFTIRQS];
56494
56495 /* softirq mask and active fields moved to irq_cpustat_t in
56496 * asm/hardirq.h to get better cache usage. KAO
56497 @@ -430,12 +430,12 @@ extern char *softirq_to_name[NR_SOFTIRQS
56498
56499 struct softirq_action
56500 {
56501 - void (*action)(struct softirq_action *);
56502 + void (*action)(void);
56503 };
56504
56505 asmlinkage void do_softirq(void);
56506 asmlinkage void __do_softirq(void);
56507 -extern void open_softirq(int nr, void (*action)(struct softirq_action *));
56508 +extern void open_softirq(int nr, void (*action)(void));
56509 extern void softirq_init(void);
56510 static inline void __raise_softirq_irqoff(unsigned int nr)
56511 {
56512 diff -urNp linux-3.0.4/include/linux/kallsyms.h linux-3.0.4/include/linux/kallsyms.h
56513 --- linux-3.0.4/include/linux/kallsyms.h 2011-07-21 22:17:23.000000000 -0400
56514 +++ linux-3.0.4/include/linux/kallsyms.h 2011-08-23 21:48:14.000000000 -0400
56515 @@ -15,7 +15,8 @@
56516
56517 struct module;
56518
56519 -#ifdef CONFIG_KALLSYMS
56520 +#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
56521 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
56522 /* Lookup the address for a symbol. Returns 0 if not found. */
56523 unsigned long kallsyms_lookup_name(const char *name);
56524
56525 @@ -99,6 +100,16 @@ static inline int lookup_symbol_attrs(un
56526 /* Stupid that this does nothing, but I didn't create this mess. */
56527 #define __print_symbol(fmt, addr)
56528 #endif /*CONFIG_KALLSYMS*/
56529 +#else /* when included by kallsyms.c, vsnprintf.c, or
56530 + arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
56531 +extern void __print_symbol(const char *fmt, unsigned long address);
56532 +extern int sprint_backtrace(char *buffer, unsigned long address);
56533 +extern int sprint_symbol(char *buffer, unsigned long address);
56534 +const char *kallsyms_lookup(unsigned long addr,
56535 + unsigned long *symbolsize,
56536 + unsigned long *offset,
56537 + char **modname, char *namebuf);
56538 +#endif
56539
56540 /* This macro allows us to keep printk typechecking */
56541 static void __check_printsym_format(const char *fmt, ...)
56542 diff -urNp linux-3.0.4/include/linux/kgdb.h linux-3.0.4/include/linux/kgdb.h
56543 --- linux-3.0.4/include/linux/kgdb.h 2011-07-21 22:17:23.000000000 -0400
56544 +++ linux-3.0.4/include/linux/kgdb.h 2011-08-26 19:49:56.000000000 -0400
56545 @@ -53,7 +53,7 @@ extern int kgdb_connected;
56546 extern int kgdb_io_module_registered;
56547
56548 extern atomic_t kgdb_setting_breakpoint;
56549 -extern atomic_t kgdb_cpu_doing_single_step;
56550 +extern atomic_unchecked_t kgdb_cpu_doing_single_step;
56551
56552 extern struct task_struct *kgdb_usethread;
56553 extern struct task_struct *kgdb_contthread;
56554 @@ -251,7 +251,7 @@ struct kgdb_arch {
56555 void (*disable_hw_break)(struct pt_regs *regs);
56556 void (*remove_all_hw_break)(void);
56557 void (*correct_hw_break)(void);
56558 -};
56559 +} __do_const;
56560
56561 /**
56562 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
56563 @@ -276,7 +276,7 @@ struct kgdb_io {
56564 void (*pre_exception) (void);
56565 void (*post_exception) (void);
56566 int is_console;
56567 -};
56568 +} __do_const;
56569
56570 extern struct kgdb_arch arch_kgdb_ops;
56571
56572 diff -urNp linux-3.0.4/include/linux/kmod.h linux-3.0.4/include/linux/kmod.h
56573 --- linux-3.0.4/include/linux/kmod.h 2011-07-21 22:17:23.000000000 -0400
56574 +++ linux-3.0.4/include/linux/kmod.h 2011-08-23 21:48:14.000000000 -0400
56575 @@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysc
56576 * usually useless though. */
56577 extern int __request_module(bool wait, const char *name, ...) \
56578 __attribute__((format(printf, 2, 3)));
56579 +extern int ___request_module(bool wait, char *param_name, const char *name, ...) \
56580 + __attribute__((format(printf, 3, 4)));
56581 #define request_module(mod...) __request_module(true, mod)
56582 #define request_module_nowait(mod...) __request_module(false, mod)
56583 #define try_then_request_module(x, mod...) \
56584 diff -urNp linux-3.0.4/include/linux/kvm_host.h linux-3.0.4/include/linux/kvm_host.h
56585 --- linux-3.0.4/include/linux/kvm_host.h 2011-07-21 22:17:23.000000000 -0400
56586 +++ linux-3.0.4/include/linux/kvm_host.h 2011-08-23 21:47:56.000000000 -0400
56587 @@ -307,7 +307,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vc
56588 void vcpu_load(struct kvm_vcpu *vcpu);
56589 void vcpu_put(struct kvm_vcpu *vcpu);
56590
56591 -int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
56592 +int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
56593 struct module *module);
56594 void kvm_exit(void);
56595
56596 @@ -446,7 +446,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(
56597 struct kvm_guest_debug *dbg);
56598 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
56599
56600 -int kvm_arch_init(void *opaque);
56601 +int kvm_arch_init(const void *opaque);
56602 void kvm_arch_exit(void);
56603
56604 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
56605 diff -urNp linux-3.0.4/include/linux/libata.h linux-3.0.4/include/linux/libata.h
56606 --- linux-3.0.4/include/linux/libata.h 2011-07-21 22:17:23.000000000 -0400
56607 +++ linux-3.0.4/include/linux/libata.h 2011-08-26 19:49:56.000000000 -0400
56608 @@ -899,7 +899,7 @@ struct ata_port_operations {
56609 * fields must be pointers.
56610 */
56611 const struct ata_port_operations *inherits;
56612 -};
56613 +} __do_const;
56614
56615 struct ata_port_info {
56616 unsigned long flags;
56617 diff -urNp linux-3.0.4/include/linux/mca.h linux-3.0.4/include/linux/mca.h
56618 --- linux-3.0.4/include/linux/mca.h 2011-07-21 22:17:23.000000000 -0400
56619 +++ linux-3.0.4/include/linux/mca.h 2011-08-23 21:47:56.000000000 -0400
56620 @@ -80,7 +80,7 @@ struct mca_bus_accessor_functions {
56621 int region);
56622 void * (*mca_transform_memory)(struct mca_device *,
56623 void *memory);
56624 -};
56625 +} __no_const;
56626
56627 struct mca_bus {
56628 u64 default_dma_mask;
56629 diff -urNp linux-3.0.4/include/linux/memory.h linux-3.0.4/include/linux/memory.h
56630 --- linux-3.0.4/include/linux/memory.h 2011-07-21 22:17:23.000000000 -0400
56631 +++ linux-3.0.4/include/linux/memory.h 2011-08-23 21:47:56.000000000 -0400
56632 @@ -144,7 +144,7 @@ struct memory_accessor {
56633 size_t count);
56634 ssize_t (*write)(struct memory_accessor *, const char *buf,
56635 off_t offset, size_t count);
56636 -};
56637 +} __no_const;
56638
56639 /*
56640 * Kernel text modification mutex, used for code patching. Users of this lock
56641 diff -urNp linux-3.0.4/include/linux/mfd/abx500.h linux-3.0.4/include/linux/mfd/abx500.h
56642 --- linux-3.0.4/include/linux/mfd/abx500.h 2011-07-21 22:17:23.000000000 -0400
56643 +++ linux-3.0.4/include/linux/mfd/abx500.h 2011-08-23 21:47:56.000000000 -0400
56644 @@ -234,6 +234,7 @@ struct abx500_ops {
56645 int (*event_registers_startup_state_get) (struct device *, u8 *);
56646 int (*startup_irq_enabled) (struct device *, unsigned int);
56647 };
56648 +typedef struct abx500_ops __no_const abx500_ops_no_const;
56649
56650 int abx500_register_ops(struct device *core_dev, struct abx500_ops *ops);
56651 void abx500_remove_ops(struct device *dev);
56652 diff -urNp linux-3.0.4/include/linux/mm.h linux-3.0.4/include/linux/mm.h
56653 --- linux-3.0.4/include/linux/mm.h 2011-09-02 18:11:21.000000000 -0400
56654 +++ linux-3.0.4/include/linux/mm.h 2011-08-23 21:47:56.000000000 -0400
56655 @@ -113,7 +113,14 @@ extern unsigned int kobjsize(const void
56656
56657 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
56658 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
56659 +
56660 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
56661 +#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
56662 +#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
56663 +#else
56664 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
56665 +#endif
56666 +
56667 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
56668 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
56669
56670 @@ -1009,34 +1016,6 @@ int set_page_dirty(struct page *page);
56671 int set_page_dirty_lock(struct page *page);
56672 int clear_page_dirty_for_io(struct page *page);
56673
56674 -/* Is the vma a continuation of the stack vma above it? */
56675 -static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
56676 -{
56677 - return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
56678 -}
56679 -
56680 -static inline int stack_guard_page_start(struct vm_area_struct *vma,
56681 - unsigned long addr)
56682 -{
56683 - return (vma->vm_flags & VM_GROWSDOWN) &&
56684 - (vma->vm_start == addr) &&
56685 - !vma_growsdown(vma->vm_prev, addr);
56686 -}
56687 -
56688 -/* Is the vma a continuation of the stack vma below it? */
56689 -static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
56690 -{
56691 - return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
56692 -}
56693 -
56694 -static inline int stack_guard_page_end(struct vm_area_struct *vma,
56695 - unsigned long addr)
56696 -{
56697 - return (vma->vm_flags & VM_GROWSUP) &&
56698 - (vma->vm_end == addr) &&
56699 - !vma_growsup(vma->vm_next, addr);
56700 -}
56701 -
56702 extern unsigned long move_page_tables(struct vm_area_struct *vma,
56703 unsigned long old_addr, struct vm_area_struct *new_vma,
56704 unsigned long new_addr, unsigned long len);
56705 @@ -1169,6 +1148,15 @@ struct shrinker {
56706 extern void register_shrinker(struct shrinker *);
56707 extern void unregister_shrinker(struct shrinker *);
56708
56709 +#ifdef CONFIG_MMU
56710 +pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
56711 +#else
56712 +static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
56713 +{
56714 + return __pgprot(0);
56715 +}
56716 +#endif
56717 +
56718 int vma_wants_writenotify(struct vm_area_struct *vma);
56719
56720 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
56721 @@ -1452,6 +1440,7 @@ out:
56722 }
56723
56724 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
56725 +extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
56726
56727 extern unsigned long do_brk(unsigned long, unsigned long);
56728
56729 @@ -1510,6 +1499,10 @@ extern struct vm_area_struct * find_vma(
56730 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
56731 struct vm_area_struct **pprev);
56732
56733 +extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
56734 +extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
56735 +extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
56736 +
56737 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
56738 NULL if none. Assume start_addr < end_addr. */
56739 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
56740 @@ -1526,15 +1519,6 @@ static inline unsigned long vma_pages(st
56741 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
56742 }
56743
56744 -#ifdef CONFIG_MMU
56745 -pgprot_t vm_get_page_prot(unsigned long vm_flags);
56746 -#else
56747 -static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
56748 -{
56749 - return __pgprot(0);
56750 -}
56751 -#endif
56752 -
56753 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
56754 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
56755 unsigned long pfn, unsigned long size, pgprot_t);
56756 @@ -1647,7 +1631,7 @@ extern int unpoison_memory(unsigned long
56757 extern int sysctl_memory_failure_early_kill;
56758 extern int sysctl_memory_failure_recovery;
56759 extern void shake_page(struct page *p, int access);
56760 -extern atomic_long_t mce_bad_pages;
56761 +extern atomic_long_unchecked_t mce_bad_pages;
56762 extern int soft_offline_page(struct page *page, int flags);
56763
56764 extern void dump_page(struct page *page);
56765 @@ -1661,5 +1645,11 @@ extern void copy_user_huge_page(struct p
56766 unsigned int pages_per_huge_page);
56767 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
56768
56769 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
56770 +extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
56771 +#else
56772 +static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
56773 +#endif
56774 +
56775 #endif /* __KERNEL__ */
56776 #endif /* _LINUX_MM_H */
56777 diff -urNp linux-3.0.4/include/linux/mm_types.h linux-3.0.4/include/linux/mm_types.h
56778 --- linux-3.0.4/include/linux/mm_types.h 2011-07-21 22:17:23.000000000 -0400
56779 +++ linux-3.0.4/include/linux/mm_types.h 2011-08-23 21:47:56.000000000 -0400
56780 @@ -184,6 +184,8 @@ struct vm_area_struct {
56781 #ifdef CONFIG_NUMA
56782 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
56783 #endif
56784 +
56785 + struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
56786 };
56787
56788 struct core_thread {
56789 @@ -316,6 +318,24 @@ struct mm_struct {
56790 #ifdef CONFIG_CPUMASK_OFFSTACK
56791 struct cpumask cpumask_allocation;
56792 #endif
56793 +
56794 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
56795 + unsigned long pax_flags;
56796 +#endif
56797 +
56798 +#ifdef CONFIG_PAX_DLRESOLVE
56799 + unsigned long call_dl_resolve;
56800 +#endif
56801 +
56802 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
56803 + unsigned long call_syscall;
56804 +#endif
56805 +
56806 +#ifdef CONFIG_PAX_ASLR
56807 + unsigned long delta_mmap; /* randomized offset */
56808 + unsigned long delta_stack; /* randomized offset */
56809 +#endif
56810 +
56811 };
56812
56813 static inline void mm_init_cpumask(struct mm_struct *mm)
56814 diff -urNp linux-3.0.4/include/linux/mmu_notifier.h linux-3.0.4/include/linux/mmu_notifier.h
56815 --- linux-3.0.4/include/linux/mmu_notifier.h 2011-07-21 22:17:23.000000000 -0400
56816 +++ linux-3.0.4/include/linux/mmu_notifier.h 2011-08-23 21:47:56.000000000 -0400
56817 @@ -255,12 +255,12 @@ static inline void mmu_notifier_mm_destr
56818 */
56819 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
56820 ({ \
56821 - pte_t __pte; \
56822 + pte_t ___pte; \
56823 struct vm_area_struct *___vma = __vma; \
56824 unsigned long ___address = __address; \
56825 - __pte = ptep_clear_flush(___vma, ___address, __ptep); \
56826 + ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
56827 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
56828 - __pte; \
56829 + ___pte; \
56830 })
56831
56832 #define pmdp_clear_flush_notify(__vma, __address, __pmdp) \
56833 diff -urNp linux-3.0.4/include/linux/mmzone.h linux-3.0.4/include/linux/mmzone.h
56834 --- linux-3.0.4/include/linux/mmzone.h 2011-07-21 22:17:23.000000000 -0400
56835 +++ linux-3.0.4/include/linux/mmzone.h 2011-08-23 21:47:56.000000000 -0400
56836 @@ -350,7 +350,7 @@ struct zone {
56837 unsigned long flags; /* zone flags, see below */
56838
56839 /* Zone statistics */
56840 - atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
56841 + atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
56842
56843 /*
56844 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
56845 diff -urNp linux-3.0.4/include/linux/mod_devicetable.h linux-3.0.4/include/linux/mod_devicetable.h
56846 --- linux-3.0.4/include/linux/mod_devicetable.h 2011-07-21 22:17:23.000000000 -0400
56847 +++ linux-3.0.4/include/linux/mod_devicetable.h 2011-08-23 21:47:56.000000000 -0400
56848 @@ -12,7 +12,7 @@
56849 typedef unsigned long kernel_ulong_t;
56850 #endif
56851
56852 -#define PCI_ANY_ID (~0)
56853 +#define PCI_ANY_ID ((__u16)~0)
56854
56855 struct pci_device_id {
56856 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
56857 @@ -131,7 +131,7 @@ struct usb_device_id {
56858 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
56859 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
56860
56861 -#define HID_ANY_ID (~0)
56862 +#define HID_ANY_ID (~0U)
56863
56864 struct hid_device_id {
56865 __u16 bus;
56866 diff -urNp linux-3.0.4/include/linux/module.h linux-3.0.4/include/linux/module.h
56867 --- linux-3.0.4/include/linux/module.h 2011-07-21 22:17:23.000000000 -0400
56868 +++ linux-3.0.4/include/linux/module.h 2011-08-23 21:47:56.000000000 -0400
56869 @@ -16,6 +16,7 @@
56870 #include <linux/kobject.h>
56871 #include <linux/moduleparam.h>
56872 #include <linux/tracepoint.h>
56873 +#include <linux/fs.h>
56874
56875 #include <linux/percpu.h>
56876 #include <asm/module.h>
56877 @@ -325,19 +326,16 @@ struct module
56878 int (*init)(void);
56879
56880 /* If this is non-NULL, vfree after init() returns */
56881 - void *module_init;
56882 + void *module_init_rx, *module_init_rw;
56883
56884 /* Here is the actual code + data, vfree'd on unload. */
56885 - void *module_core;
56886 + void *module_core_rx, *module_core_rw;
56887
56888 /* Here are the sizes of the init and core sections */
56889 - unsigned int init_size, core_size;
56890 + unsigned int init_size_rw, core_size_rw;
56891
56892 /* The size of the executable code in each section. */
56893 - unsigned int init_text_size, core_text_size;
56894 -
56895 - /* Size of RO sections of the module (text+rodata) */
56896 - unsigned int init_ro_size, core_ro_size;
56897 + unsigned int init_size_rx, core_size_rx;
56898
56899 /* Arch-specific module values */
56900 struct mod_arch_specific arch;
56901 @@ -393,6 +391,10 @@ struct module
56902 #ifdef CONFIG_EVENT_TRACING
56903 struct ftrace_event_call **trace_events;
56904 unsigned int num_trace_events;
56905 + struct file_operations trace_id;
56906 + struct file_operations trace_enable;
56907 + struct file_operations trace_format;
56908 + struct file_operations trace_filter;
56909 #endif
56910 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
56911 unsigned int num_ftrace_callsites;
56912 @@ -443,16 +445,46 @@ bool is_module_address(unsigned long add
56913 bool is_module_percpu_address(unsigned long addr);
56914 bool is_module_text_address(unsigned long addr);
56915
56916 +static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
56917 +{
56918 +
56919 +#ifdef CONFIG_PAX_KERNEXEC
56920 + if (ktla_ktva(addr) >= (unsigned long)start &&
56921 + ktla_ktva(addr) < (unsigned long)start + size)
56922 + return 1;
56923 +#endif
56924 +
56925 + return ((void *)addr >= start && (void *)addr < start + size);
56926 +}
56927 +
56928 +static inline int within_module_core_rx(unsigned long addr, struct module *mod)
56929 +{
56930 + return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
56931 +}
56932 +
56933 +static inline int within_module_core_rw(unsigned long addr, struct module *mod)
56934 +{
56935 + return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
56936 +}
56937 +
56938 +static inline int within_module_init_rx(unsigned long addr, struct module *mod)
56939 +{
56940 + return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
56941 +}
56942 +
56943 +static inline int within_module_init_rw(unsigned long addr, struct module *mod)
56944 +{
56945 + return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
56946 +}
56947 +
56948 static inline int within_module_core(unsigned long addr, struct module *mod)
56949 {
56950 - return (unsigned long)mod->module_core <= addr &&
56951 - addr < (unsigned long)mod->module_core + mod->core_size;
56952 + return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
56953 }
56954
56955 static inline int within_module_init(unsigned long addr, struct module *mod)
56956 {
56957 - return (unsigned long)mod->module_init <= addr &&
56958 - addr < (unsigned long)mod->module_init + mod->init_size;
56959 + return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
56960 }
56961
56962 /* Search for module by name: must hold module_mutex. */
56963 diff -urNp linux-3.0.4/include/linux/moduleloader.h linux-3.0.4/include/linux/moduleloader.h
56964 --- linux-3.0.4/include/linux/moduleloader.h 2011-07-21 22:17:23.000000000 -0400
56965 +++ linux-3.0.4/include/linux/moduleloader.h 2011-08-23 21:47:56.000000000 -0400
56966 @@ -20,9 +20,21 @@ unsigned int arch_mod_section_prepend(st
56967 sections. Returns NULL on failure. */
56968 void *module_alloc(unsigned long size);
56969
56970 +#ifdef CONFIG_PAX_KERNEXEC
56971 +void *module_alloc_exec(unsigned long size);
56972 +#else
56973 +#define module_alloc_exec(x) module_alloc(x)
56974 +#endif
56975 +
56976 /* Free memory returned from module_alloc. */
56977 void module_free(struct module *mod, void *module_region);
56978
56979 +#ifdef CONFIG_PAX_KERNEXEC
56980 +void module_free_exec(struct module *mod, void *module_region);
56981 +#else
56982 +#define module_free_exec(x, y) module_free((x), (y))
56983 +#endif
56984 +
56985 /* Apply the given relocation to the (simplified) ELF. Return -error
56986 or 0. */
56987 int apply_relocate(Elf_Shdr *sechdrs,
56988 diff -urNp linux-3.0.4/include/linux/moduleparam.h linux-3.0.4/include/linux/moduleparam.h
56989 --- linux-3.0.4/include/linux/moduleparam.h 2011-07-21 22:17:23.000000000 -0400
56990 +++ linux-3.0.4/include/linux/moduleparam.h 2011-08-23 21:47:56.000000000 -0400
56991 @@ -255,7 +255,7 @@ static inline void __kernel_param_unlock
56992 * @len is usually just sizeof(string).
56993 */
56994 #define module_param_string(name, string, len, perm) \
56995 - static const struct kparam_string __param_string_##name \
56996 + static const struct kparam_string __param_string_##name __used \
56997 = { len, string }; \
56998 __module_param_call(MODULE_PARAM_PREFIX, name, \
56999 &param_ops_string, \
57000 @@ -370,7 +370,7 @@ extern int param_get_invbool(char *buffe
57001 * module_param_named() for why this might be necessary.
57002 */
57003 #define module_param_array_named(name, array, type, nump, perm) \
57004 - static const struct kparam_array __param_arr_##name \
57005 + static const struct kparam_array __param_arr_##name __used \
57006 = { .max = ARRAY_SIZE(array), .num = nump, \
57007 .ops = &param_ops_##type, \
57008 .elemsize = sizeof(array[0]), .elem = array }; \
57009 diff -urNp linux-3.0.4/include/linux/namei.h linux-3.0.4/include/linux/namei.h
57010 --- linux-3.0.4/include/linux/namei.h 2011-07-21 22:17:23.000000000 -0400
57011 +++ linux-3.0.4/include/linux/namei.h 2011-08-23 21:47:56.000000000 -0400
57012 @@ -24,7 +24,7 @@ struct nameidata {
57013 unsigned seq;
57014 int last_type;
57015 unsigned depth;
57016 - char *saved_names[MAX_NESTED_LINKS + 1];
57017 + const char *saved_names[MAX_NESTED_LINKS + 1];
57018
57019 /* Intent data */
57020 union {
57021 @@ -91,12 +91,12 @@ extern int follow_up(struct path *);
57022 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
57023 extern void unlock_rename(struct dentry *, struct dentry *);
57024
57025 -static inline void nd_set_link(struct nameidata *nd, char *path)
57026 +static inline void nd_set_link(struct nameidata *nd, const char *path)
57027 {
57028 nd->saved_names[nd->depth] = path;
57029 }
57030
57031 -static inline char *nd_get_link(struct nameidata *nd)
57032 +static inline const char *nd_get_link(const struct nameidata *nd)
57033 {
57034 return nd->saved_names[nd->depth];
57035 }
57036 diff -urNp linux-3.0.4/include/linux/netdevice.h linux-3.0.4/include/linux/netdevice.h
57037 --- linux-3.0.4/include/linux/netdevice.h 2011-09-02 18:11:21.000000000 -0400
57038 +++ linux-3.0.4/include/linux/netdevice.h 2011-08-23 21:47:56.000000000 -0400
57039 @@ -979,6 +979,7 @@ struct net_device_ops {
57040 int (*ndo_set_features)(struct net_device *dev,
57041 u32 features);
57042 };
57043 +typedef struct net_device_ops __no_const net_device_ops_no_const;
57044
57045 /*
57046 * The DEVICE structure.
57047 diff -urNp linux-3.0.4/include/linux/netfilter/xt_gradm.h linux-3.0.4/include/linux/netfilter/xt_gradm.h
57048 --- linux-3.0.4/include/linux/netfilter/xt_gradm.h 1969-12-31 19:00:00.000000000 -0500
57049 +++ linux-3.0.4/include/linux/netfilter/xt_gradm.h 2011-08-23 21:48:14.000000000 -0400
57050 @@ -0,0 +1,9 @@
57051 +#ifndef _LINUX_NETFILTER_XT_GRADM_H
57052 +#define _LINUX_NETFILTER_XT_GRADM_H 1
57053 +
57054 +struct xt_gradm_mtinfo {
57055 + __u16 flags;
57056 + __u16 invflags;
57057 +};
57058 +
57059 +#endif
57060 diff -urNp linux-3.0.4/include/linux/of_pdt.h linux-3.0.4/include/linux/of_pdt.h
57061 --- linux-3.0.4/include/linux/of_pdt.h 2011-07-21 22:17:23.000000000 -0400
57062 +++ linux-3.0.4/include/linux/of_pdt.h 2011-08-30 06:20:11.000000000 -0400
57063 @@ -32,7 +32,7 @@ struct of_pdt_ops {
57064
57065 /* return 0 on success; fill in 'len' with number of bytes in path */
57066 int (*pkg2path)(phandle node, char *buf, const int buflen, int *len);
57067 -};
57068 +} __no_const;
57069
57070 extern void *prom_early_alloc(unsigned long size);
57071
57072 diff -urNp linux-3.0.4/include/linux/oprofile.h linux-3.0.4/include/linux/oprofile.h
57073 --- linux-3.0.4/include/linux/oprofile.h 2011-07-21 22:17:23.000000000 -0400
57074 +++ linux-3.0.4/include/linux/oprofile.h 2011-08-23 21:47:56.000000000 -0400
57075 @@ -139,9 +139,9 @@ int oprofilefs_create_ulong(struct super
57076 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
57077 char const * name, ulong * val);
57078
57079 -/** Create a file for read-only access to an atomic_t. */
57080 +/** Create a file for read-only access to an atomic_unchecked_t. */
57081 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
57082 - char const * name, atomic_t * val);
57083 + char const * name, atomic_unchecked_t * val);
57084
57085 /** create a directory */
57086 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
57087 diff -urNp linux-3.0.4/include/linux/padata.h linux-3.0.4/include/linux/padata.h
57088 --- linux-3.0.4/include/linux/padata.h 2011-07-21 22:17:23.000000000 -0400
57089 +++ linux-3.0.4/include/linux/padata.h 2011-08-23 21:47:56.000000000 -0400
57090 @@ -129,7 +129,7 @@ struct parallel_data {
57091 struct padata_instance *pinst;
57092 struct padata_parallel_queue __percpu *pqueue;
57093 struct padata_serial_queue __percpu *squeue;
57094 - atomic_t seq_nr;
57095 + atomic_unchecked_t seq_nr;
57096 atomic_t reorder_objects;
57097 atomic_t refcnt;
57098 unsigned int max_seq_nr;
57099 diff -urNp linux-3.0.4/include/linux/perf_event.h linux-3.0.4/include/linux/perf_event.h
57100 --- linux-3.0.4/include/linux/perf_event.h 2011-07-21 22:17:23.000000000 -0400
57101 +++ linux-3.0.4/include/linux/perf_event.h 2011-08-23 21:47:56.000000000 -0400
57102 @@ -761,8 +761,8 @@ struct perf_event {
57103
57104 enum perf_event_active_state state;
57105 unsigned int attach_state;
57106 - local64_t count;
57107 - atomic64_t child_count;
57108 + local64_t count; /* PaX: fix it one day */
57109 + atomic64_unchecked_t child_count;
57110
57111 /*
57112 * These are the total time in nanoseconds that the event
57113 @@ -813,8 +813,8 @@ struct perf_event {
57114 * These accumulate total time (in nanoseconds) that children
57115 * events have been enabled and running, respectively.
57116 */
57117 - atomic64_t child_total_time_enabled;
57118 - atomic64_t child_total_time_running;
57119 + atomic64_unchecked_t child_total_time_enabled;
57120 + atomic64_unchecked_t child_total_time_running;
57121
57122 /*
57123 * Protect attach/detach and child_list:
57124 diff -urNp linux-3.0.4/include/linux/pipe_fs_i.h linux-3.0.4/include/linux/pipe_fs_i.h
57125 --- linux-3.0.4/include/linux/pipe_fs_i.h 2011-07-21 22:17:23.000000000 -0400
57126 +++ linux-3.0.4/include/linux/pipe_fs_i.h 2011-08-23 21:47:56.000000000 -0400
57127 @@ -46,9 +46,9 @@ struct pipe_buffer {
57128 struct pipe_inode_info {
57129 wait_queue_head_t wait;
57130 unsigned int nrbufs, curbuf, buffers;
57131 - unsigned int readers;
57132 - unsigned int writers;
57133 - unsigned int waiting_writers;
57134 + atomic_t readers;
57135 + atomic_t writers;
57136 + atomic_t waiting_writers;
57137 unsigned int r_counter;
57138 unsigned int w_counter;
57139 struct page *tmp_page;
57140 diff -urNp linux-3.0.4/include/linux/pm_runtime.h linux-3.0.4/include/linux/pm_runtime.h
57141 --- linux-3.0.4/include/linux/pm_runtime.h 2011-07-21 22:17:23.000000000 -0400
57142 +++ linux-3.0.4/include/linux/pm_runtime.h 2011-08-23 21:47:56.000000000 -0400
57143 @@ -94,7 +94,7 @@ static inline bool pm_runtime_callbacks_
57144
57145 static inline void pm_runtime_mark_last_busy(struct device *dev)
57146 {
57147 - ACCESS_ONCE(dev->power.last_busy) = jiffies;
57148 + ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
57149 }
57150
57151 #else /* !CONFIG_PM_RUNTIME */
57152 diff -urNp linux-3.0.4/include/linux/poison.h linux-3.0.4/include/linux/poison.h
57153 --- linux-3.0.4/include/linux/poison.h 2011-07-21 22:17:23.000000000 -0400
57154 +++ linux-3.0.4/include/linux/poison.h 2011-08-23 21:47:56.000000000 -0400
57155 @@ -19,8 +19,8 @@
57156 * under normal circumstances, used to verify that nobody uses
57157 * non-initialized list entries.
57158 */
57159 -#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
57160 -#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
57161 +#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
57162 +#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
57163
57164 /********** include/linux/timer.h **********/
57165 /*
57166 diff -urNp linux-3.0.4/include/linux/preempt.h linux-3.0.4/include/linux/preempt.h
57167 --- linux-3.0.4/include/linux/preempt.h 2011-07-21 22:17:23.000000000 -0400
57168 +++ linux-3.0.4/include/linux/preempt.h 2011-08-23 21:47:56.000000000 -0400
57169 @@ -115,7 +115,7 @@ struct preempt_ops {
57170 void (*sched_in)(struct preempt_notifier *notifier, int cpu);
57171 void (*sched_out)(struct preempt_notifier *notifier,
57172 struct task_struct *next);
57173 -};
57174 +} __no_const;
57175
57176 /**
57177 * preempt_notifier - key for installing preemption notifiers
57178 diff -urNp linux-3.0.4/include/linux/proc_fs.h linux-3.0.4/include/linux/proc_fs.h
57179 --- linux-3.0.4/include/linux/proc_fs.h 2011-07-21 22:17:23.000000000 -0400
57180 +++ linux-3.0.4/include/linux/proc_fs.h 2011-08-23 21:48:14.000000000 -0400
57181 @@ -155,6 +155,19 @@ static inline struct proc_dir_entry *pro
57182 return proc_create_data(name, mode, parent, proc_fops, NULL);
57183 }
57184
57185 +static inline struct proc_dir_entry *proc_create_grsec(const char *name, mode_t mode,
57186 + struct proc_dir_entry *parent, const struct file_operations *proc_fops)
57187 +{
57188 +#ifdef CONFIG_GRKERNSEC_PROC_USER
57189 + return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
57190 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
57191 + return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
57192 +#else
57193 + return proc_create_data(name, mode, parent, proc_fops, NULL);
57194 +#endif
57195 +}
57196 +
57197 +
57198 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
57199 mode_t mode, struct proc_dir_entry *base,
57200 read_proc_t *read_proc, void * data)
57201 @@ -258,7 +271,7 @@ union proc_op {
57202 int (*proc_show)(struct seq_file *m,
57203 struct pid_namespace *ns, struct pid *pid,
57204 struct task_struct *task);
57205 -};
57206 +} __no_const;
57207
57208 struct ctl_table_header;
57209 struct ctl_table;
57210 diff -urNp linux-3.0.4/include/linux/ptrace.h linux-3.0.4/include/linux/ptrace.h
57211 --- linux-3.0.4/include/linux/ptrace.h 2011-07-21 22:17:23.000000000 -0400
57212 +++ linux-3.0.4/include/linux/ptrace.h 2011-08-23 21:48:14.000000000 -0400
57213 @@ -115,10 +115,10 @@ extern void __ptrace_unlink(struct task_
57214 extern void exit_ptrace(struct task_struct *tracer);
57215 #define PTRACE_MODE_READ 1
57216 #define PTRACE_MODE_ATTACH 2
57217 -/* Returns 0 on success, -errno on denial. */
57218 -extern int __ptrace_may_access(struct task_struct *task, unsigned int mode);
57219 /* Returns true on success, false on denial. */
57220 extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
57221 +/* Returns true on success, false on denial. */
57222 +extern bool ptrace_may_access_log(struct task_struct *task, unsigned int mode);
57223
57224 static inline int ptrace_reparented(struct task_struct *child)
57225 {
57226 diff -urNp linux-3.0.4/include/linux/random.h linux-3.0.4/include/linux/random.h
57227 --- linux-3.0.4/include/linux/random.h 2011-09-02 18:11:21.000000000 -0400
57228 +++ linux-3.0.4/include/linux/random.h 2011-08-23 21:47:56.000000000 -0400
57229 @@ -69,12 +69,17 @@ void srandom32(u32 seed);
57230
57231 u32 prandom32(struct rnd_state *);
57232
57233 +static inline unsigned long pax_get_random_long(void)
57234 +{
57235 + return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
57236 +}
57237 +
57238 /*
57239 * Handle minimum values for seeds
57240 */
57241 static inline u32 __seed(u32 x, u32 m)
57242 {
57243 - return (x < m) ? x + m : x;
57244 + return (x <= m) ? x + m + 1 : x;
57245 }
57246
57247 /**
57248 diff -urNp linux-3.0.4/include/linux/reboot.h linux-3.0.4/include/linux/reboot.h
57249 --- linux-3.0.4/include/linux/reboot.h 2011-07-21 22:17:23.000000000 -0400
57250 +++ linux-3.0.4/include/linux/reboot.h 2011-08-23 21:47:56.000000000 -0400
57251 @@ -47,9 +47,9 @@ extern int unregister_reboot_notifier(st
57252 * Architecture-specific implementations of sys_reboot commands.
57253 */
57254
57255 -extern void machine_restart(char *cmd);
57256 -extern void machine_halt(void);
57257 -extern void machine_power_off(void);
57258 +extern void machine_restart(char *cmd) __noreturn;
57259 +extern void machine_halt(void) __noreturn;
57260 +extern void machine_power_off(void) __noreturn;
57261
57262 extern void machine_shutdown(void);
57263 struct pt_regs;
57264 @@ -60,9 +60,9 @@ extern void machine_crash_shutdown(struc
57265 */
57266
57267 extern void kernel_restart_prepare(char *cmd);
57268 -extern void kernel_restart(char *cmd);
57269 -extern void kernel_halt(void);
57270 -extern void kernel_power_off(void);
57271 +extern void kernel_restart(char *cmd) __noreturn;
57272 +extern void kernel_halt(void) __noreturn;
57273 +extern void kernel_power_off(void) __noreturn;
57274
57275 extern int C_A_D; /* for sysctl */
57276 void ctrl_alt_del(void);
57277 @@ -76,7 +76,7 @@ extern int orderly_poweroff(bool force);
57278 * Emergency restart, callable from an interrupt handler.
57279 */
57280
57281 -extern void emergency_restart(void);
57282 +extern void emergency_restart(void) __noreturn;
57283 #include <asm/emergency-restart.h>
57284
57285 #endif
57286 diff -urNp linux-3.0.4/include/linux/reiserfs_fs.h linux-3.0.4/include/linux/reiserfs_fs.h
57287 --- linux-3.0.4/include/linux/reiserfs_fs.h 2011-07-21 22:17:23.000000000 -0400
57288 +++ linux-3.0.4/include/linux/reiserfs_fs.h 2011-08-23 21:47:56.000000000 -0400
57289 @@ -1406,7 +1406,7 @@ static inline loff_t max_reiserfs_offset
57290 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
57291
57292 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
57293 -#define get_generation(s) atomic_read (&fs_generation(s))
57294 +#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
57295 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
57296 #define __fs_changed(gen,s) (gen != get_generation (s))
57297 #define fs_changed(gen,s) \
57298 diff -urNp linux-3.0.4/include/linux/reiserfs_fs_sb.h linux-3.0.4/include/linux/reiserfs_fs_sb.h
57299 --- linux-3.0.4/include/linux/reiserfs_fs_sb.h 2011-07-21 22:17:23.000000000 -0400
57300 +++ linux-3.0.4/include/linux/reiserfs_fs_sb.h 2011-08-23 21:47:56.000000000 -0400
57301 @@ -386,7 +386,7 @@ struct reiserfs_sb_info {
57302 /* Comment? -Hans */
57303 wait_queue_head_t s_wait;
57304 /* To be obsoleted soon by per buffer seals.. -Hans */
57305 - atomic_t s_generation_counter; // increased by one every time the
57306 + atomic_unchecked_t s_generation_counter; // increased by one every time the
57307 // tree gets re-balanced
57308 unsigned long s_properties; /* File system properties. Currently holds
57309 on-disk FS format */
57310 diff -urNp linux-3.0.4/include/linux/relay.h linux-3.0.4/include/linux/relay.h
57311 --- linux-3.0.4/include/linux/relay.h 2011-07-21 22:17:23.000000000 -0400
57312 +++ linux-3.0.4/include/linux/relay.h 2011-08-23 21:47:56.000000000 -0400
57313 @@ -159,7 +159,7 @@ struct rchan_callbacks
57314 * The callback should return 0 if successful, negative if not.
57315 */
57316 int (*remove_buf_file)(struct dentry *dentry);
57317 -};
57318 +} __no_const;
57319
57320 /*
57321 * CONFIG_RELAY kernel API, kernel/relay.c
57322 diff -urNp linux-3.0.4/include/linux/rfkill.h linux-3.0.4/include/linux/rfkill.h
57323 --- linux-3.0.4/include/linux/rfkill.h 2011-07-21 22:17:23.000000000 -0400
57324 +++ linux-3.0.4/include/linux/rfkill.h 2011-08-23 21:47:56.000000000 -0400
57325 @@ -147,6 +147,7 @@ struct rfkill_ops {
57326 void (*query)(struct rfkill *rfkill, void *data);
57327 int (*set_block)(void *data, bool blocked);
57328 };
57329 +typedef struct rfkill_ops __no_const rfkill_ops_no_const;
57330
57331 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
57332 /**
57333 diff -urNp linux-3.0.4/include/linux/rmap.h linux-3.0.4/include/linux/rmap.h
57334 --- linux-3.0.4/include/linux/rmap.h 2011-07-21 22:17:23.000000000 -0400
57335 +++ linux-3.0.4/include/linux/rmap.h 2011-08-23 21:47:56.000000000 -0400
57336 @@ -119,8 +119,8 @@ static inline void anon_vma_unlock(struc
57337 void anon_vma_init(void); /* create anon_vma_cachep */
57338 int anon_vma_prepare(struct vm_area_struct *);
57339 void unlink_anon_vmas(struct vm_area_struct *);
57340 -int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
57341 -int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
57342 +int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
57343 +int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
57344 void __anon_vma_link(struct vm_area_struct *);
57345
57346 static inline void anon_vma_merge(struct vm_area_struct *vma,
57347 diff -urNp linux-3.0.4/include/linux/sched.h linux-3.0.4/include/linux/sched.h
57348 --- linux-3.0.4/include/linux/sched.h 2011-07-21 22:17:23.000000000 -0400
57349 +++ linux-3.0.4/include/linux/sched.h 2011-08-25 17:22:27.000000000 -0400
57350 @@ -100,6 +100,7 @@ struct bio_list;
57351 struct fs_struct;
57352 struct perf_event_context;
57353 struct blk_plug;
57354 +struct linux_binprm;
57355
57356 /*
57357 * List of flags we want to share for kernel threads,
57358 @@ -380,10 +381,13 @@ struct user_namespace;
57359 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
57360
57361 extern int sysctl_max_map_count;
57362 +extern unsigned long sysctl_heap_stack_gap;
57363
57364 #include <linux/aio.h>
57365
57366 #ifdef CONFIG_MMU
57367 +extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
57368 +extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
57369 extern void arch_pick_mmap_layout(struct mm_struct *mm);
57370 extern unsigned long
57371 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
57372 @@ -629,6 +633,17 @@ struct signal_struct {
57373 #ifdef CONFIG_TASKSTATS
57374 struct taskstats *stats;
57375 #endif
57376 +
57377 +#ifdef CONFIG_GRKERNSEC
57378 + u32 curr_ip;
57379 + u32 saved_ip;
57380 + u32 gr_saddr;
57381 + u32 gr_daddr;
57382 + u16 gr_sport;
57383 + u16 gr_dport;
57384 + u8 used_accept:1;
57385 +#endif
57386 +
57387 #ifdef CONFIG_AUDIT
57388 unsigned audit_tty;
57389 struct tty_audit_buf *tty_audit_buf;
57390 @@ -710,6 +725,11 @@ struct user_struct {
57391 struct key *session_keyring; /* UID's default session keyring */
57392 #endif
57393
57394 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
57395 + unsigned int banned;
57396 + unsigned long ban_expires;
57397 +#endif
57398 +
57399 /* Hash table maintenance information */
57400 struct hlist_node uidhash_node;
57401 uid_t uid;
57402 @@ -1340,8 +1360,8 @@ struct task_struct {
57403 struct list_head thread_group;
57404
57405 struct completion *vfork_done; /* for vfork() */
57406 - int __user *set_child_tid; /* CLONE_CHILD_SETTID */
57407 - int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
57408 + pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
57409 + pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
57410
57411 cputime_t utime, stime, utimescaled, stimescaled;
57412 cputime_t gtime;
57413 @@ -1357,13 +1377,6 @@ struct task_struct {
57414 struct task_cputime cputime_expires;
57415 struct list_head cpu_timers[3];
57416
57417 -/* process credentials */
57418 - const struct cred __rcu *real_cred; /* objective and real subjective task
57419 - * credentials (COW) */
57420 - const struct cred __rcu *cred; /* effective (overridable) subjective task
57421 - * credentials (COW) */
57422 - struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
57423 -
57424 char comm[TASK_COMM_LEN]; /* executable name excluding path
57425 - access with [gs]et_task_comm (which lock
57426 it with task_lock())
57427 @@ -1380,8 +1393,16 @@ struct task_struct {
57428 #endif
57429 /* CPU-specific state of this task */
57430 struct thread_struct thread;
57431 +/* thread_info moved to task_struct */
57432 +#ifdef CONFIG_X86
57433 + struct thread_info tinfo;
57434 +#endif
57435 /* filesystem information */
57436 struct fs_struct *fs;
57437 +
57438 + const struct cred __rcu *cred; /* effective (overridable) subjective task
57439 + * credentials (COW) */
57440 +
57441 /* open file information */
57442 struct files_struct *files;
57443 /* namespaces */
57444 @@ -1428,6 +1449,11 @@ struct task_struct {
57445 struct rt_mutex_waiter *pi_blocked_on;
57446 #endif
57447
57448 +/* process credentials */
57449 + const struct cred __rcu *real_cred; /* objective and real subjective task
57450 + * credentials (COW) */
57451 + struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
57452 +
57453 #ifdef CONFIG_DEBUG_MUTEXES
57454 /* mutex deadlock detection */
57455 struct mutex_waiter *blocked_on;
57456 @@ -1538,6 +1564,21 @@ struct task_struct {
57457 unsigned long default_timer_slack_ns;
57458
57459 struct list_head *scm_work_list;
57460 +
57461 +#ifdef CONFIG_GRKERNSEC
57462 + /* grsecurity */
57463 + struct dentry *gr_chroot_dentry;
57464 + struct acl_subject_label *acl;
57465 + struct acl_role_label *role;
57466 + struct file *exec_file;
57467 + u16 acl_role_id;
57468 + /* is this the task that authenticated to the special role */
57469 + u8 acl_sp_role;
57470 + u8 is_writable;
57471 + u8 brute;
57472 + u8 gr_is_chrooted;
57473 +#endif
57474 +
57475 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
57476 /* Index of current stored address in ret_stack */
57477 int curr_ret_stack;
57478 @@ -1572,6 +1613,57 @@ struct task_struct {
57479 #endif
57480 };
57481
57482 +#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
57483 +#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
57484 +#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
57485 +#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
57486 +/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
57487 +#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
57488 +
57489 +#ifdef CONFIG_PAX_SOFTMODE
57490 +extern int pax_softmode;
57491 +#endif
57492 +
57493 +extern int pax_check_flags(unsigned long *);
57494 +
57495 +/* if tsk != current then task_lock must be held on it */
57496 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
57497 +static inline unsigned long pax_get_flags(struct task_struct *tsk)
57498 +{
57499 + if (likely(tsk->mm))
57500 + return tsk->mm->pax_flags;
57501 + else
57502 + return 0UL;
57503 +}
57504 +
57505 +/* if tsk != current then task_lock must be held on it */
57506 +static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
57507 +{
57508 + if (likely(tsk->mm)) {
57509 + tsk->mm->pax_flags = flags;
57510 + return 0;
57511 + }
57512 + return -EINVAL;
57513 +}
57514 +#endif
57515 +
57516 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
57517 +extern void pax_set_initial_flags(struct linux_binprm *bprm);
57518 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
57519 +extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
57520 +#endif
57521 +
57522 +extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
57523 +extern void pax_report_insns(void *pc, void *sp);
57524 +extern void pax_report_refcount_overflow(struct pt_regs *regs);
57525 +extern NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type) ATTRIB_NORET;
57526 +
57527 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
57528 +extern void pax_track_stack(void);
57529 +#else
57530 +static inline void pax_track_stack(void) {}
57531 +#endif
57532 +
57533 /* Future-safe accessor for struct task_struct's cpus_allowed. */
57534 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
57535
57536 @@ -1768,6 +1860,7 @@ extern void thread_group_times(struct ta
57537 #define PF_DUMPCORE 0x00000200 /* dumped core */
57538 #define PF_SIGNALED 0x00000400 /* killed by a signal */
57539 #define PF_MEMALLOC 0x00000800 /* Allocating memory */
57540 +#define PF_NPROC_EXCEEDED 0x00001000 /* set_user noticed that RLIMIT_NPROC was exceeded */
57541 #define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */
57542 #define PF_FREEZING 0x00004000 /* freeze in progress. do not account to load */
57543 #define PF_NOFREEZE 0x00008000 /* this thread should not be frozen */
57544 @@ -2056,7 +2149,9 @@ void yield(void);
57545 extern struct exec_domain default_exec_domain;
57546
57547 union thread_union {
57548 +#ifndef CONFIG_X86
57549 struct thread_info thread_info;
57550 +#endif
57551 unsigned long stack[THREAD_SIZE/sizeof(long)];
57552 };
57553
57554 @@ -2089,6 +2184,7 @@ extern struct pid_namespace init_pid_ns;
57555 */
57556
57557 extern struct task_struct *find_task_by_vpid(pid_t nr);
57558 +extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
57559 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
57560 struct pid_namespace *ns);
57561
57562 @@ -2225,7 +2321,7 @@ extern void __cleanup_sighand(struct sig
57563 extern void exit_itimers(struct signal_struct *);
57564 extern void flush_itimer_signals(void);
57565
57566 -extern NORET_TYPE void do_group_exit(int);
57567 +extern NORET_TYPE void do_group_exit(int) ATTRIB_NORET;
57568
57569 extern void daemonize(const char *, ...);
57570 extern int allow_signal(int);
57571 @@ -2393,13 +2489,17 @@ static inline unsigned long *end_of_stac
57572
57573 #endif
57574
57575 -static inline int object_is_on_stack(void *obj)
57576 +static inline int object_starts_on_stack(void *obj)
57577 {
57578 - void *stack = task_stack_page(current);
57579 + const void *stack = task_stack_page(current);
57580
57581 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
57582 }
57583
57584 +#ifdef CONFIG_PAX_USERCOPY
57585 +extern int object_is_on_stack(const void *obj, unsigned long len);
57586 +#endif
57587 +
57588 extern void thread_info_cache_init(void);
57589
57590 #ifdef CONFIG_DEBUG_STACK_USAGE
57591 diff -urNp linux-3.0.4/include/linux/screen_info.h linux-3.0.4/include/linux/screen_info.h
57592 --- linux-3.0.4/include/linux/screen_info.h 2011-07-21 22:17:23.000000000 -0400
57593 +++ linux-3.0.4/include/linux/screen_info.h 2011-08-23 21:47:56.000000000 -0400
57594 @@ -43,7 +43,8 @@ struct screen_info {
57595 __u16 pages; /* 0x32 */
57596 __u16 vesa_attributes; /* 0x34 */
57597 __u32 capabilities; /* 0x36 */
57598 - __u8 _reserved[6]; /* 0x3a */
57599 + __u16 vesapm_size; /* 0x3a */
57600 + __u8 _reserved[4]; /* 0x3c */
57601 } __attribute__((packed));
57602
57603 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
57604 diff -urNp linux-3.0.4/include/linux/security.h linux-3.0.4/include/linux/security.h
57605 --- linux-3.0.4/include/linux/security.h 2011-07-21 22:17:23.000000000 -0400
57606 +++ linux-3.0.4/include/linux/security.h 2011-08-23 21:48:14.000000000 -0400
57607 @@ -36,6 +36,7 @@
57608 #include <linux/key.h>
57609 #include <linux/xfrm.h>
57610 #include <linux/slab.h>
57611 +#include <linux/grsecurity.h>
57612 #include <net/flow.h>
57613
57614 /* Maximum number of letters for an LSM name string */
57615 diff -urNp linux-3.0.4/include/linux/seq_file.h linux-3.0.4/include/linux/seq_file.h
57616 --- linux-3.0.4/include/linux/seq_file.h 2011-07-21 22:17:23.000000000 -0400
57617 +++ linux-3.0.4/include/linux/seq_file.h 2011-08-23 21:47:56.000000000 -0400
57618 @@ -32,6 +32,7 @@ struct seq_operations {
57619 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
57620 int (*show) (struct seq_file *m, void *v);
57621 };
57622 +typedef struct seq_operations __no_const seq_operations_no_const;
57623
57624 #define SEQ_SKIP 1
57625
57626 diff -urNp linux-3.0.4/include/linux/shmem_fs.h linux-3.0.4/include/linux/shmem_fs.h
57627 --- linux-3.0.4/include/linux/shmem_fs.h 2011-07-21 22:17:23.000000000 -0400
57628 +++ linux-3.0.4/include/linux/shmem_fs.h 2011-08-23 21:47:56.000000000 -0400
57629 @@ -10,7 +10,7 @@
57630
57631 #define SHMEM_NR_DIRECT 16
57632
57633 -#define SHMEM_SYMLINK_INLINE_LEN (SHMEM_NR_DIRECT * sizeof(swp_entry_t))
57634 +#define SHMEM_SYMLINK_INLINE_LEN 64
57635
57636 struct shmem_inode_info {
57637 spinlock_t lock;
57638 diff -urNp linux-3.0.4/include/linux/shm.h linux-3.0.4/include/linux/shm.h
57639 --- linux-3.0.4/include/linux/shm.h 2011-07-21 22:17:23.000000000 -0400
57640 +++ linux-3.0.4/include/linux/shm.h 2011-08-23 21:48:14.000000000 -0400
57641 @@ -95,6 +95,10 @@ struct shmid_kernel /* private to the ke
57642 pid_t shm_cprid;
57643 pid_t shm_lprid;
57644 struct user_struct *mlock_user;
57645 +#ifdef CONFIG_GRKERNSEC
57646 + time_t shm_createtime;
57647 + pid_t shm_lapid;
57648 +#endif
57649 };
57650
57651 /* shm_mode upper byte flags */
57652 diff -urNp linux-3.0.4/include/linux/skbuff.h linux-3.0.4/include/linux/skbuff.h
57653 --- linux-3.0.4/include/linux/skbuff.h 2011-07-21 22:17:23.000000000 -0400
57654 +++ linux-3.0.4/include/linux/skbuff.h 2011-08-23 21:47:56.000000000 -0400
57655 @@ -592,7 +592,7 @@ static inline struct skb_shared_hwtstamp
57656 */
57657 static inline int skb_queue_empty(const struct sk_buff_head *list)
57658 {
57659 - return list->next == (struct sk_buff *)list;
57660 + return list->next == (const struct sk_buff *)list;
57661 }
57662
57663 /**
57664 @@ -605,7 +605,7 @@ static inline int skb_queue_empty(const
57665 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
57666 const struct sk_buff *skb)
57667 {
57668 - return skb->next == (struct sk_buff *)list;
57669 + return skb->next == (const struct sk_buff *)list;
57670 }
57671
57672 /**
57673 @@ -618,7 +618,7 @@ static inline bool skb_queue_is_last(con
57674 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
57675 const struct sk_buff *skb)
57676 {
57677 - return skb->prev == (struct sk_buff *)list;
57678 + return skb->prev == (const struct sk_buff *)list;
57679 }
57680
57681 /**
57682 @@ -1440,7 +1440,7 @@ static inline int pskb_network_may_pull(
57683 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
57684 */
57685 #ifndef NET_SKB_PAD
57686 -#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
57687 +#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
57688 #endif
57689
57690 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
57691 diff -urNp linux-3.0.4/include/linux/slab_def.h linux-3.0.4/include/linux/slab_def.h
57692 --- linux-3.0.4/include/linux/slab_def.h 2011-07-21 22:17:23.000000000 -0400
57693 +++ linux-3.0.4/include/linux/slab_def.h 2011-08-23 21:47:56.000000000 -0400
57694 @@ -96,10 +96,10 @@ struct kmem_cache {
57695 unsigned long node_allocs;
57696 unsigned long node_frees;
57697 unsigned long node_overflow;
57698 - atomic_t allochit;
57699 - atomic_t allocmiss;
57700 - atomic_t freehit;
57701 - atomic_t freemiss;
57702 + atomic_unchecked_t allochit;
57703 + atomic_unchecked_t allocmiss;
57704 + atomic_unchecked_t freehit;
57705 + atomic_unchecked_t freemiss;
57706
57707 /*
57708 * If debugging is enabled, then the allocator can add additional
57709 diff -urNp linux-3.0.4/include/linux/slab.h linux-3.0.4/include/linux/slab.h
57710 --- linux-3.0.4/include/linux/slab.h 2011-07-21 22:17:23.000000000 -0400
57711 +++ linux-3.0.4/include/linux/slab.h 2011-08-23 21:47:56.000000000 -0400
57712 @@ -11,12 +11,20 @@
57713
57714 #include <linux/gfp.h>
57715 #include <linux/types.h>
57716 +#include <linux/err.h>
57717
57718 /*
57719 * Flags to pass to kmem_cache_create().
57720 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
57721 */
57722 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
57723 +
57724 +#ifdef CONFIG_PAX_USERCOPY
57725 +#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
57726 +#else
57727 +#define SLAB_USERCOPY 0x00000000UL
57728 +#endif
57729 +
57730 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
57731 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
57732 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
57733 @@ -87,10 +95,13 @@
57734 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
57735 * Both make kfree a no-op.
57736 */
57737 -#define ZERO_SIZE_PTR ((void *)16)
57738 +#define ZERO_SIZE_PTR \
57739 +({ \
57740 + BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
57741 + (void *)(-MAX_ERRNO-1L); \
57742 +})
57743
57744 -#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
57745 - (unsigned long)ZERO_SIZE_PTR)
57746 +#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
57747
57748 /*
57749 * struct kmem_cache related prototypes
57750 @@ -141,6 +152,7 @@ void * __must_check krealloc(const void
57751 void kfree(const void *);
57752 void kzfree(const void *);
57753 size_t ksize(const void *);
57754 +void check_object_size(const void *ptr, unsigned long n, bool to);
57755
57756 /*
57757 * Allocator specific definitions. These are mainly used to establish optimized
57758 @@ -333,4 +345,59 @@ static inline void *kzalloc_node(size_t
57759
57760 void __init kmem_cache_init_late(void);
57761
57762 +#define kmalloc(x, y) \
57763 +({ \
57764 + void *___retval; \
57765 + intoverflow_t ___x = (intoverflow_t)x; \
57766 + if (WARN(___x > ULONG_MAX, "kmalloc size overflow\n")) \
57767 + ___retval = NULL; \
57768 + else \
57769 + ___retval = kmalloc((size_t)___x, (y)); \
57770 + ___retval; \
57771 +})
57772 +
57773 +#define kmalloc_node(x, y, z) \
57774 +({ \
57775 + void *___retval; \
57776 + intoverflow_t ___x = (intoverflow_t)x; \
57777 + if (WARN(___x > ULONG_MAX, "kmalloc_node size overflow\n"))\
57778 + ___retval = NULL; \
57779 + else \
57780 + ___retval = kmalloc_node((size_t)___x, (y), (z));\
57781 + ___retval; \
57782 +})
57783 +
57784 +#define kzalloc(x, y) \
57785 +({ \
57786 + void *___retval; \
57787 + intoverflow_t ___x = (intoverflow_t)x; \
57788 + if (WARN(___x > ULONG_MAX, "kzalloc size overflow\n")) \
57789 + ___retval = NULL; \
57790 + else \
57791 + ___retval = kzalloc((size_t)___x, (y)); \
57792 + ___retval; \
57793 +})
57794 +
57795 +#define __krealloc(x, y, z) \
57796 +({ \
57797 + void *___retval; \
57798 + intoverflow_t ___y = (intoverflow_t)y; \
57799 + if (WARN(___y > ULONG_MAX, "__krealloc size overflow\n"))\
57800 + ___retval = NULL; \
57801 + else \
57802 + ___retval = __krealloc((x), (size_t)___y, (z)); \
57803 + ___retval; \
57804 +})
57805 +
57806 +#define krealloc(x, y, z) \
57807 +({ \
57808 + void *___retval; \
57809 + intoverflow_t ___y = (intoverflow_t)y; \
57810 + if (WARN(___y > ULONG_MAX, "krealloc size overflow\n")) \
57811 + ___retval = NULL; \
57812 + else \
57813 + ___retval = krealloc((x), (size_t)___y, (z)); \
57814 + ___retval; \
57815 +})
57816 +
57817 #endif /* _LINUX_SLAB_H */
57818 diff -urNp linux-3.0.4/include/linux/slub_def.h linux-3.0.4/include/linux/slub_def.h
57819 --- linux-3.0.4/include/linux/slub_def.h 2011-07-21 22:17:23.000000000 -0400
57820 +++ linux-3.0.4/include/linux/slub_def.h 2011-08-23 21:47:56.000000000 -0400
57821 @@ -82,7 +82,7 @@ struct kmem_cache {
57822 struct kmem_cache_order_objects max;
57823 struct kmem_cache_order_objects min;
57824 gfp_t allocflags; /* gfp flags to use on each alloc */
57825 - int refcount; /* Refcount for slab cache destroy */
57826 + atomic_t refcount; /* Refcount for slab cache destroy */
57827 void (*ctor)(void *);
57828 int inuse; /* Offset to metadata */
57829 int align; /* Alignment */
57830 @@ -218,7 +218,7 @@ static __always_inline struct kmem_cache
57831 }
57832
57833 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
57834 -void *__kmalloc(size_t size, gfp_t flags);
57835 +void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1);
57836
57837 static __always_inline void *
57838 kmalloc_order(size_t size, gfp_t flags, unsigned int order)
57839 diff -urNp linux-3.0.4/include/linux/sonet.h linux-3.0.4/include/linux/sonet.h
57840 --- linux-3.0.4/include/linux/sonet.h 2011-07-21 22:17:23.000000000 -0400
57841 +++ linux-3.0.4/include/linux/sonet.h 2011-08-23 21:47:56.000000000 -0400
57842 @@ -61,7 +61,7 @@ struct sonet_stats {
57843 #include <asm/atomic.h>
57844
57845 struct k_sonet_stats {
57846 -#define __HANDLE_ITEM(i) atomic_t i
57847 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
57848 __SONET_ITEMS
57849 #undef __HANDLE_ITEM
57850 };
57851 diff -urNp linux-3.0.4/include/linux/sunrpc/clnt.h linux-3.0.4/include/linux/sunrpc/clnt.h
57852 --- linux-3.0.4/include/linux/sunrpc/clnt.h 2011-07-21 22:17:23.000000000 -0400
57853 +++ linux-3.0.4/include/linux/sunrpc/clnt.h 2011-08-23 21:47:56.000000000 -0400
57854 @@ -169,9 +169,9 @@ static inline unsigned short rpc_get_por
57855 {
57856 switch (sap->sa_family) {
57857 case AF_INET:
57858 - return ntohs(((struct sockaddr_in *)sap)->sin_port);
57859 + return ntohs(((const struct sockaddr_in *)sap)->sin_port);
57860 case AF_INET6:
57861 - return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
57862 + return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
57863 }
57864 return 0;
57865 }
57866 @@ -204,7 +204,7 @@ static inline bool __rpc_cmp_addr4(const
57867 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
57868 const struct sockaddr *src)
57869 {
57870 - const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
57871 + const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
57872 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
57873
57874 dsin->sin_family = ssin->sin_family;
57875 @@ -301,7 +301,7 @@ static inline u32 rpc_get_scope_id(const
57876 if (sa->sa_family != AF_INET6)
57877 return 0;
57878
57879 - return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
57880 + return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
57881 }
57882
57883 #endif /* __KERNEL__ */
57884 diff -urNp linux-3.0.4/include/linux/sunrpc/svc_rdma.h linux-3.0.4/include/linux/sunrpc/svc_rdma.h
57885 --- linux-3.0.4/include/linux/sunrpc/svc_rdma.h 2011-07-21 22:17:23.000000000 -0400
57886 +++ linux-3.0.4/include/linux/sunrpc/svc_rdma.h 2011-08-23 21:47:56.000000000 -0400
57887 @@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
57888 extern unsigned int svcrdma_max_requests;
57889 extern unsigned int svcrdma_max_req_size;
57890
57891 -extern atomic_t rdma_stat_recv;
57892 -extern atomic_t rdma_stat_read;
57893 -extern atomic_t rdma_stat_write;
57894 -extern atomic_t rdma_stat_sq_starve;
57895 -extern atomic_t rdma_stat_rq_starve;
57896 -extern atomic_t rdma_stat_rq_poll;
57897 -extern atomic_t rdma_stat_rq_prod;
57898 -extern atomic_t rdma_stat_sq_poll;
57899 -extern atomic_t rdma_stat_sq_prod;
57900 +extern atomic_unchecked_t rdma_stat_recv;
57901 +extern atomic_unchecked_t rdma_stat_read;
57902 +extern atomic_unchecked_t rdma_stat_write;
57903 +extern atomic_unchecked_t rdma_stat_sq_starve;
57904 +extern atomic_unchecked_t rdma_stat_rq_starve;
57905 +extern atomic_unchecked_t rdma_stat_rq_poll;
57906 +extern atomic_unchecked_t rdma_stat_rq_prod;
57907 +extern atomic_unchecked_t rdma_stat_sq_poll;
57908 +extern atomic_unchecked_t rdma_stat_sq_prod;
57909
57910 #define RPCRDMA_VERSION 1
57911
57912 diff -urNp linux-3.0.4/include/linux/sysctl.h linux-3.0.4/include/linux/sysctl.h
57913 --- linux-3.0.4/include/linux/sysctl.h 2011-07-21 22:17:23.000000000 -0400
57914 +++ linux-3.0.4/include/linux/sysctl.h 2011-08-23 21:48:14.000000000 -0400
57915 @@ -155,7 +155,11 @@ enum
57916 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
57917 };
57918
57919 -
57920 +#ifdef CONFIG_PAX_SOFTMODE
57921 +enum {
57922 + PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
57923 +};
57924 +#endif
57925
57926 /* CTL_VM names: */
57927 enum
57928 @@ -967,6 +971,8 @@ typedef int proc_handler (struct ctl_tab
57929
57930 extern int proc_dostring(struct ctl_table *, int,
57931 void __user *, size_t *, loff_t *);
57932 +extern int proc_dostring_modpriv(struct ctl_table *, int,
57933 + void __user *, size_t *, loff_t *);
57934 extern int proc_dointvec(struct ctl_table *, int,
57935 void __user *, size_t *, loff_t *);
57936 extern int proc_dointvec_minmax(struct ctl_table *, int,
57937 diff -urNp linux-3.0.4/include/linux/tty_ldisc.h linux-3.0.4/include/linux/tty_ldisc.h
57938 --- linux-3.0.4/include/linux/tty_ldisc.h 2011-07-21 22:17:23.000000000 -0400
57939 +++ linux-3.0.4/include/linux/tty_ldisc.h 2011-08-23 21:47:56.000000000 -0400
57940 @@ -148,7 +148,7 @@ struct tty_ldisc_ops {
57941
57942 struct module *owner;
57943
57944 - int refcount;
57945 + atomic_t refcount;
57946 };
57947
57948 struct tty_ldisc {
57949 diff -urNp linux-3.0.4/include/linux/types.h linux-3.0.4/include/linux/types.h
57950 --- linux-3.0.4/include/linux/types.h 2011-07-21 22:17:23.000000000 -0400
57951 +++ linux-3.0.4/include/linux/types.h 2011-08-23 21:47:56.000000000 -0400
57952 @@ -213,10 +213,26 @@ typedef struct {
57953 int counter;
57954 } atomic_t;
57955
57956 +#ifdef CONFIG_PAX_REFCOUNT
57957 +typedef struct {
57958 + int counter;
57959 +} atomic_unchecked_t;
57960 +#else
57961 +typedef atomic_t atomic_unchecked_t;
57962 +#endif
57963 +
57964 #ifdef CONFIG_64BIT
57965 typedef struct {
57966 long counter;
57967 } atomic64_t;
57968 +
57969 +#ifdef CONFIG_PAX_REFCOUNT
57970 +typedef struct {
57971 + long counter;
57972 +} atomic64_unchecked_t;
57973 +#else
57974 +typedef atomic64_t atomic64_unchecked_t;
57975 +#endif
57976 #endif
57977
57978 struct list_head {
57979 diff -urNp linux-3.0.4/include/linux/uaccess.h linux-3.0.4/include/linux/uaccess.h
57980 --- linux-3.0.4/include/linux/uaccess.h 2011-07-21 22:17:23.000000000 -0400
57981 +++ linux-3.0.4/include/linux/uaccess.h 2011-08-23 21:47:56.000000000 -0400
57982 @@ -76,11 +76,11 @@ static inline unsigned long __copy_from_
57983 long ret; \
57984 mm_segment_t old_fs = get_fs(); \
57985 \
57986 - set_fs(KERNEL_DS); \
57987 pagefault_disable(); \
57988 + set_fs(KERNEL_DS); \
57989 ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
57990 - pagefault_enable(); \
57991 set_fs(old_fs); \
57992 + pagefault_enable(); \
57993 ret; \
57994 })
57995
57996 diff -urNp linux-3.0.4/include/linux/unaligned/access_ok.h linux-3.0.4/include/linux/unaligned/access_ok.h
57997 --- linux-3.0.4/include/linux/unaligned/access_ok.h 2011-07-21 22:17:23.000000000 -0400
57998 +++ linux-3.0.4/include/linux/unaligned/access_ok.h 2011-08-23 21:47:56.000000000 -0400
57999 @@ -6,32 +6,32 @@
58000
58001 static inline u16 get_unaligned_le16(const void *p)
58002 {
58003 - return le16_to_cpup((__le16 *)p);
58004 + return le16_to_cpup((const __le16 *)p);
58005 }
58006
58007 static inline u32 get_unaligned_le32(const void *p)
58008 {
58009 - return le32_to_cpup((__le32 *)p);
58010 + return le32_to_cpup((const __le32 *)p);
58011 }
58012
58013 static inline u64 get_unaligned_le64(const void *p)
58014 {
58015 - return le64_to_cpup((__le64 *)p);
58016 + return le64_to_cpup((const __le64 *)p);
58017 }
58018
58019 static inline u16 get_unaligned_be16(const void *p)
58020 {
58021 - return be16_to_cpup((__be16 *)p);
58022 + return be16_to_cpup((const __be16 *)p);
58023 }
58024
58025 static inline u32 get_unaligned_be32(const void *p)
58026 {
58027 - return be32_to_cpup((__be32 *)p);
58028 + return be32_to_cpup((const __be32 *)p);
58029 }
58030
58031 static inline u64 get_unaligned_be64(const void *p)
58032 {
58033 - return be64_to_cpup((__be64 *)p);
58034 + return be64_to_cpup((const __be64 *)p);
58035 }
58036
58037 static inline void put_unaligned_le16(u16 val, void *p)
58038 diff -urNp linux-3.0.4/include/linux/vmalloc.h linux-3.0.4/include/linux/vmalloc.h
58039 --- linux-3.0.4/include/linux/vmalloc.h 2011-07-21 22:17:23.000000000 -0400
58040 +++ linux-3.0.4/include/linux/vmalloc.h 2011-08-23 21:47:56.000000000 -0400
58041 @@ -13,6 +13,11 @@ struct vm_area_struct; /* vma defining
58042 #define VM_MAP 0x00000004 /* vmap()ed pages */
58043 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
58044 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
58045 +
58046 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
58047 +#define VM_KERNEXEC 0x00000020 /* allocate from executable kernel memory range */
58048 +#endif
58049 +
58050 /* bits [20..32] reserved for arch specific ioremap internals */
58051
58052 /*
58053 @@ -155,4 +160,103 @@ pcpu_free_vm_areas(struct vm_struct **vm
58054 # endif
58055 #endif
58056
58057 +#define vmalloc(x) \
58058 +({ \
58059 + void *___retval; \
58060 + intoverflow_t ___x = (intoverflow_t)x; \
58061 + if (WARN(___x > ULONG_MAX, "vmalloc size overflow\n")) \
58062 + ___retval = NULL; \
58063 + else \
58064 + ___retval = vmalloc((unsigned long)___x); \
58065 + ___retval; \
58066 +})
58067 +
58068 +#define vzalloc(x) \
58069 +({ \
58070 + void *___retval; \
58071 + intoverflow_t ___x = (intoverflow_t)x; \
58072 + if (WARN(___x > ULONG_MAX, "vzalloc size overflow\n")) \
58073 + ___retval = NULL; \
58074 + else \
58075 + ___retval = vzalloc((unsigned long)___x); \
58076 + ___retval; \
58077 +})
58078 +
58079 +#define __vmalloc(x, y, z) \
58080 +({ \
58081 + void *___retval; \
58082 + intoverflow_t ___x = (intoverflow_t)x; \
58083 + if (WARN(___x > ULONG_MAX, "__vmalloc size overflow\n"))\
58084 + ___retval = NULL; \
58085 + else \
58086 + ___retval = __vmalloc((unsigned long)___x, (y), (z));\
58087 + ___retval; \
58088 +})
58089 +
58090 +#define vmalloc_user(x) \
58091 +({ \
58092 + void *___retval; \
58093 + intoverflow_t ___x = (intoverflow_t)x; \
58094 + if (WARN(___x > ULONG_MAX, "vmalloc_user size overflow\n"))\
58095 + ___retval = NULL; \
58096 + else \
58097 + ___retval = vmalloc_user((unsigned long)___x); \
58098 + ___retval; \
58099 +})
58100 +
58101 +#define vmalloc_exec(x) \
58102 +({ \
58103 + void *___retval; \
58104 + intoverflow_t ___x = (intoverflow_t)x; \
58105 + if (WARN(___x > ULONG_MAX, "vmalloc_exec size overflow\n"))\
58106 + ___retval = NULL; \
58107 + else \
58108 + ___retval = vmalloc_exec((unsigned long)___x); \
58109 + ___retval; \
58110 +})
58111 +
58112 +#define vmalloc_node(x, y) \
58113 +({ \
58114 + void *___retval; \
58115 + intoverflow_t ___x = (intoverflow_t)x; \
58116 + if (WARN(___x > ULONG_MAX, "vmalloc_node size overflow\n"))\
58117 + ___retval = NULL; \
58118 + else \
58119 + ___retval = vmalloc_node((unsigned long)___x, (y));\
58120 + ___retval; \
58121 +})
58122 +
58123 +#define vzalloc_node(x, y) \
58124 +({ \
58125 + void *___retval; \
58126 + intoverflow_t ___x = (intoverflow_t)x; \
58127 + if (WARN(___x > ULONG_MAX, "vzalloc_node size overflow\n"))\
58128 + ___retval = NULL; \
58129 + else \
58130 + ___retval = vzalloc_node((unsigned long)___x, (y));\
58131 + ___retval; \
58132 +})
58133 +
58134 +#define vmalloc_32(x) \
58135 +({ \
58136 + void *___retval; \
58137 + intoverflow_t ___x = (intoverflow_t)x; \
58138 + if (WARN(___x > ULONG_MAX, "vmalloc_32 size overflow\n"))\
58139 + ___retval = NULL; \
58140 + else \
58141 + ___retval = vmalloc_32((unsigned long)___x); \
58142 + ___retval; \
58143 +})
58144 +
58145 +#define vmalloc_32_user(x) \
58146 +({ \
58147 +void *___retval; \
58148 + intoverflow_t ___x = (intoverflow_t)x; \
58149 + if (WARN(___x > ULONG_MAX, "vmalloc_32_user size overflow\n"))\
58150 + ___retval = NULL; \
58151 + else \
58152 + ___retval = vmalloc_32_user((unsigned long)___x);\
58153 + ___retval; \
58154 +})
58155 +
58156 #endif /* _LINUX_VMALLOC_H */
58157 diff -urNp linux-3.0.4/include/linux/vmstat.h linux-3.0.4/include/linux/vmstat.h
58158 --- linux-3.0.4/include/linux/vmstat.h 2011-07-21 22:17:23.000000000 -0400
58159 +++ linux-3.0.4/include/linux/vmstat.h 2011-08-23 21:47:56.000000000 -0400
58160 @@ -87,18 +87,18 @@ static inline void vm_events_fold_cpu(in
58161 /*
58162 * Zone based page accounting with per cpu differentials.
58163 */
58164 -extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
58165 +extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
58166
58167 static inline void zone_page_state_add(long x, struct zone *zone,
58168 enum zone_stat_item item)
58169 {
58170 - atomic_long_add(x, &zone->vm_stat[item]);
58171 - atomic_long_add(x, &vm_stat[item]);
58172 + atomic_long_add_unchecked(x, &zone->vm_stat[item]);
58173 + atomic_long_add_unchecked(x, &vm_stat[item]);
58174 }
58175
58176 static inline unsigned long global_page_state(enum zone_stat_item item)
58177 {
58178 - long x = atomic_long_read(&vm_stat[item]);
58179 + long x = atomic_long_read_unchecked(&vm_stat[item]);
58180 #ifdef CONFIG_SMP
58181 if (x < 0)
58182 x = 0;
58183 @@ -109,7 +109,7 @@ static inline unsigned long global_page_
58184 static inline unsigned long zone_page_state(struct zone *zone,
58185 enum zone_stat_item item)
58186 {
58187 - long x = atomic_long_read(&zone->vm_stat[item]);
58188 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
58189 #ifdef CONFIG_SMP
58190 if (x < 0)
58191 x = 0;
58192 @@ -126,7 +126,7 @@ static inline unsigned long zone_page_st
58193 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
58194 enum zone_stat_item item)
58195 {
58196 - long x = atomic_long_read(&zone->vm_stat[item]);
58197 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
58198
58199 #ifdef CONFIG_SMP
58200 int cpu;
58201 @@ -221,8 +221,8 @@ static inline void __mod_zone_page_state
58202
58203 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
58204 {
58205 - atomic_long_inc(&zone->vm_stat[item]);
58206 - atomic_long_inc(&vm_stat[item]);
58207 + atomic_long_inc_unchecked(&zone->vm_stat[item]);
58208 + atomic_long_inc_unchecked(&vm_stat[item]);
58209 }
58210
58211 static inline void __inc_zone_page_state(struct page *page,
58212 @@ -233,8 +233,8 @@ static inline void __inc_zone_page_state
58213
58214 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
58215 {
58216 - atomic_long_dec(&zone->vm_stat[item]);
58217 - atomic_long_dec(&vm_stat[item]);
58218 + atomic_long_dec_unchecked(&zone->vm_stat[item]);
58219 + atomic_long_dec_unchecked(&vm_stat[item]);
58220 }
58221
58222 static inline void __dec_zone_page_state(struct page *page,
58223 diff -urNp linux-3.0.4/include/media/saa7146_vv.h linux-3.0.4/include/media/saa7146_vv.h
58224 --- linux-3.0.4/include/media/saa7146_vv.h 2011-07-21 22:17:23.000000000 -0400
58225 +++ linux-3.0.4/include/media/saa7146_vv.h 2011-08-24 18:26:09.000000000 -0400
58226 @@ -163,7 +163,7 @@ struct saa7146_ext_vv
58227 int (*std_callback)(struct saa7146_dev*, struct saa7146_standard *);
58228
58229 /* the extension can override this */
58230 - struct v4l2_ioctl_ops ops;
58231 + v4l2_ioctl_ops_no_const ops;
58232 /* pointer to the saa7146 core ops */
58233 const struct v4l2_ioctl_ops *core_ops;
58234
58235 diff -urNp linux-3.0.4/include/media/v4l2-ioctl.h linux-3.0.4/include/media/v4l2-ioctl.h
58236 --- linux-3.0.4/include/media/v4l2-ioctl.h 2011-07-21 22:17:23.000000000 -0400
58237 +++ linux-3.0.4/include/media/v4l2-ioctl.h 2011-08-24 18:25:45.000000000 -0400
58238 @@ -272,6 +272,7 @@ struct v4l2_ioctl_ops {
58239 long (*vidioc_default) (struct file *file, void *fh,
58240 bool valid_prio, int cmd, void *arg);
58241 };
58242 +typedef struct v4l2_ioctl_ops __no_const v4l2_ioctl_ops_no_const;
58243
58244
58245 /* v4l debugging and diagnostics */
58246 diff -urNp linux-3.0.4/include/net/caif/cfctrl.h linux-3.0.4/include/net/caif/cfctrl.h
58247 --- linux-3.0.4/include/net/caif/cfctrl.h 2011-07-21 22:17:23.000000000 -0400
58248 +++ linux-3.0.4/include/net/caif/cfctrl.h 2011-08-23 21:47:56.000000000 -0400
58249 @@ -52,7 +52,7 @@ struct cfctrl_rsp {
58250 void (*radioset_rsp)(void);
58251 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
58252 struct cflayer *client_layer);
58253 -};
58254 +} __no_const;
58255
58256 /* Link Setup Parameters for CAIF-Links. */
58257 struct cfctrl_link_param {
58258 @@ -101,8 +101,8 @@ struct cfctrl_request_info {
58259 struct cfctrl {
58260 struct cfsrvl serv;
58261 struct cfctrl_rsp res;
58262 - atomic_t req_seq_no;
58263 - atomic_t rsp_seq_no;
58264 + atomic_unchecked_t req_seq_no;
58265 + atomic_unchecked_t rsp_seq_no;
58266 struct list_head list;
58267 /* Protects from simultaneous access to first_req list */
58268 spinlock_t info_list_lock;
58269 diff -urNp linux-3.0.4/include/net/flow.h linux-3.0.4/include/net/flow.h
58270 --- linux-3.0.4/include/net/flow.h 2011-07-21 22:17:23.000000000 -0400
58271 +++ linux-3.0.4/include/net/flow.h 2011-08-23 21:47:56.000000000 -0400
58272 @@ -188,6 +188,6 @@ extern struct flow_cache_object *flow_ca
58273 u8 dir, flow_resolve_t resolver, void *ctx);
58274
58275 extern void flow_cache_flush(void);
58276 -extern atomic_t flow_cache_genid;
58277 +extern atomic_unchecked_t flow_cache_genid;
58278
58279 #endif
58280 diff -urNp linux-3.0.4/include/net/inetpeer.h linux-3.0.4/include/net/inetpeer.h
58281 --- linux-3.0.4/include/net/inetpeer.h 2011-07-21 22:17:23.000000000 -0400
58282 +++ linux-3.0.4/include/net/inetpeer.h 2011-08-23 21:47:56.000000000 -0400
58283 @@ -43,8 +43,8 @@ struct inet_peer {
58284 */
58285 union {
58286 struct {
58287 - atomic_t rid; /* Frag reception counter */
58288 - atomic_t ip_id_count; /* IP ID for the next packet */
58289 + atomic_unchecked_t rid; /* Frag reception counter */
58290 + atomic_unchecked_t ip_id_count; /* IP ID for the next packet */
58291 __u32 tcp_ts;
58292 __u32 tcp_ts_stamp;
58293 u32 metrics[RTAX_MAX];
58294 @@ -108,7 +108,7 @@ static inline __u16 inet_getid(struct in
58295 {
58296 more++;
58297 inet_peer_refcheck(p);
58298 - return atomic_add_return(more, &p->ip_id_count) - more;
58299 + return atomic_add_return_unchecked(more, &p->ip_id_count) - more;
58300 }
58301
58302 #endif /* _NET_INETPEER_H */
58303 diff -urNp linux-3.0.4/include/net/ip_fib.h linux-3.0.4/include/net/ip_fib.h
58304 --- linux-3.0.4/include/net/ip_fib.h 2011-07-21 22:17:23.000000000 -0400
58305 +++ linux-3.0.4/include/net/ip_fib.h 2011-08-23 21:47:56.000000000 -0400
58306 @@ -146,7 +146,7 @@ extern __be32 fib_info_update_nh_saddr(s
58307
58308 #define FIB_RES_SADDR(net, res) \
58309 ((FIB_RES_NH(res).nh_saddr_genid == \
58310 - atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
58311 + atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
58312 FIB_RES_NH(res).nh_saddr : \
58313 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
58314 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
58315 diff -urNp linux-3.0.4/include/net/ip_vs.h linux-3.0.4/include/net/ip_vs.h
58316 --- linux-3.0.4/include/net/ip_vs.h 2011-07-21 22:17:23.000000000 -0400
58317 +++ linux-3.0.4/include/net/ip_vs.h 2011-08-23 21:47:56.000000000 -0400
58318 @@ -509,7 +509,7 @@ struct ip_vs_conn {
58319 struct ip_vs_conn *control; /* Master control connection */
58320 atomic_t n_control; /* Number of controlled ones */
58321 struct ip_vs_dest *dest; /* real server */
58322 - atomic_t in_pkts; /* incoming packet counter */
58323 + atomic_unchecked_t in_pkts; /* incoming packet counter */
58324
58325 /* packet transmitter for different forwarding methods. If it
58326 mangles the packet, it must return NF_DROP or better NF_STOLEN,
58327 @@ -647,7 +647,7 @@ struct ip_vs_dest {
58328 __be16 port; /* port number of the server */
58329 union nf_inet_addr addr; /* IP address of the server */
58330 volatile unsigned flags; /* dest status flags */
58331 - atomic_t conn_flags; /* flags to copy to conn */
58332 + atomic_unchecked_t conn_flags; /* flags to copy to conn */
58333 atomic_t weight; /* server weight */
58334
58335 atomic_t refcnt; /* reference counter */
58336 diff -urNp linux-3.0.4/include/net/irda/ircomm_core.h linux-3.0.4/include/net/irda/ircomm_core.h
58337 --- linux-3.0.4/include/net/irda/ircomm_core.h 2011-07-21 22:17:23.000000000 -0400
58338 +++ linux-3.0.4/include/net/irda/ircomm_core.h 2011-08-23 21:47:56.000000000 -0400
58339 @@ -51,7 +51,7 @@ typedef struct {
58340 int (*connect_response)(struct ircomm_cb *, struct sk_buff *);
58341 int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *,
58342 struct ircomm_info *);
58343 -} call_t;
58344 +} __no_const call_t;
58345
58346 struct ircomm_cb {
58347 irda_queue_t queue;
58348 diff -urNp linux-3.0.4/include/net/irda/ircomm_tty.h linux-3.0.4/include/net/irda/ircomm_tty.h
58349 --- linux-3.0.4/include/net/irda/ircomm_tty.h 2011-07-21 22:17:23.000000000 -0400
58350 +++ linux-3.0.4/include/net/irda/ircomm_tty.h 2011-08-23 21:47:56.000000000 -0400
58351 @@ -35,6 +35,7 @@
58352 #include <linux/termios.h>
58353 #include <linux/timer.h>
58354 #include <linux/tty.h> /* struct tty_struct */
58355 +#include <asm/local.h>
58356
58357 #include <net/irda/irias_object.h>
58358 #include <net/irda/ircomm_core.h>
58359 @@ -105,8 +106,8 @@ struct ircomm_tty_cb {
58360 unsigned short close_delay;
58361 unsigned short closing_wait; /* time to wait before closing */
58362
58363 - int open_count;
58364 - int blocked_open; /* # of blocked opens */
58365 + local_t open_count;
58366 + local_t blocked_open; /* # of blocked opens */
58367
58368 /* Protect concurent access to :
58369 * o self->open_count
58370 diff -urNp linux-3.0.4/include/net/iucv/af_iucv.h linux-3.0.4/include/net/iucv/af_iucv.h
58371 --- linux-3.0.4/include/net/iucv/af_iucv.h 2011-07-21 22:17:23.000000000 -0400
58372 +++ linux-3.0.4/include/net/iucv/af_iucv.h 2011-08-23 21:47:56.000000000 -0400
58373 @@ -87,7 +87,7 @@ struct iucv_sock {
58374 struct iucv_sock_list {
58375 struct hlist_head head;
58376 rwlock_t lock;
58377 - atomic_t autobind_name;
58378 + atomic_unchecked_t autobind_name;
58379 };
58380
58381 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
58382 diff -urNp linux-3.0.4/include/net/lapb.h linux-3.0.4/include/net/lapb.h
58383 --- linux-3.0.4/include/net/lapb.h 2011-07-21 22:17:23.000000000 -0400
58384 +++ linux-3.0.4/include/net/lapb.h 2011-08-23 21:47:56.000000000 -0400
58385 @@ -95,7 +95,7 @@ struct lapb_cb {
58386 struct sk_buff_head write_queue;
58387 struct sk_buff_head ack_queue;
58388 unsigned char window;
58389 - struct lapb_register_struct callbacks;
58390 + struct lapb_register_struct *callbacks;
58391
58392 /* FRMR control information */
58393 struct lapb_frame frmr_data;
58394 diff -urNp linux-3.0.4/include/net/neighbour.h linux-3.0.4/include/net/neighbour.h
58395 --- linux-3.0.4/include/net/neighbour.h 2011-07-21 22:17:23.000000000 -0400
58396 +++ linux-3.0.4/include/net/neighbour.h 2011-08-31 18:39:25.000000000 -0400
58397 @@ -124,7 +124,7 @@ struct neigh_ops {
58398 int (*connected_output)(struct sk_buff*);
58399 int (*hh_output)(struct sk_buff*);
58400 int (*queue_xmit)(struct sk_buff*);
58401 -};
58402 +} __do_const;
58403
58404 struct pneigh_entry {
58405 struct pneigh_entry *next;
58406 diff -urNp linux-3.0.4/include/net/netlink.h linux-3.0.4/include/net/netlink.h
58407 --- linux-3.0.4/include/net/netlink.h 2011-07-21 22:17:23.000000000 -0400
58408 +++ linux-3.0.4/include/net/netlink.h 2011-08-23 21:47:56.000000000 -0400
58409 @@ -562,7 +562,7 @@ static inline void *nlmsg_get_pos(struct
58410 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
58411 {
58412 if (mark)
58413 - skb_trim(skb, (unsigned char *) mark - skb->data);
58414 + skb_trim(skb, (const unsigned char *) mark - skb->data);
58415 }
58416
58417 /**
58418 diff -urNp linux-3.0.4/include/net/netns/ipv4.h linux-3.0.4/include/net/netns/ipv4.h
58419 --- linux-3.0.4/include/net/netns/ipv4.h 2011-07-21 22:17:23.000000000 -0400
58420 +++ linux-3.0.4/include/net/netns/ipv4.h 2011-08-23 21:47:56.000000000 -0400
58421 @@ -56,8 +56,8 @@ struct netns_ipv4 {
58422
58423 unsigned int sysctl_ping_group_range[2];
58424
58425 - atomic_t rt_genid;
58426 - atomic_t dev_addr_genid;
58427 + atomic_unchecked_t rt_genid;
58428 + atomic_unchecked_t dev_addr_genid;
58429
58430 #ifdef CONFIG_IP_MROUTE
58431 #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
58432 diff -urNp linux-3.0.4/include/net/sctp/sctp.h linux-3.0.4/include/net/sctp/sctp.h
58433 --- linux-3.0.4/include/net/sctp/sctp.h 2011-07-21 22:17:23.000000000 -0400
58434 +++ linux-3.0.4/include/net/sctp/sctp.h 2011-08-23 21:47:56.000000000 -0400
58435 @@ -315,9 +315,9 @@ do { \
58436
58437 #else /* SCTP_DEBUG */
58438
58439 -#define SCTP_DEBUG_PRINTK(whatever...)
58440 -#define SCTP_DEBUG_PRINTK_CONT(fmt, args...)
58441 -#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
58442 +#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
58443 +#define SCTP_DEBUG_PRINTK_CONT(fmt, args...) do {} while (0)
58444 +#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
58445 #define SCTP_ENABLE_DEBUG
58446 #define SCTP_DISABLE_DEBUG
58447 #define SCTP_ASSERT(expr, str, func)
58448 diff -urNp linux-3.0.4/include/net/sock.h linux-3.0.4/include/net/sock.h
58449 --- linux-3.0.4/include/net/sock.h 2011-07-21 22:17:23.000000000 -0400
58450 +++ linux-3.0.4/include/net/sock.h 2011-08-23 21:47:56.000000000 -0400
58451 @@ -277,7 +277,7 @@ struct sock {
58452 #ifdef CONFIG_RPS
58453 __u32 sk_rxhash;
58454 #endif
58455 - atomic_t sk_drops;
58456 + atomic_unchecked_t sk_drops;
58457 int sk_rcvbuf;
58458
58459 struct sk_filter __rcu *sk_filter;
58460 @@ -1390,7 +1390,7 @@ static inline void sk_nocaps_add(struct
58461 }
58462
58463 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
58464 - char __user *from, char *to,
58465 + char __user *from, unsigned char *to,
58466 int copy, int offset)
58467 {
58468 if (skb->ip_summed == CHECKSUM_NONE) {
58469 diff -urNp linux-3.0.4/include/net/tcp.h linux-3.0.4/include/net/tcp.h
58470 --- linux-3.0.4/include/net/tcp.h 2011-07-21 22:17:23.000000000 -0400
58471 +++ linux-3.0.4/include/net/tcp.h 2011-08-23 21:47:56.000000000 -0400
58472 @@ -1374,8 +1374,8 @@ enum tcp_seq_states {
58473 struct tcp_seq_afinfo {
58474 char *name;
58475 sa_family_t family;
58476 - struct file_operations seq_fops;
58477 - struct seq_operations seq_ops;
58478 + file_operations_no_const seq_fops;
58479 + seq_operations_no_const seq_ops;
58480 };
58481
58482 struct tcp_iter_state {
58483 diff -urNp linux-3.0.4/include/net/udp.h linux-3.0.4/include/net/udp.h
58484 --- linux-3.0.4/include/net/udp.h 2011-07-21 22:17:23.000000000 -0400
58485 +++ linux-3.0.4/include/net/udp.h 2011-08-23 21:47:56.000000000 -0400
58486 @@ -234,8 +234,8 @@ struct udp_seq_afinfo {
58487 char *name;
58488 sa_family_t family;
58489 struct udp_table *udp_table;
58490 - struct file_operations seq_fops;
58491 - struct seq_operations seq_ops;
58492 + file_operations_no_const seq_fops;
58493 + seq_operations_no_const seq_ops;
58494 };
58495
58496 struct udp_iter_state {
58497 diff -urNp linux-3.0.4/include/net/xfrm.h linux-3.0.4/include/net/xfrm.h
58498 --- linux-3.0.4/include/net/xfrm.h 2011-07-21 22:17:23.000000000 -0400
58499 +++ linux-3.0.4/include/net/xfrm.h 2011-08-23 21:47:56.000000000 -0400
58500 @@ -505,7 +505,7 @@ struct xfrm_policy {
58501 struct timer_list timer;
58502
58503 struct flow_cache_object flo;
58504 - atomic_t genid;
58505 + atomic_unchecked_t genid;
58506 u32 priority;
58507 u32 index;
58508 struct xfrm_mark mark;
58509 diff -urNp linux-3.0.4/include/rdma/iw_cm.h linux-3.0.4/include/rdma/iw_cm.h
58510 --- linux-3.0.4/include/rdma/iw_cm.h 2011-07-21 22:17:23.000000000 -0400
58511 +++ linux-3.0.4/include/rdma/iw_cm.h 2011-08-23 21:47:56.000000000 -0400
58512 @@ -120,7 +120,7 @@ struct iw_cm_verbs {
58513 int backlog);
58514
58515 int (*destroy_listen)(struct iw_cm_id *cm_id);
58516 -};
58517 +} __no_const;
58518
58519 /**
58520 * iw_create_cm_id - Create an IW CM identifier.
58521 diff -urNp linux-3.0.4/include/scsi/libfc.h linux-3.0.4/include/scsi/libfc.h
58522 --- linux-3.0.4/include/scsi/libfc.h 2011-07-21 22:17:23.000000000 -0400
58523 +++ linux-3.0.4/include/scsi/libfc.h 2011-08-23 21:47:56.000000000 -0400
58524 @@ -750,6 +750,7 @@ struct libfc_function_template {
58525 */
58526 void (*disc_stop_final) (struct fc_lport *);
58527 };
58528 +typedef struct libfc_function_template __no_const libfc_function_template_no_const;
58529
58530 /**
58531 * struct fc_disc - Discovery context
58532 @@ -853,7 +854,7 @@ struct fc_lport {
58533 struct fc_vport *vport;
58534
58535 /* Operational Information */
58536 - struct libfc_function_template tt;
58537 + libfc_function_template_no_const tt;
58538 u8 link_up;
58539 u8 qfull;
58540 enum fc_lport_state state;
58541 diff -urNp linux-3.0.4/include/scsi/scsi_device.h linux-3.0.4/include/scsi/scsi_device.h
58542 --- linux-3.0.4/include/scsi/scsi_device.h 2011-07-21 22:17:23.000000000 -0400
58543 +++ linux-3.0.4/include/scsi/scsi_device.h 2011-08-23 21:47:56.000000000 -0400
58544 @@ -161,9 +161,9 @@ struct scsi_device {
58545 unsigned int max_device_blocked; /* what device_blocked counts down from */
58546 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
58547
58548 - atomic_t iorequest_cnt;
58549 - atomic_t iodone_cnt;
58550 - atomic_t ioerr_cnt;
58551 + atomic_unchecked_t iorequest_cnt;
58552 + atomic_unchecked_t iodone_cnt;
58553 + atomic_unchecked_t ioerr_cnt;
58554
58555 struct device sdev_gendev,
58556 sdev_dev;
58557 diff -urNp linux-3.0.4/include/scsi/scsi_transport_fc.h linux-3.0.4/include/scsi/scsi_transport_fc.h
58558 --- linux-3.0.4/include/scsi/scsi_transport_fc.h 2011-07-21 22:17:23.000000000 -0400
58559 +++ linux-3.0.4/include/scsi/scsi_transport_fc.h 2011-08-26 19:49:56.000000000 -0400
58560 @@ -711,7 +711,7 @@ struct fc_function_template {
58561 unsigned long show_host_system_hostname:1;
58562
58563 unsigned long disable_target_scan:1;
58564 -};
58565 +} __do_const;
58566
58567
58568 /**
58569 diff -urNp linux-3.0.4/include/sound/ak4xxx-adda.h linux-3.0.4/include/sound/ak4xxx-adda.h
58570 --- linux-3.0.4/include/sound/ak4xxx-adda.h 2011-07-21 22:17:23.000000000 -0400
58571 +++ linux-3.0.4/include/sound/ak4xxx-adda.h 2011-08-23 21:47:56.000000000 -0400
58572 @@ -35,7 +35,7 @@ struct snd_ak4xxx_ops {
58573 void (*write)(struct snd_akm4xxx *ak, int chip, unsigned char reg,
58574 unsigned char val);
58575 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
58576 -};
58577 +} __no_const;
58578
58579 #define AK4XXX_IMAGE_SIZE (AK4XXX_MAX_CHIPS * 16) /* 64 bytes */
58580
58581 diff -urNp linux-3.0.4/include/sound/hwdep.h linux-3.0.4/include/sound/hwdep.h
58582 --- linux-3.0.4/include/sound/hwdep.h 2011-07-21 22:17:23.000000000 -0400
58583 +++ linux-3.0.4/include/sound/hwdep.h 2011-08-23 21:47:56.000000000 -0400
58584 @@ -49,7 +49,7 @@ struct snd_hwdep_ops {
58585 struct snd_hwdep_dsp_status *status);
58586 int (*dsp_load)(struct snd_hwdep *hw,
58587 struct snd_hwdep_dsp_image *image);
58588 -};
58589 +} __no_const;
58590
58591 struct snd_hwdep {
58592 struct snd_card *card;
58593 diff -urNp linux-3.0.4/include/sound/info.h linux-3.0.4/include/sound/info.h
58594 --- linux-3.0.4/include/sound/info.h 2011-07-21 22:17:23.000000000 -0400
58595 +++ linux-3.0.4/include/sound/info.h 2011-08-23 21:47:56.000000000 -0400
58596 @@ -44,7 +44,7 @@ struct snd_info_entry_text {
58597 struct snd_info_buffer *buffer);
58598 void (*write)(struct snd_info_entry *entry,
58599 struct snd_info_buffer *buffer);
58600 -};
58601 +} __no_const;
58602
58603 struct snd_info_entry_ops {
58604 int (*open)(struct snd_info_entry *entry,
58605 diff -urNp linux-3.0.4/include/sound/pcm.h linux-3.0.4/include/sound/pcm.h
58606 --- linux-3.0.4/include/sound/pcm.h 2011-07-21 22:17:23.000000000 -0400
58607 +++ linux-3.0.4/include/sound/pcm.h 2011-08-23 21:47:56.000000000 -0400
58608 @@ -81,6 +81,7 @@ struct snd_pcm_ops {
58609 int (*mmap)(struct snd_pcm_substream *substream, struct vm_area_struct *vma);
58610 int (*ack)(struct snd_pcm_substream *substream);
58611 };
58612 +typedef struct snd_pcm_ops __no_const snd_pcm_ops_no_const;
58613
58614 /*
58615 *
58616 diff -urNp linux-3.0.4/include/sound/sb16_csp.h linux-3.0.4/include/sound/sb16_csp.h
58617 --- linux-3.0.4/include/sound/sb16_csp.h 2011-07-21 22:17:23.000000000 -0400
58618 +++ linux-3.0.4/include/sound/sb16_csp.h 2011-08-23 21:47:56.000000000 -0400
58619 @@ -146,7 +146,7 @@ struct snd_sb_csp_ops {
58620 int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
58621 int (*csp_stop) (struct snd_sb_csp * p);
58622 int (*csp_qsound_transfer) (struct snd_sb_csp * p);
58623 -};
58624 +} __no_const;
58625
58626 /*
58627 * CSP private data
58628 diff -urNp linux-3.0.4/include/sound/soc.h linux-3.0.4/include/sound/soc.h
58629 --- linux-3.0.4/include/sound/soc.h 2011-07-21 22:17:23.000000000 -0400
58630 +++ linux-3.0.4/include/sound/soc.h 2011-08-26 19:49:56.000000000 -0400
58631 @@ -636,7 +636,7 @@ struct snd_soc_platform_driver {
58632
58633 /* platform stream ops */
58634 struct snd_pcm_ops *ops;
58635 -};
58636 +} __do_const;
58637
58638 struct snd_soc_platform {
58639 const char *name;
58640 diff -urNp linux-3.0.4/include/sound/ymfpci.h linux-3.0.4/include/sound/ymfpci.h
58641 --- linux-3.0.4/include/sound/ymfpci.h 2011-07-21 22:17:23.000000000 -0400
58642 +++ linux-3.0.4/include/sound/ymfpci.h 2011-08-23 21:47:56.000000000 -0400
58643 @@ -358,7 +358,7 @@ struct snd_ymfpci {
58644 spinlock_t reg_lock;
58645 spinlock_t voice_lock;
58646 wait_queue_head_t interrupt_sleep;
58647 - atomic_t interrupt_sleep_count;
58648 + atomic_unchecked_t interrupt_sleep_count;
58649 struct snd_info_entry *proc_entry;
58650 const struct firmware *dsp_microcode;
58651 const struct firmware *controller_microcode;
58652 diff -urNp linux-3.0.4/include/target/target_core_base.h linux-3.0.4/include/target/target_core_base.h
58653 --- linux-3.0.4/include/target/target_core_base.h 2011-07-21 22:17:23.000000000 -0400
58654 +++ linux-3.0.4/include/target/target_core_base.h 2011-08-23 21:47:56.000000000 -0400
58655 @@ -364,7 +364,7 @@ struct t10_reservation_ops {
58656 int (*t10_seq_non_holder)(struct se_cmd *, unsigned char *, u32);
58657 int (*t10_pr_register)(struct se_cmd *);
58658 int (*t10_pr_clear)(struct se_cmd *);
58659 -};
58660 +} __no_const;
58661
58662 struct t10_reservation_template {
58663 /* Reservation effects all target ports */
58664 @@ -432,8 +432,8 @@ struct se_transport_task {
58665 atomic_t t_task_cdbs_left;
58666 atomic_t t_task_cdbs_ex_left;
58667 atomic_t t_task_cdbs_timeout_left;
58668 - atomic_t t_task_cdbs_sent;
58669 - atomic_t t_transport_aborted;
58670 + atomic_unchecked_t t_task_cdbs_sent;
58671 + atomic_unchecked_t t_transport_aborted;
58672 atomic_t t_transport_active;
58673 atomic_t t_transport_complete;
58674 atomic_t t_transport_queue_active;
58675 @@ -774,7 +774,7 @@ struct se_device {
58676 atomic_t active_cmds;
58677 atomic_t simple_cmds;
58678 atomic_t depth_left;
58679 - atomic_t dev_ordered_id;
58680 + atomic_unchecked_t dev_ordered_id;
58681 atomic_t dev_tur_active;
58682 atomic_t execute_tasks;
58683 atomic_t dev_status_thr_count;
58684 diff -urNp linux-3.0.4/include/trace/events/irq.h linux-3.0.4/include/trace/events/irq.h
58685 --- linux-3.0.4/include/trace/events/irq.h 2011-07-21 22:17:23.000000000 -0400
58686 +++ linux-3.0.4/include/trace/events/irq.h 2011-08-23 21:47:56.000000000 -0400
58687 @@ -36,7 +36,7 @@ struct softirq_action;
58688 */
58689 TRACE_EVENT(irq_handler_entry,
58690
58691 - TP_PROTO(int irq, struct irqaction *action),
58692 + TP_PROTO(int irq, const struct irqaction *action),
58693
58694 TP_ARGS(irq, action),
58695
58696 @@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
58697 */
58698 TRACE_EVENT(irq_handler_exit,
58699
58700 - TP_PROTO(int irq, struct irqaction *action, int ret),
58701 + TP_PROTO(int irq, const struct irqaction *action, int ret),
58702
58703 TP_ARGS(irq, action, ret),
58704
58705 diff -urNp linux-3.0.4/include/video/udlfb.h linux-3.0.4/include/video/udlfb.h
58706 --- linux-3.0.4/include/video/udlfb.h 2011-07-21 22:17:23.000000000 -0400
58707 +++ linux-3.0.4/include/video/udlfb.h 2011-08-23 21:47:56.000000000 -0400
58708 @@ -51,10 +51,10 @@ struct dlfb_data {
58709 int base8;
58710 u32 pseudo_palette[256];
58711 /* blit-only rendering path metrics, exposed through sysfs */
58712 - atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
58713 - atomic_t bytes_identical; /* saved effort with backbuffer comparison */
58714 - atomic_t bytes_sent; /* to usb, after compression including overhead */
58715 - atomic_t cpu_kcycles_used; /* transpired during pixel processing */
58716 + atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
58717 + atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
58718 + atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
58719 + atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
58720 };
58721
58722 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
58723 diff -urNp linux-3.0.4/include/video/uvesafb.h linux-3.0.4/include/video/uvesafb.h
58724 --- linux-3.0.4/include/video/uvesafb.h 2011-07-21 22:17:23.000000000 -0400
58725 +++ linux-3.0.4/include/video/uvesafb.h 2011-08-23 21:47:56.000000000 -0400
58726 @@ -177,6 +177,7 @@ struct uvesafb_par {
58727 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
58728 u8 pmi_setpal; /* PMI for palette changes */
58729 u16 *pmi_base; /* protected mode interface location */
58730 + u8 *pmi_code; /* protected mode code location */
58731 void *pmi_start;
58732 void *pmi_pal;
58733 u8 *vbe_state_orig; /*
58734 diff -urNp linux-3.0.4/init/do_mounts.c linux-3.0.4/init/do_mounts.c
58735 --- linux-3.0.4/init/do_mounts.c 2011-07-21 22:17:23.000000000 -0400
58736 +++ linux-3.0.4/init/do_mounts.c 2011-08-23 21:47:56.000000000 -0400
58737 @@ -287,7 +287,7 @@ static void __init get_fs_names(char *pa
58738
58739 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
58740 {
58741 - int err = sys_mount(name, "/root", fs, flags, data);
58742 + int err = sys_mount((__force char __user *)name, (__force char __user *)"/root", (__force char __user *)fs, flags, (__force void __user *)data);
58743 if (err)
58744 return err;
58745
58746 @@ -383,18 +383,18 @@ void __init change_floppy(char *fmt, ...
58747 va_start(args, fmt);
58748 vsprintf(buf, fmt, args);
58749 va_end(args);
58750 - fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
58751 + fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
58752 if (fd >= 0) {
58753 sys_ioctl(fd, FDEJECT, 0);
58754 sys_close(fd);
58755 }
58756 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
58757 - fd = sys_open("/dev/console", O_RDWR, 0);
58758 + fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
58759 if (fd >= 0) {
58760 sys_ioctl(fd, TCGETS, (long)&termios);
58761 termios.c_lflag &= ~ICANON;
58762 sys_ioctl(fd, TCSETSF, (long)&termios);
58763 - sys_read(fd, &c, 1);
58764 + sys_read(fd, (char __user *)&c, 1);
58765 termios.c_lflag |= ICANON;
58766 sys_ioctl(fd, TCSETSF, (long)&termios);
58767 sys_close(fd);
58768 @@ -488,6 +488,6 @@ void __init prepare_namespace(void)
58769 mount_root();
58770 out:
58771 devtmpfs_mount("dev");
58772 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
58773 + sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
58774 sys_chroot((const char __user __force *)".");
58775 }
58776 diff -urNp linux-3.0.4/init/do_mounts.h linux-3.0.4/init/do_mounts.h
58777 --- linux-3.0.4/init/do_mounts.h 2011-07-21 22:17:23.000000000 -0400
58778 +++ linux-3.0.4/init/do_mounts.h 2011-08-23 21:47:56.000000000 -0400
58779 @@ -15,15 +15,15 @@ extern int root_mountflags;
58780
58781 static inline int create_dev(char *name, dev_t dev)
58782 {
58783 - sys_unlink(name);
58784 - return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
58785 + sys_unlink((__force char __user *)name);
58786 + return sys_mknod((__force char __user *)name, S_IFBLK|0600, new_encode_dev(dev));
58787 }
58788
58789 #if BITS_PER_LONG == 32
58790 static inline u32 bstat(char *name)
58791 {
58792 struct stat64 stat;
58793 - if (sys_stat64(name, &stat) != 0)
58794 + if (sys_stat64((__force char __user *)name, (__force struct stat64 __user *)&stat) != 0)
58795 return 0;
58796 if (!S_ISBLK(stat.st_mode))
58797 return 0;
58798 diff -urNp linux-3.0.4/init/do_mounts_initrd.c linux-3.0.4/init/do_mounts_initrd.c
58799 --- linux-3.0.4/init/do_mounts_initrd.c 2011-07-21 22:17:23.000000000 -0400
58800 +++ linux-3.0.4/init/do_mounts_initrd.c 2011-08-23 21:47:56.000000000 -0400
58801 @@ -44,13 +44,13 @@ static void __init handle_initrd(void)
58802 create_dev("/dev/root.old", Root_RAM0);
58803 /* mount initrd on rootfs' /root */
58804 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
58805 - sys_mkdir("/old", 0700);
58806 - root_fd = sys_open("/", 0, 0);
58807 - old_fd = sys_open("/old", 0, 0);
58808 + sys_mkdir((__force const char __user *)"/old", 0700);
58809 + root_fd = sys_open((__force const char __user *)"/", 0, 0);
58810 + old_fd = sys_open((__force const char __user *)"/old", 0, 0);
58811 /* move initrd over / and chdir/chroot in initrd root */
58812 - sys_chdir("/root");
58813 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
58814 - sys_chroot(".");
58815 + sys_chdir((__force const char __user *)"/root");
58816 + sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
58817 + sys_chroot((__force const char __user *)".");
58818
58819 /*
58820 * In case that a resume from disk is carried out by linuxrc or one of
58821 @@ -67,15 +67,15 @@ static void __init handle_initrd(void)
58822
58823 /* move initrd to rootfs' /old */
58824 sys_fchdir(old_fd);
58825 - sys_mount("/", ".", NULL, MS_MOVE, NULL);
58826 + sys_mount((__force char __user *)"/", (__force char __user *)".", NULL, MS_MOVE, NULL);
58827 /* switch root and cwd back to / of rootfs */
58828 sys_fchdir(root_fd);
58829 - sys_chroot(".");
58830 + sys_chroot((__force const char __user *)".");
58831 sys_close(old_fd);
58832 sys_close(root_fd);
58833
58834 if (new_decode_dev(real_root_dev) == Root_RAM0) {
58835 - sys_chdir("/old");
58836 + sys_chdir((__force const char __user *)"/old");
58837 return;
58838 }
58839
58840 @@ -83,17 +83,17 @@ static void __init handle_initrd(void)
58841 mount_root();
58842
58843 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
58844 - error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
58845 + error = sys_mount((__force char __user *)"/old", (__force char __user *)"/root/initrd", NULL, MS_MOVE, NULL);
58846 if (!error)
58847 printk("okay\n");
58848 else {
58849 - int fd = sys_open("/dev/root.old", O_RDWR, 0);
58850 + int fd = sys_open((__force const char __user *)"/dev/root.old", O_RDWR, 0);
58851 if (error == -ENOENT)
58852 printk("/initrd does not exist. Ignored.\n");
58853 else
58854 printk("failed\n");
58855 printk(KERN_NOTICE "Unmounting old root\n");
58856 - sys_umount("/old", MNT_DETACH);
58857 + sys_umount((__force char __user *)"/old", MNT_DETACH);
58858 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
58859 if (fd < 0) {
58860 error = fd;
58861 @@ -116,11 +116,11 @@ int __init initrd_load(void)
58862 * mounted in the normal path.
58863 */
58864 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
58865 - sys_unlink("/initrd.image");
58866 + sys_unlink((__force const char __user *)"/initrd.image");
58867 handle_initrd();
58868 return 1;
58869 }
58870 }
58871 - sys_unlink("/initrd.image");
58872 + sys_unlink((__force const char __user *)"/initrd.image");
58873 return 0;
58874 }
58875 diff -urNp linux-3.0.4/init/do_mounts_md.c linux-3.0.4/init/do_mounts_md.c
58876 --- linux-3.0.4/init/do_mounts_md.c 2011-07-21 22:17:23.000000000 -0400
58877 +++ linux-3.0.4/init/do_mounts_md.c 2011-08-23 21:47:56.000000000 -0400
58878 @@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
58879 partitioned ? "_d" : "", minor,
58880 md_setup_args[ent].device_names);
58881
58882 - fd = sys_open(name, 0, 0);
58883 + fd = sys_open((__force char __user *)name, 0, 0);
58884 if (fd < 0) {
58885 printk(KERN_ERR "md: open failed - cannot start "
58886 "array %s\n", name);
58887 @@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
58888 * array without it
58889 */
58890 sys_close(fd);
58891 - fd = sys_open(name, 0, 0);
58892 + fd = sys_open((__force char __user *)name, 0, 0);
58893 sys_ioctl(fd, BLKRRPART, 0);
58894 }
58895 sys_close(fd);
58896 diff -urNp linux-3.0.4/init/initramfs.c linux-3.0.4/init/initramfs.c
58897 --- linux-3.0.4/init/initramfs.c 2011-07-21 22:17:23.000000000 -0400
58898 +++ linux-3.0.4/init/initramfs.c 2011-08-23 21:47:56.000000000 -0400
58899 @@ -74,7 +74,7 @@ static void __init free_hash(void)
58900 }
58901 }
58902
58903 -static long __init do_utime(char __user *filename, time_t mtime)
58904 +static long __init do_utime(__force char __user *filename, time_t mtime)
58905 {
58906 struct timespec t[2];
58907
58908 @@ -109,7 +109,7 @@ static void __init dir_utime(void)
58909 struct dir_entry *de, *tmp;
58910 list_for_each_entry_safe(de, tmp, &dir_list, list) {
58911 list_del(&de->list);
58912 - do_utime(de->name, de->mtime);
58913 + do_utime((__force char __user *)de->name, de->mtime);
58914 kfree(de->name);
58915 kfree(de);
58916 }
58917 @@ -271,7 +271,7 @@ static int __init maybe_link(void)
58918 if (nlink >= 2) {
58919 char *old = find_link(major, minor, ino, mode, collected);
58920 if (old)
58921 - return (sys_link(old, collected) < 0) ? -1 : 1;
58922 + return (sys_link((__force char __user *)old, (__force char __user *)collected) < 0) ? -1 : 1;
58923 }
58924 return 0;
58925 }
58926 @@ -280,11 +280,11 @@ static void __init clean_path(char *path
58927 {
58928 struct stat st;
58929
58930 - if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
58931 + if (!sys_newlstat((__force char __user *)path, (__force struct stat __user *)&st) && (st.st_mode^mode) & S_IFMT) {
58932 if (S_ISDIR(st.st_mode))
58933 - sys_rmdir(path);
58934 + sys_rmdir((__force char __user *)path);
58935 else
58936 - sys_unlink(path);
58937 + sys_unlink((__force char __user *)path);
58938 }
58939 }
58940
58941 @@ -305,7 +305,7 @@ static int __init do_name(void)
58942 int openflags = O_WRONLY|O_CREAT;
58943 if (ml != 1)
58944 openflags |= O_TRUNC;
58945 - wfd = sys_open(collected, openflags, mode);
58946 + wfd = sys_open((__force char __user *)collected, openflags, mode);
58947
58948 if (wfd >= 0) {
58949 sys_fchown(wfd, uid, gid);
58950 @@ -317,17 +317,17 @@ static int __init do_name(void)
58951 }
58952 }
58953 } else if (S_ISDIR(mode)) {
58954 - sys_mkdir(collected, mode);
58955 - sys_chown(collected, uid, gid);
58956 - sys_chmod(collected, mode);
58957 + sys_mkdir((__force char __user *)collected, mode);
58958 + sys_chown((__force char __user *)collected, uid, gid);
58959 + sys_chmod((__force char __user *)collected, mode);
58960 dir_add(collected, mtime);
58961 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
58962 S_ISFIFO(mode) || S_ISSOCK(mode)) {
58963 if (maybe_link() == 0) {
58964 - sys_mknod(collected, mode, rdev);
58965 - sys_chown(collected, uid, gid);
58966 - sys_chmod(collected, mode);
58967 - do_utime(collected, mtime);
58968 + sys_mknod((__force char __user *)collected, mode, rdev);
58969 + sys_chown((__force char __user *)collected, uid, gid);
58970 + sys_chmod((__force char __user *)collected, mode);
58971 + do_utime((__force char __user *)collected, mtime);
58972 }
58973 }
58974 return 0;
58975 @@ -336,15 +336,15 @@ static int __init do_name(void)
58976 static int __init do_copy(void)
58977 {
58978 if (count >= body_len) {
58979 - sys_write(wfd, victim, body_len);
58980 + sys_write(wfd, (__force char __user *)victim, body_len);
58981 sys_close(wfd);
58982 - do_utime(vcollected, mtime);
58983 + do_utime((__force char __user *)vcollected, mtime);
58984 kfree(vcollected);
58985 eat(body_len);
58986 state = SkipIt;
58987 return 0;
58988 } else {
58989 - sys_write(wfd, victim, count);
58990 + sys_write(wfd, (__force char __user *)victim, count);
58991 body_len -= count;
58992 eat(count);
58993 return 1;
58994 @@ -355,9 +355,9 @@ static int __init do_symlink(void)
58995 {
58996 collected[N_ALIGN(name_len) + body_len] = '\0';
58997 clean_path(collected, 0);
58998 - sys_symlink(collected + N_ALIGN(name_len), collected);
58999 - sys_lchown(collected, uid, gid);
59000 - do_utime(collected, mtime);
59001 + sys_symlink((__force char __user *)collected + N_ALIGN(name_len), (__force char __user *)collected);
59002 + sys_lchown((__force char __user *)collected, uid, gid);
59003 + do_utime((__force char __user *)collected, mtime);
59004 state = SkipIt;
59005 next_state = Reset;
59006 return 0;
59007 diff -urNp linux-3.0.4/init/Kconfig linux-3.0.4/init/Kconfig
59008 --- linux-3.0.4/init/Kconfig 2011-07-21 22:17:23.000000000 -0400
59009 +++ linux-3.0.4/init/Kconfig 2011-08-23 21:47:56.000000000 -0400
59010 @@ -1195,7 +1195,7 @@ config SLUB_DEBUG
59011
59012 config COMPAT_BRK
59013 bool "Disable heap randomization"
59014 - default y
59015 + default n
59016 help
59017 Randomizing heap placement makes heap exploits harder, but it
59018 also breaks ancient binaries (including anything libc5 based).
59019 diff -urNp linux-3.0.4/init/main.c linux-3.0.4/init/main.c
59020 --- linux-3.0.4/init/main.c 2011-07-21 22:17:23.000000000 -0400
59021 +++ linux-3.0.4/init/main.c 2011-08-23 21:48:14.000000000 -0400
59022 @@ -96,6 +96,8 @@ static inline void mark_rodata_ro(void)
59023 extern void tc_init(void);
59024 #endif
59025
59026 +extern void grsecurity_init(void);
59027 +
59028 /*
59029 * Debug helper: via this flag we know that we are in 'early bootup code'
59030 * where only the boot processor is running with IRQ disabled. This means
59031 @@ -149,6 +151,49 @@ static int __init set_reset_devices(char
59032
59033 __setup("reset_devices", set_reset_devices);
59034
59035 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
59036 +extern char pax_enter_kernel_user[];
59037 +extern char pax_exit_kernel_user[];
59038 +extern pgdval_t clone_pgd_mask;
59039 +#endif
59040 +
59041 +#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
59042 +static int __init setup_pax_nouderef(char *str)
59043 +{
59044 +#ifdef CONFIG_X86_32
59045 + unsigned int cpu;
59046 + struct desc_struct *gdt;
59047 +
59048 + for (cpu = 0; cpu < NR_CPUS; cpu++) {
59049 + gdt = get_cpu_gdt_table(cpu);
59050 + gdt[GDT_ENTRY_KERNEL_DS].type = 3;
59051 + gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
59052 + gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
59053 + gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
59054 + }
59055 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
59056 +#else
59057 + memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
59058 + memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
59059 + clone_pgd_mask = ~(pgdval_t)0UL;
59060 +#endif
59061 +
59062 + return 0;
59063 +}
59064 +early_param("pax_nouderef", setup_pax_nouderef);
59065 +#endif
59066 +
59067 +#ifdef CONFIG_PAX_SOFTMODE
59068 +int pax_softmode;
59069 +
59070 +static int __init setup_pax_softmode(char *str)
59071 +{
59072 + get_option(&str, &pax_softmode);
59073 + return 1;
59074 +}
59075 +__setup("pax_softmode=", setup_pax_softmode);
59076 +#endif
59077 +
59078 static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
59079 const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
59080 static const char *panic_later, *panic_param;
59081 @@ -667,6 +712,7 @@ int __init_or_module do_one_initcall(ini
59082 {
59083 int count = preempt_count();
59084 int ret;
59085 + const char *msg1 = "", *msg2 = "";
59086
59087 if (initcall_debug)
59088 ret = do_one_initcall_debug(fn);
59089 @@ -679,15 +725,15 @@ int __init_or_module do_one_initcall(ini
59090 sprintf(msgbuf, "error code %d ", ret);
59091
59092 if (preempt_count() != count) {
59093 - strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
59094 + msg1 = " preemption imbalance";
59095 preempt_count() = count;
59096 }
59097 if (irqs_disabled()) {
59098 - strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
59099 + msg2 = " disabled interrupts";
59100 local_irq_enable();
59101 }
59102 - if (msgbuf[0]) {
59103 - printk("initcall %pF returned with %s\n", fn, msgbuf);
59104 + if (msgbuf[0] || *msg1 || *msg2) {
59105 + printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
59106 }
59107
59108 return ret;
59109 @@ -805,7 +851,7 @@ static int __init kernel_init(void * unu
59110 do_basic_setup();
59111
59112 /* Open the /dev/console on the rootfs, this should never fail */
59113 - if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
59114 + if (sys_open((__force const char __user *) "/dev/console", O_RDWR, 0) < 0)
59115 printk(KERN_WARNING "Warning: unable to open an initial console.\n");
59116
59117 (void) sys_dup(0);
59118 @@ -818,11 +864,13 @@ static int __init kernel_init(void * unu
59119 if (!ramdisk_execute_command)
59120 ramdisk_execute_command = "/init";
59121
59122 - if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
59123 + if (sys_access((__force const char __user *) ramdisk_execute_command, 0) != 0) {
59124 ramdisk_execute_command = NULL;
59125 prepare_namespace();
59126 }
59127
59128 + grsecurity_init();
59129 +
59130 /*
59131 * Ok, we have completed the initial bootup, and
59132 * we're essentially up and running. Get rid of the
59133 diff -urNp linux-3.0.4/ipc/mqueue.c linux-3.0.4/ipc/mqueue.c
59134 --- linux-3.0.4/ipc/mqueue.c 2011-07-21 22:17:23.000000000 -0400
59135 +++ linux-3.0.4/ipc/mqueue.c 2011-08-23 21:48:14.000000000 -0400
59136 @@ -154,6 +154,7 @@ static struct inode *mqueue_get_inode(st
59137 mq_bytes = (mq_msg_tblsz +
59138 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
59139
59140 + gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
59141 spin_lock(&mq_lock);
59142 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
59143 u->mq_bytes + mq_bytes >
59144 diff -urNp linux-3.0.4/ipc/msg.c linux-3.0.4/ipc/msg.c
59145 --- linux-3.0.4/ipc/msg.c 2011-07-21 22:17:23.000000000 -0400
59146 +++ linux-3.0.4/ipc/msg.c 2011-08-23 21:47:56.000000000 -0400
59147 @@ -309,18 +309,19 @@ static inline int msg_security(struct ke
59148 return security_msg_queue_associate(msq, msgflg);
59149 }
59150
59151 +static struct ipc_ops msg_ops = {
59152 + .getnew = newque,
59153 + .associate = msg_security,
59154 + .more_checks = NULL
59155 +};
59156 +
59157 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
59158 {
59159 struct ipc_namespace *ns;
59160 - struct ipc_ops msg_ops;
59161 struct ipc_params msg_params;
59162
59163 ns = current->nsproxy->ipc_ns;
59164
59165 - msg_ops.getnew = newque;
59166 - msg_ops.associate = msg_security;
59167 - msg_ops.more_checks = NULL;
59168 -
59169 msg_params.key = key;
59170 msg_params.flg = msgflg;
59171
59172 diff -urNp linux-3.0.4/ipc/sem.c linux-3.0.4/ipc/sem.c
59173 --- linux-3.0.4/ipc/sem.c 2011-09-02 18:11:21.000000000 -0400
59174 +++ linux-3.0.4/ipc/sem.c 2011-08-23 21:48:14.000000000 -0400
59175 @@ -318,10 +318,15 @@ static inline int sem_more_checks(struct
59176 return 0;
59177 }
59178
59179 +static struct ipc_ops sem_ops = {
59180 + .getnew = newary,
59181 + .associate = sem_security,
59182 + .more_checks = sem_more_checks
59183 +};
59184 +
59185 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
59186 {
59187 struct ipc_namespace *ns;
59188 - struct ipc_ops sem_ops;
59189 struct ipc_params sem_params;
59190
59191 ns = current->nsproxy->ipc_ns;
59192 @@ -329,10 +334,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int,
59193 if (nsems < 0 || nsems > ns->sc_semmsl)
59194 return -EINVAL;
59195
59196 - sem_ops.getnew = newary;
59197 - sem_ops.associate = sem_security;
59198 - sem_ops.more_checks = sem_more_checks;
59199 -
59200 sem_params.key = key;
59201 sem_params.flg = semflg;
59202 sem_params.u.nsems = nsems;
59203 @@ -854,6 +855,8 @@ static int semctl_main(struct ipc_namesp
59204 int nsems;
59205 struct list_head tasks;
59206
59207 + pax_track_stack();
59208 +
59209 sma = sem_lock_check(ns, semid);
59210 if (IS_ERR(sma))
59211 return PTR_ERR(sma);
59212 @@ -1301,6 +1304,8 @@ SYSCALL_DEFINE4(semtimedop, int, semid,
59213 struct ipc_namespace *ns;
59214 struct list_head tasks;
59215
59216 + pax_track_stack();
59217 +
59218 ns = current->nsproxy->ipc_ns;
59219
59220 if (nsops < 1 || semid < 0)
59221 diff -urNp linux-3.0.4/ipc/shm.c linux-3.0.4/ipc/shm.c
59222 --- linux-3.0.4/ipc/shm.c 2011-07-21 22:17:23.000000000 -0400
59223 +++ linux-3.0.4/ipc/shm.c 2011-08-23 21:48:14.000000000 -0400
59224 @@ -69,6 +69,14 @@ static void shm_destroy (struct ipc_name
59225 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
59226 #endif
59227
59228 +#ifdef CONFIG_GRKERNSEC
59229 +extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
59230 + const time_t shm_createtime, const uid_t cuid,
59231 + const int shmid);
59232 +extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
59233 + const time_t shm_createtime);
59234 +#endif
59235 +
59236 void shm_init_ns(struct ipc_namespace *ns)
59237 {
59238 ns->shm_ctlmax = SHMMAX;
59239 @@ -401,6 +409,14 @@ static int newseg(struct ipc_namespace *
59240 shp->shm_lprid = 0;
59241 shp->shm_atim = shp->shm_dtim = 0;
59242 shp->shm_ctim = get_seconds();
59243 +#ifdef CONFIG_GRKERNSEC
59244 + {
59245 + struct timespec timeval;
59246 + do_posix_clock_monotonic_gettime(&timeval);
59247 +
59248 + shp->shm_createtime = timeval.tv_sec;
59249 + }
59250 +#endif
59251 shp->shm_segsz = size;
59252 shp->shm_nattch = 0;
59253 shp->shm_file = file;
59254 @@ -451,18 +467,19 @@ static inline int shm_more_checks(struct
59255 return 0;
59256 }
59257
59258 +static struct ipc_ops shm_ops = {
59259 + .getnew = newseg,
59260 + .associate = shm_security,
59261 + .more_checks = shm_more_checks
59262 +};
59263 +
59264 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
59265 {
59266 struct ipc_namespace *ns;
59267 - struct ipc_ops shm_ops;
59268 struct ipc_params shm_params;
59269
59270 ns = current->nsproxy->ipc_ns;
59271
59272 - shm_ops.getnew = newseg;
59273 - shm_ops.associate = shm_security;
59274 - shm_ops.more_checks = shm_more_checks;
59275 -
59276 shm_params.key = key;
59277 shm_params.flg = shmflg;
59278 shm_params.u.size = size;
59279 @@ -762,8 +779,6 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int,
59280 case SHM_LOCK:
59281 case SHM_UNLOCK:
59282 {
59283 - struct file *uninitialized_var(shm_file);
59284 -
59285 lru_add_drain_all(); /* drain pagevecs to lru lists */
59286
59287 shp = shm_lock_check(ns, shmid);
59288 @@ -896,9 +911,21 @@ long do_shmat(int shmid, char __user *sh
59289 if (err)
59290 goto out_unlock;
59291
59292 +#ifdef CONFIG_GRKERNSEC
59293 + if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
59294 + shp->shm_perm.cuid, shmid) ||
59295 + !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
59296 + err = -EACCES;
59297 + goto out_unlock;
59298 + }
59299 +#endif
59300 +
59301 path = shp->shm_file->f_path;
59302 path_get(&path);
59303 shp->shm_nattch++;
59304 +#ifdef CONFIG_GRKERNSEC
59305 + shp->shm_lapid = current->pid;
59306 +#endif
59307 size = i_size_read(path.dentry->d_inode);
59308 shm_unlock(shp);
59309
59310 diff -urNp linux-3.0.4/kernel/acct.c linux-3.0.4/kernel/acct.c
59311 --- linux-3.0.4/kernel/acct.c 2011-07-21 22:17:23.000000000 -0400
59312 +++ linux-3.0.4/kernel/acct.c 2011-08-23 21:47:56.000000000 -0400
59313 @@ -570,7 +570,7 @@ static void do_acct_process(struct bsd_a
59314 */
59315 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
59316 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
59317 - file->f_op->write(file, (char *)&ac,
59318 + file->f_op->write(file, (__force char __user *)&ac,
59319 sizeof(acct_t), &file->f_pos);
59320 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
59321 set_fs(fs);
59322 diff -urNp linux-3.0.4/kernel/audit.c linux-3.0.4/kernel/audit.c
59323 --- linux-3.0.4/kernel/audit.c 2011-07-21 22:17:23.000000000 -0400
59324 +++ linux-3.0.4/kernel/audit.c 2011-08-23 21:47:56.000000000 -0400
59325 @@ -112,7 +112,7 @@ u32 audit_sig_sid = 0;
59326 3) suppressed due to audit_rate_limit
59327 4) suppressed due to audit_backlog_limit
59328 */
59329 -static atomic_t audit_lost = ATOMIC_INIT(0);
59330 +static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
59331
59332 /* The netlink socket. */
59333 static struct sock *audit_sock;
59334 @@ -234,7 +234,7 @@ void audit_log_lost(const char *message)
59335 unsigned long now;
59336 int print;
59337
59338 - atomic_inc(&audit_lost);
59339 + atomic_inc_unchecked(&audit_lost);
59340
59341 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
59342
59343 @@ -253,7 +253,7 @@ void audit_log_lost(const char *message)
59344 printk(KERN_WARNING
59345 "audit: audit_lost=%d audit_rate_limit=%d "
59346 "audit_backlog_limit=%d\n",
59347 - atomic_read(&audit_lost),
59348 + atomic_read_unchecked(&audit_lost),
59349 audit_rate_limit,
59350 audit_backlog_limit);
59351 audit_panic(message);
59352 @@ -686,7 +686,7 @@ static int audit_receive_msg(struct sk_b
59353 status_set.pid = audit_pid;
59354 status_set.rate_limit = audit_rate_limit;
59355 status_set.backlog_limit = audit_backlog_limit;
59356 - status_set.lost = atomic_read(&audit_lost);
59357 + status_set.lost = atomic_read_unchecked(&audit_lost);
59358 status_set.backlog = skb_queue_len(&audit_skb_queue);
59359 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
59360 &status_set, sizeof(status_set));
59361 diff -urNp linux-3.0.4/kernel/auditsc.c linux-3.0.4/kernel/auditsc.c
59362 --- linux-3.0.4/kernel/auditsc.c 2011-07-21 22:17:23.000000000 -0400
59363 +++ linux-3.0.4/kernel/auditsc.c 2011-08-23 21:47:56.000000000 -0400
59364 @@ -2118,7 +2118,7 @@ int auditsc_get_stamp(struct audit_conte
59365 }
59366
59367 /* global counter which is incremented every time something logs in */
59368 -static atomic_t session_id = ATOMIC_INIT(0);
59369 +static atomic_unchecked_t session_id = ATOMIC_INIT(0);
59370
59371 /**
59372 * audit_set_loginuid - set a task's audit_context loginuid
59373 @@ -2131,7 +2131,7 @@ static atomic_t session_id = ATOMIC_INIT
59374 */
59375 int audit_set_loginuid(struct task_struct *task, uid_t loginuid)
59376 {
59377 - unsigned int sessionid = atomic_inc_return(&session_id);
59378 + unsigned int sessionid = atomic_inc_return_unchecked(&session_id);
59379 struct audit_context *context = task->audit_context;
59380
59381 if (context && context->in_syscall) {
59382 diff -urNp linux-3.0.4/kernel/capability.c linux-3.0.4/kernel/capability.c
59383 --- linux-3.0.4/kernel/capability.c 2011-07-21 22:17:23.000000000 -0400
59384 +++ linux-3.0.4/kernel/capability.c 2011-08-23 21:48:14.000000000 -0400
59385 @@ -202,6 +202,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_
59386 * before modification is attempted and the application
59387 * fails.
59388 */
59389 + if (tocopy > ARRAY_SIZE(kdata))
59390 + return -EFAULT;
59391 +
59392 if (copy_to_user(dataptr, kdata, tocopy
59393 * sizeof(struct __user_cap_data_struct))) {
59394 return -EFAULT;
59395 @@ -374,7 +377,7 @@ bool ns_capable(struct user_namespace *n
59396 BUG();
59397 }
59398
59399 - if (security_capable(ns, current_cred(), cap) == 0) {
59400 + if (security_capable(ns, current_cred(), cap) == 0 && gr_is_capable(cap)) {
59401 current->flags |= PF_SUPERPRIV;
59402 return true;
59403 }
59404 @@ -382,6 +385,27 @@ bool ns_capable(struct user_namespace *n
59405 }
59406 EXPORT_SYMBOL(ns_capable);
59407
59408 +bool ns_capable_nolog(struct user_namespace *ns, int cap)
59409 +{
59410 + if (unlikely(!cap_valid(cap))) {
59411 + printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
59412 + BUG();
59413 + }
59414 +
59415 + if (security_capable(ns, current_cred(), cap) == 0 && gr_is_capable_nolog(cap)) {
59416 + current->flags |= PF_SUPERPRIV;
59417 + return true;
59418 + }
59419 + return false;
59420 +}
59421 +EXPORT_SYMBOL(ns_capable_nolog);
59422 +
59423 +bool capable_nolog(int cap)
59424 +{
59425 + return ns_capable_nolog(&init_user_ns, cap);
59426 +}
59427 +EXPORT_SYMBOL(capable_nolog);
59428 +
59429 /**
59430 * task_ns_capable - Determine whether current task has a superior
59431 * capability targeted at a specific task's user namespace.
59432 @@ -396,6 +420,12 @@ bool task_ns_capable(struct task_struct
59433 }
59434 EXPORT_SYMBOL(task_ns_capable);
59435
59436 +bool task_ns_capable_nolog(struct task_struct *t, int cap)
59437 +{
59438 + return ns_capable_nolog(task_cred_xxx(t, user)->user_ns, cap);
59439 +}
59440 +EXPORT_SYMBOL(task_ns_capable_nolog);
59441 +
59442 /**
59443 * nsown_capable - Check superior capability to one's own user_ns
59444 * @cap: The capability in question
59445 diff -urNp linux-3.0.4/kernel/cgroup.c linux-3.0.4/kernel/cgroup.c
59446 --- linux-3.0.4/kernel/cgroup.c 2011-07-21 22:17:23.000000000 -0400
59447 +++ linux-3.0.4/kernel/cgroup.c 2011-08-23 21:48:14.000000000 -0400
59448 @@ -593,6 +593,8 @@ static struct css_set *find_css_set(
59449 struct hlist_head *hhead;
59450 struct cg_cgroup_link *link;
59451
59452 + pax_track_stack();
59453 +
59454 /* First see if we already have a cgroup group that matches
59455 * the desired set */
59456 read_lock(&css_set_lock);
59457 diff -urNp linux-3.0.4/kernel/compat.c linux-3.0.4/kernel/compat.c
59458 --- linux-3.0.4/kernel/compat.c 2011-07-21 22:17:23.000000000 -0400
59459 +++ linux-3.0.4/kernel/compat.c 2011-08-23 21:48:14.000000000 -0400
59460 @@ -13,6 +13,7 @@
59461
59462 #include <linux/linkage.h>
59463 #include <linux/compat.h>
59464 +#include <linux/module.h>
59465 #include <linux/errno.h>
59466 #include <linux/time.h>
59467 #include <linux/signal.h>
59468 diff -urNp linux-3.0.4/kernel/configs.c linux-3.0.4/kernel/configs.c
59469 --- linux-3.0.4/kernel/configs.c 2011-07-21 22:17:23.000000000 -0400
59470 +++ linux-3.0.4/kernel/configs.c 2011-08-23 21:48:14.000000000 -0400
59471 @@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
59472 struct proc_dir_entry *entry;
59473
59474 /* create the current config file */
59475 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
59476 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
59477 + entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
59478 + &ikconfig_file_ops);
59479 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
59480 + entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
59481 + &ikconfig_file_ops);
59482 +#endif
59483 +#else
59484 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
59485 &ikconfig_file_ops);
59486 +#endif
59487 +
59488 if (!entry)
59489 return -ENOMEM;
59490
59491 diff -urNp linux-3.0.4/kernel/cred.c linux-3.0.4/kernel/cred.c
59492 --- linux-3.0.4/kernel/cred.c 2011-07-21 22:17:23.000000000 -0400
59493 +++ linux-3.0.4/kernel/cred.c 2011-08-25 17:23:03.000000000 -0400
59494 @@ -158,6 +158,8 @@ static void put_cred_rcu(struct rcu_head
59495 */
59496 void __put_cred(struct cred *cred)
59497 {
59498 + pax_track_stack();
59499 +
59500 kdebug("__put_cred(%p{%d,%d})", cred,
59501 atomic_read(&cred->usage),
59502 read_cred_subscribers(cred));
59503 @@ -182,6 +184,8 @@ void exit_creds(struct task_struct *tsk)
59504 {
59505 struct cred *cred;
59506
59507 + pax_track_stack();
59508 +
59509 kdebug("exit_creds(%u,%p,%p,{%d,%d})", tsk->pid, tsk->real_cred, tsk->cred,
59510 atomic_read(&tsk->cred->usage),
59511 read_cred_subscribers(tsk->cred));
59512 @@ -220,6 +224,8 @@ const struct cred *get_task_cred(struct
59513 {
59514 const struct cred *cred;
59515
59516 + pax_track_stack();
59517 +
59518 rcu_read_lock();
59519
59520 do {
59521 @@ -239,6 +245,8 @@ struct cred *cred_alloc_blank(void)
59522 {
59523 struct cred *new;
59524
59525 + pax_track_stack();
59526 +
59527 new = kmem_cache_zalloc(cred_jar, GFP_KERNEL);
59528 if (!new)
59529 return NULL;
59530 @@ -287,6 +295,8 @@ struct cred *prepare_creds(void)
59531 const struct cred *old;
59532 struct cred *new;
59533
59534 + pax_track_stack();
59535 +
59536 validate_process_creds();
59537
59538 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
59539 @@ -333,6 +343,8 @@ struct cred *prepare_exec_creds(void)
59540 struct thread_group_cred *tgcred = NULL;
59541 struct cred *new;
59542
59543 + pax_track_stack();
59544 +
59545 #ifdef CONFIG_KEYS
59546 tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL);
59547 if (!tgcred)
59548 @@ -385,6 +397,8 @@ int copy_creds(struct task_struct *p, un
59549 struct cred *new;
59550 int ret;
59551
59552 + pax_track_stack();
59553 +
59554 if (
59555 #ifdef CONFIG_KEYS
59556 !p->cred->thread_keyring &&
59557 @@ -475,6 +489,8 @@ int commit_creds(struct cred *new)
59558 struct task_struct *task = current;
59559 const struct cred *old = task->real_cred;
59560
59561 + pax_track_stack();
59562 +
59563 kdebug("commit_creds(%p{%d,%d})", new,
59564 atomic_read(&new->usage),
59565 read_cred_subscribers(new));
59566 @@ -489,6 +505,8 @@ int commit_creds(struct cred *new)
59567
59568 get_cred(new); /* we will require a ref for the subj creds too */
59569
59570 + gr_set_role_label(task, new->uid, new->gid);
59571 +
59572 /* dumpability changes */
59573 if (old->euid != new->euid ||
59574 old->egid != new->egid ||
59575 @@ -508,10 +526,8 @@ int commit_creds(struct cred *new)
59576 key_fsgid_changed(task);
59577
59578 /* do it
59579 - * - What if a process setreuid()'s and this brings the
59580 - * new uid over his NPROC rlimit? We can check this now
59581 - * cheaply with the new uid cache, so if it matters
59582 - * we should be checking for it. -DaveM
59583 + * RLIMIT_NPROC limits on user->processes have already been checked
59584 + * in set_user().
59585 */
59586 alter_cred_subscribers(new, 2);
59587 if (new->user != old->user)
59588 @@ -551,6 +567,8 @@ EXPORT_SYMBOL(commit_creds);
59589 */
59590 void abort_creds(struct cred *new)
59591 {
59592 + pax_track_stack();
59593 +
59594 kdebug("abort_creds(%p{%d,%d})", new,
59595 atomic_read(&new->usage),
59596 read_cred_subscribers(new));
59597 @@ -574,6 +592,8 @@ const struct cred *override_creds(const
59598 {
59599 const struct cred *old = current->cred;
59600
59601 + pax_track_stack();
59602 +
59603 kdebug("override_creds(%p{%d,%d})", new,
59604 atomic_read(&new->usage),
59605 read_cred_subscribers(new));
59606 @@ -603,6 +623,8 @@ void revert_creds(const struct cred *old
59607 {
59608 const struct cred *override = current->cred;
59609
59610 + pax_track_stack();
59611 +
59612 kdebug("revert_creds(%p{%d,%d})", old,
59613 atomic_read(&old->usage),
59614 read_cred_subscribers(old));
59615 @@ -649,6 +671,8 @@ struct cred *prepare_kernel_cred(struct
59616 const struct cred *old;
59617 struct cred *new;
59618
59619 + pax_track_stack();
59620 +
59621 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
59622 if (!new)
59623 return NULL;
59624 @@ -703,6 +727,8 @@ EXPORT_SYMBOL(prepare_kernel_cred);
59625 */
59626 int set_security_override(struct cred *new, u32 secid)
59627 {
59628 + pax_track_stack();
59629 +
59630 return security_kernel_act_as(new, secid);
59631 }
59632 EXPORT_SYMBOL(set_security_override);
59633 @@ -722,6 +748,8 @@ int set_security_override_from_ctx(struc
59634 u32 secid;
59635 int ret;
59636
59637 + pax_track_stack();
59638 +
59639 ret = security_secctx_to_secid(secctx, strlen(secctx), &secid);
59640 if (ret < 0)
59641 return ret;
59642 diff -urNp linux-3.0.4/kernel/debug/debug_core.c linux-3.0.4/kernel/debug/debug_core.c
59643 --- linux-3.0.4/kernel/debug/debug_core.c 2011-07-21 22:17:23.000000000 -0400
59644 +++ linux-3.0.4/kernel/debug/debug_core.c 2011-08-23 21:47:56.000000000 -0400
59645 @@ -119,7 +119,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_loc
59646 */
59647 static atomic_t masters_in_kgdb;
59648 static atomic_t slaves_in_kgdb;
59649 -static atomic_t kgdb_break_tasklet_var;
59650 +static atomic_unchecked_t kgdb_break_tasklet_var;
59651 atomic_t kgdb_setting_breakpoint;
59652
59653 struct task_struct *kgdb_usethread;
59654 @@ -129,7 +129,7 @@ int kgdb_single_step;
59655 static pid_t kgdb_sstep_pid;
59656
59657 /* to keep track of the CPU which is doing the single stepping*/
59658 -atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
59659 +atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
59660
59661 /*
59662 * If you are debugging a problem where roundup (the collection of
59663 @@ -542,7 +542,7 @@ return_normal:
59664 * kernel will only try for the value of sstep_tries before
59665 * giving up and continuing on.
59666 */
59667 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
59668 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
59669 (kgdb_info[cpu].task &&
59670 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
59671 atomic_set(&kgdb_active, -1);
59672 @@ -636,8 +636,8 @@ cpu_master_loop:
59673 }
59674
59675 kgdb_restore:
59676 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
59677 - int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
59678 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
59679 + int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
59680 if (kgdb_info[sstep_cpu].task)
59681 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
59682 else
59683 @@ -834,18 +834,18 @@ static void kgdb_unregister_callbacks(vo
59684 static void kgdb_tasklet_bpt(unsigned long ing)
59685 {
59686 kgdb_breakpoint();
59687 - atomic_set(&kgdb_break_tasklet_var, 0);
59688 + atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
59689 }
59690
59691 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
59692
59693 void kgdb_schedule_breakpoint(void)
59694 {
59695 - if (atomic_read(&kgdb_break_tasklet_var) ||
59696 + if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
59697 atomic_read(&kgdb_active) != -1 ||
59698 atomic_read(&kgdb_setting_breakpoint))
59699 return;
59700 - atomic_inc(&kgdb_break_tasklet_var);
59701 + atomic_inc_unchecked(&kgdb_break_tasklet_var);
59702 tasklet_schedule(&kgdb_tasklet_breakpoint);
59703 }
59704 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
59705 diff -urNp linux-3.0.4/kernel/debug/kdb/kdb_main.c linux-3.0.4/kernel/debug/kdb/kdb_main.c
59706 --- linux-3.0.4/kernel/debug/kdb/kdb_main.c 2011-07-21 22:17:23.000000000 -0400
59707 +++ linux-3.0.4/kernel/debug/kdb/kdb_main.c 2011-08-23 21:47:56.000000000 -0400
59708 @@ -1980,7 +1980,7 @@ static int kdb_lsmod(int argc, const cha
59709 list_for_each_entry(mod, kdb_modules, list) {
59710
59711 kdb_printf("%-20s%8u 0x%p ", mod->name,
59712 - mod->core_size, (void *)mod);
59713 + mod->core_size_rx + mod->core_size_rw, (void *)mod);
59714 #ifdef CONFIG_MODULE_UNLOAD
59715 kdb_printf("%4d ", module_refcount(mod));
59716 #endif
59717 @@ -1990,7 +1990,7 @@ static int kdb_lsmod(int argc, const cha
59718 kdb_printf(" (Loading)");
59719 else
59720 kdb_printf(" (Live)");
59721 - kdb_printf(" 0x%p", mod->module_core);
59722 + kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
59723
59724 #ifdef CONFIG_MODULE_UNLOAD
59725 {
59726 diff -urNp linux-3.0.4/kernel/events/core.c linux-3.0.4/kernel/events/core.c
59727 --- linux-3.0.4/kernel/events/core.c 2011-09-02 18:11:21.000000000 -0400
59728 +++ linux-3.0.4/kernel/events/core.c 2011-09-14 09:08:05.000000000 -0400
59729 @@ -170,7 +170,7 @@ int perf_proc_update_handler(struct ctl_
59730 return 0;
59731 }
59732
59733 -static atomic64_t perf_event_id;
59734 +static atomic64_unchecked_t perf_event_id;
59735
59736 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
59737 enum event_type_t event_type);
59738 @@ -2488,7 +2488,7 @@ static void __perf_event_read(void *info
59739
59740 static inline u64 perf_event_count(struct perf_event *event)
59741 {
59742 - return local64_read(&event->count) + atomic64_read(&event->child_count);
59743 + return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
59744 }
59745
59746 static u64 perf_event_read(struct perf_event *event)
59747 @@ -3023,9 +3023,9 @@ u64 perf_event_read_value(struct perf_ev
59748 mutex_lock(&event->child_mutex);
59749 total += perf_event_read(event);
59750 *enabled += event->total_time_enabled +
59751 - atomic64_read(&event->child_total_time_enabled);
59752 + atomic64_read_unchecked(&event->child_total_time_enabled);
59753 *running += event->total_time_running +
59754 - atomic64_read(&event->child_total_time_running);
59755 + atomic64_read_unchecked(&event->child_total_time_running);
59756
59757 list_for_each_entry(child, &event->child_list, child_list) {
59758 total += perf_event_read(child);
59759 @@ -3388,10 +3388,10 @@ void perf_event_update_userpage(struct p
59760 userpg->offset -= local64_read(&event->hw.prev_count);
59761
59762 userpg->time_enabled = event->total_time_enabled +
59763 - atomic64_read(&event->child_total_time_enabled);
59764 + atomic64_read_unchecked(&event->child_total_time_enabled);
59765
59766 userpg->time_running = event->total_time_running +
59767 - atomic64_read(&event->child_total_time_running);
59768 + atomic64_read_unchecked(&event->child_total_time_running);
59769
59770 barrier();
59771 ++userpg->lock;
59772 @@ -4188,11 +4188,11 @@ static void perf_output_read_one(struct
59773 values[n++] = perf_event_count(event);
59774 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
59775 values[n++] = enabled +
59776 - atomic64_read(&event->child_total_time_enabled);
59777 + atomic64_read_unchecked(&event->child_total_time_enabled);
59778 }
59779 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
59780 values[n++] = running +
59781 - atomic64_read(&event->child_total_time_running);
59782 + atomic64_read_unchecked(&event->child_total_time_running);
59783 }
59784 if (read_format & PERF_FORMAT_ID)
59785 values[n++] = primary_event_id(event);
59786 @@ -4833,12 +4833,12 @@ static void perf_event_mmap_event(struct
59787 * need to add enough zero bytes after the string to handle
59788 * the 64bit alignment we do later.
59789 */
59790 - buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
59791 + buf = kzalloc(PATH_MAX, GFP_KERNEL);
59792 if (!buf) {
59793 name = strncpy(tmp, "//enomem", sizeof(tmp));
59794 goto got_name;
59795 }
59796 - name = d_path(&file->f_path, buf, PATH_MAX);
59797 + name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
59798 if (IS_ERR(name)) {
59799 name = strncpy(tmp, "//toolong", sizeof(tmp));
59800 goto got_name;
59801 @@ -6190,7 +6190,7 @@ perf_event_alloc(struct perf_event_attr
59802 event->parent = parent_event;
59803
59804 event->ns = get_pid_ns(current->nsproxy->pid_ns);
59805 - event->id = atomic64_inc_return(&perf_event_id);
59806 + event->id = atomic64_inc_return_unchecked(&perf_event_id);
59807
59808 event->state = PERF_EVENT_STATE_INACTIVE;
59809
59810 @@ -6713,10 +6713,10 @@ static void sync_child_event(struct perf
59811 /*
59812 * Add back the child's count to the parent's count:
59813 */
59814 - atomic64_add(child_val, &parent_event->child_count);
59815 - atomic64_add(child_event->total_time_enabled,
59816 + atomic64_add_unchecked(child_val, &parent_event->child_count);
59817 + atomic64_add_unchecked(child_event->total_time_enabled,
59818 &parent_event->child_total_time_enabled);
59819 - atomic64_add(child_event->total_time_running,
59820 + atomic64_add_unchecked(child_event->total_time_running,
59821 &parent_event->child_total_time_running);
59822
59823 /*
59824 diff -urNp linux-3.0.4/kernel/exit.c linux-3.0.4/kernel/exit.c
59825 --- linux-3.0.4/kernel/exit.c 2011-07-21 22:17:23.000000000 -0400
59826 +++ linux-3.0.4/kernel/exit.c 2011-08-23 21:48:14.000000000 -0400
59827 @@ -57,6 +57,10 @@
59828 #include <asm/pgtable.h>
59829 #include <asm/mmu_context.h>
59830
59831 +#ifdef CONFIG_GRKERNSEC
59832 +extern rwlock_t grsec_exec_file_lock;
59833 +#endif
59834 +
59835 static void exit_mm(struct task_struct * tsk);
59836
59837 static void __unhash_process(struct task_struct *p, bool group_dead)
59838 @@ -169,6 +173,10 @@ void release_task(struct task_struct * p
59839 struct task_struct *leader;
59840 int zap_leader;
59841 repeat:
59842 +#ifdef CONFIG_NET
59843 + gr_del_task_from_ip_table(p);
59844 +#endif
59845 +
59846 tracehook_prepare_release_task(p);
59847 /* don't need to get the RCU readlock here - the process is dead and
59848 * can't be modifying its own credentials. But shut RCU-lockdep up */
59849 @@ -338,11 +346,22 @@ static void reparent_to_kthreadd(void)
59850 {
59851 write_lock_irq(&tasklist_lock);
59852
59853 +#ifdef CONFIG_GRKERNSEC
59854 + write_lock(&grsec_exec_file_lock);
59855 + if (current->exec_file) {
59856 + fput(current->exec_file);
59857 + current->exec_file = NULL;
59858 + }
59859 + write_unlock(&grsec_exec_file_lock);
59860 +#endif
59861 +
59862 ptrace_unlink(current);
59863 /* Reparent to init */
59864 current->real_parent = current->parent = kthreadd_task;
59865 list_move_tail(&current->sibling, &current->real_parent->children);
59866
59867 + gr_set_kernel_label(current);
59868 +
59869 /* Set the exit signal to SIGCHLD so we signal init on exit */
59870 current->exit_signal = SIGCHLD;
59871
59872 @@ -394,7 +413,7 @@ int allow_signal(int sig)
59873 * know it'll be handled, so that they don't get converted to
59874 * SIGKILL or just silently dropped.
59875 */
59876 - current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
59877 + current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
59878 recalc_sigpending();
59879 spin_unlock_irq(&current->sighand->siglock);
59880 return 0;
59881 @@ -430,6 +449,17 @@ void daemonize(const char *name, ...)
59882 vsnprintf(current->comm, sizeof(current->comm), name, args);
59883 va_end(args);
59884
59885 +#ifdef CONFIG_GRKERNSEC
59886 + write_lock(&grsec_exec_file_lock);
59887 + if (current->exec_file) {
59888 + fput(current->exec_file);
59889 + current->exec_file = NULL;
59890 + }
59891 + write_unlock(&grsec_exec_file_lock);
59892 +#endif
59893 +
59894 + gr_set_kernel_label(current);
59895 +
59896 /*
59897 * If we were started as result of loading a module, close all of the
59898 * user space pages. We don't need them, and if we didn't close them
59899 @@ -904,15 +934,8 @@ NORET_TYPE void do_exit(long code)
59900 struct task_struct *tsk = current;
59901 int group_dead;
59902
59903 - profile_task_exit(tsk);
59904 -
59905 - WARN_ON(atomic_read(&tsk->fs_excl));
59906 - WARN_ON(blk_needs_flush_plug(tsk));
59907 -
59908 if (unlikely(in_interrupt()))
59909 panic("Aiee, killing interrupt handler!");
59910 - if (unlikely(!tsk->pid))
59911 - panic("Attempted to kill the idle task!");
59912
59913 /*
59914 * If do_exit is called because this processes oopsed, it's possible
59915 @@ -923,6 +946,14 @@ NORET_TYPE void do_exit(long code)
59916 */
59917 set_fs(USER_DS);
59918
59919 + profile_task_exit(tsk);
59920 +
59921 + WARN_ON(atomic_read(&tsk->fs_excl));
59922 + WARN_ON(blk_needs_flush_plug(tsk));
59923 +
59924 + if (unlikely(!tsk->pid))
59925 + panic("Attempted to kill the idle task!");
59926 +
59927 tracehook_report_exit(&code);
59928
59929 validate_creds_for_do_exit(tsk);
59930 @@ -983,6 +1014,9 @@ NORET_TYPE void do_exit(long code)
59931 tsk->exit_code = code;
59932 taskstats_exit(tsk, group_dead);
59933
59934 + gr_acl_handle_psacct(tsk, code);
59935 + gr_acl_handle_exit();
59936 +
59937 exit_mm(tsk);
59938
59939 if (group_dead)
59940 diff -urNp linux-3.0.4/kernel/fork.c linux-3.0.4/kernel/fork.c
59941 --- linux-3.0.4/kernel/fork.c 2011-07-21 22:17:23.000000000 -0400
59942 +++ linux-3.0.4/kernel/fork.c 2011-08-25 17:23:36.000000000 -0400
59943 @@ -286,7 +286,7 @@ static struct task_struct *dup_task_stru
59944 *stackend = STACK_END_MAGIC; /* for overflow detection */
59945
59946 #ifdef CONFIG_CC_STACKPROTECTOR
59947 - tsk->stack_canary = get_random_int();
59948 + tsk->stack_canary = pax_get_random_long();
59949 #endif
59950
59951 /* One for us, one for whoever does the "release_task()" (usually parent) */
59952 @@ -308,13 +308,77 @@ out:
59953 }
59954
59955 #ifdef CONFIG_MMU
59956 +static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct vm_area_struct *mpnt)
59957 +{
59958 + struct vm_area_struct *tmp;
59959 + unsigned long charge;
59960 + struct mempolicy *pol;
59961 + struct file *file;
59962 +
59963 + charge = 0;
59964 + if (mpnt->vm_flags & VM_ACCOUNT) {
59965 + unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
59966 + if (security_vm_enough_memory(len))
59967 + goto fail_nomem;
59968 + charge = len;
59969 + }
59970 + tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
59971 + if (!tmp)
59972 + goto fail_nomem;
59973 + *tmp = *mpnt;
59974 + tmp->vm_mm = mm;
59975 + INIT_LIST_HEAD(&tmp->anon_vma_chain);
59976 + pol = mpol_dup(vma_policy(mpnt));
59977 + if (IS_ERR(pol))
59978 + goto fail_nomem_policy;
59979 + vma_set_policy(tmp, pol);
59980 + if (anon_vma_fork(tmp, mpnt))
59981 + goto fail_nomem_anon_vma_fork;
59982 + tmp->vm_flags &= ~VM_LOCKED;
59983 + tmp->vm_next = tmp->vm_prev = NULL;
59984 + tmp->vm_mirror = NULL;
59985 + file = tmp->vm_file;
59986 + if (file) {
59987 + struct inode *inode = file->f_path.dentry->d_inode;
59988 + struct address_space *mapping = file->f_mapping;
59989 +
59990 + get_file(file);
59991 + if (tmp->vm_flags & VM_DENYWRITE)
59992 + atomic_dec(&inode->i_writecount);
59993 + mutex_lock(&mapping->i_mmap_mutex);
59994 + if (tmp->vm_flags & VM_SHARED)
59995 + mapping->i_mmap_writable++;
59996 + flush_dcache_mmap_lock(mapping);
59997 + /* insert tmp into the share list, just after mpnt */
59998 + vma_prio_tree_add(tmp, mpnt);
59999 + flush_dcache_mmap_unlock(mapping);
60000 + mutex_unlock(&mapping->i_mmap_mutex);
60001 + }
60002 +
60003 + /*
60004 + * Clear hugetlb-related page reserves for children. This only
60005 + * affects MAP_PRIVATE mappings. Faults generated by the child
60006 + * are not guaranteed to succeed, even if read-only
60007 + */
60008 + if (is_vm_hugetlb_page(tmp))
60009 + reset_vma_resv_huge_pages(tmp);
60010 +
60011 + return tmp;
60012 +
60013 +fail_nomem_anon_vma_fork:
60014 + mpol_put(pol);
60015 +fail_nomem_policy:
60016 + kmem_cache_free(vm_area_cachep, tmp);
60017 +fail_nomem:
60018 + vm_unacct_memory(charge);
60019 + return NULL;
60020 +}
60021 +
60022 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
60023 {
60024 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
60025 struct rb_node **rb_link, *rb_parent;
60026 int retval;
60027 - unsigned long charge;
60028 - struct mempolicy *pol;
60029
60030 down_write(&oldmm->mmap_sem);
60031 flush_cache_dup_mm(oldmm);
60032 @@ -326,8 +390,8 @@ static int dup_mmap(struct mm_struct *mm
60033 mm->locked_vm = 0;
60034 mm->mmap = NULL;
60035 mm->mmap_cache = NULL;
60036 - mm->free_area_cache = oldmm->mmap_base;
60037 - mm->cached_hole_size = ~0UL;
60038 + mm->free_area_cache = oldmm->free_area_cache;
60039 + mm->cached_hole_size = oldmm->cached_hole_size;
60040 mm->map_count = 0;
60041 cpumask_clear(mm_cpumask(mm));
60042 mm->mm_rb = RB_ROOT;
60043 @@ -343,8 +407,6 @@ static int dup_mmap(struct mm_struct *mm
60044
60045 prev = NULL;
60046 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
60047 - struct file *file;
60048 -
60049 if (mpnt->vm_flags & VM_DONTCOPY) {
60050 long pages = vma_pages(mpnt);
60051 mm->total_vm -= pages;
60052 @@ -352,55 +414,13 @@ static int dup_mmap(struct mm_struct *mm
60053 -pages);
60054 continue;
60055 }
60056 - charge = 0;
60057 - if (mpnt->vm_flags & VM_ACCOUNT) {
60058 - unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
60059 - if (security_vm_enough_memory(len))
60060 - goto fail_nomem;
60061 - charge = len;
60062 - }
60063 - tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
60064 - if (!tmp)
60065 - goto fail_nomem;
60066 - *tmp = *mpnt;
60067 - INIT_LIST_HEAD(&tmp->anon_vma_chain);
60068 - pol = mpol_dup(vma_policy(mpnt));
60069 - retval = PTR_ERR(pol);
60070 - if (IS_ERR(pol))
60071 - goto fail_nomem_policy;
60072 - vma_set_policy(tmp, pol);
60073 - tmp->vm_mm = mm;
60074 - if (anon_vma_fork(tmp, mpnt))
60075 - goto fail_nomem_anon_vma_fork;
60076 - tmp->vm_flags &= ~VM_LOCKED;
60077 - tmp->vm_next = tmp->vm_prev = NULL;
60078 - file = tmp->vm_file;
60079 - if (file) {
60080 - struct inode *inode = file->f_path.dentry->d_inode;
60081 - struct address_space *mapping = file->f_mapping;
60082 -
60083 - get_file(file);
60084 - if (tmp->vm_flags & VM_DENYWRITE)
60085 - atomic_dec(&inode->i_writecount);
60086 - mutex_lock(&mapping->i_mmap_mutex);
60087 - if (tmp->vm_flags & VM_SHARED)
60088 - mapping->i_mmap_writable++;
60089 - flush_dcache_mmap_lock(mapping);
60090 - /* insert tmp into the share list, just after mpnt */
60091 - vma_prio_tree_add(tmp, mpnt);
60092 - flush_dcache_mmap_unlock(mapping);
60093 - mutex_unlock(&mapping->i_mmap_mutex);
60094 + tmp = dup_vma(mm, mpnt);
60095 + if (!tmp) {
60096 + retval = -ENOMEM;
60097 + goto out;
60098 }
60099
60100 /*
60101 - * Clear hugetlb-related page reserves for children. This only
60102 - * affects MAP_PRIVATE mappings. Faults generated by the child
60103 - * are not guaranteed to succeed, even if read-only
60104 - */
60105 - if (is_vm_hugetlb_page(tmp))
60106 - reset_vma_resv_huge_pages(tmp);
60107 -
60108 - /*
60109 * Link in the new vma and copy the page table entries.
60110 */
60111 *pprev = tmp;
60112 @@ -421,6 +441,31 @@ static int dup_mmap(struct mm_struct *mm
60113 if (retval)
60114 goto out;
60115 }
60116 +
60117 +#ifdef CONFIG_PAX_SEGMEXEC
60118 + if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
60119 + struct vm_area_struct *mpnt_m;
60120 +
60121 + for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
60122 + BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
60123 +
60124 + if (!mpnt->vm_mirror)
60125 + continue;
60126 +
60127 + if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
60128 + BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
60129 + mpnt->vm_mirror = mpnt_m;
60130 + } else {
60131 + BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
60132 + mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
60133 + mpnt_m->vm_mirror->vm_mirror = mpnt_m;
60134 + mpnt->vm_mirror->vm_mirror = mpnt;
60135 + }
60136 + }
60137 + BUG_ON(mpnt_m);
60138 + }
60139 +#endif
60140 +
60141 /* a new mm has just been created */
60142 arch_dup_mmap(oldmm, mm);
60143 retval = 0;
60144 @@ -429,14 +474,6 @@ out:
60145 flush_tlb_mm(oldmm);
60146 up_write(&oldmm->mmap_sem);
60147 return retval;
60148 -fail_nomem_anon_vma_fork:
60149 - mpol_put(pol);
60150 -fail_nomem_policy:
60151 - kmem_cache_free(vm_area_cachep, tmp);
60152 -fail_nomem:
60153 - retval = -ENOMEM;
60154 - vm_unacct_memory(charge);
60155 - goto out;
60156 }
60157
60158 static inline int mm_alloc_pgd(struct mm_struct * mm)
60159 @@ -836,13 +873,14 @@ static int copy_fs(unsigned long clone_f
60160 spin_unlock(&fs->lock);
60161 return -EAGAIN;
60162 }
60163 - fs->users++;
60164 + atomic_inc(&fs->users);
60165 spin_unlock(&fs->lock);
60166 return 0;
60167 }
60168 tsk->fs = copy_fs_struct(fs);
60169 if (!tsk->fs)
60170 return -ENOMEM;
60171 + gr_set_chroot_entries(tsk, &tsk->fs->root);
60172 return 0;
60173 }
60174
60175 @@ -1104,12 +1142,16 @@ static struct task_struct *copy_process(
60176 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
60177 #endif
60178 retval = -EAGAIN;
60179 +
60180 + gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
60181 +
60182 if (atomic_read(&p->real_cred->user->processes) >=
60183 task_rlimit(p, RLIMIT_NPROC)) {
60184 - if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
60185 - p->real_cred->user != INIT_USER)
60186 + if (p->real_cred->user != INIT_USER &&
60187 + !capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE))
60188 goto bad_fork_free;
60189 }
60190 + current->flags &= ~PF_NPROC_EXCEEDED;
60191
60192 retval = copy_creds(p, clone_flags);
60193 if (retval < 0)
60194 @@ -1250,6 +1292,8 @@ static struct task_struct *copy_process(
60195 if (clone_flags & CLONE_THREAD)
60196 p->tgid = current->tgid;
60197
60198 + gr_copy_label(p);
60199 +
60200 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
60201 /*
60202 * Clear TID on mm_release()?
60203 @@ -1414,6 +1458,8 @@ bad_fork_cleanup_count:
60204 bad_fork_free:
60205 free_task(p);
60206 fork_out:
60207 + gr_log_forkfail(retval);
60208 +
60209 return ERR_PTR(retval);
60210 }
60211
60212 @@ -1502,6 +1548,8 @@ long do_fork(unsigned long clone_flags,
60213 if (clone_flags & CLONE_PARENT_SETTID)
60214 put_user(nr, parent_tidptr);
60215
60216 + gr_handle_brute_check();
60217 +
60218 if (clone_flags & CLONE_VFORK) {
60219 p->vfork_done = &vfork;
60220 init_completion(&vfork);
60221 @@ -1610,7 +1658,7 @@ static int unshare_fs(unsigned long unsh
60222 return 0;
60223
60224 /* don't need lock here; in the worst case we'll do useless copy */
60225 - if (fs->users == 1)
60226 + if (atomic_read(&fs->users) == 1)
60227 return 0;
60228
60229 *new_fsp = copy_fs_struct(fs);
60230 @@ -1697,7 +1745,8 @@ SYSCALL_DEFINE1(unshare, unsigned long,
60231 fs = current->fs;
60232 spin_lock(&fs->lock);
60233 current->fs = new_fs;
60234 - if (--fs->users)
60235 + gr_set_chroot_entries(current, &current->fs->root);
60236 + if (atomic_dec_return(&fs->users))
60237 new_fs = NULL;
60238 else
60239 new_fs = fs;
60240 diff -urNp linux-3.0.4/kernel/futex.c linux-3.0.4/kernel/futex.c
60241 --- linux-3.0.4/kernel/futex.c 2011-09-02 18:11:21.000000000 -0400
60242 +++ linux-3.0.4/kernel/futex.c 2011-08-23 21:48:14.000000000 -0400
60243 @@ -54,6 +54,7 @@
60244 #include <linux/mount.h>
60245 #include <linux/pagemap.h>
60246 #include <linux/syscalls.h>
60247 +#include <linux/ptrace.h>
60248 #include <linux/signal.h>
60249 #include <linux/module.h>
60250 #include <linux/magic.h>
60251 @@ -238,6 +239,11 @@ get_futex_key(u32 __user *uaddr, int fsh
60252 struct page *page, *page_head;
60253 int err, ro = 0;
60254
60255 +#ifdef CONFIG_PAX_SEGMEXEC
60256 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
60257 + return -EFAULT;
60258 +#endif
60259 +
60260 /*
60261 * The futex address must be "naturally" aligned.
60262 */
60263 @@ -1863,6 +1869,8 @@ static int futex_wait(u32 __user *uaddr,
60264 struct futex_q q = futex_q_init;
60265 int ret;
60266
60267 + pax_track_stack();
60268 +
60269 if (!bitset)
60270 return -EINVAL;
60271 q.bitset = bitset;
60272 @@ -2259,6 +2267,8 @@ static int futex_wait_requeue_pi(u32 __u
60273 struct futex_q q = futex_q_init;
60274 int res, ret;
60275
60276 + pax_track_stack();
60277 +
60278 if (!bitset)
60279 return -EINVAL;
60280
60281 @@ -2431,7 +2441,9 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
60282 {
60283 struct robust_list_head __user *head;
60284 unsigned long ret;
60285 +#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
60286 const struct cred *cred = current_cred(), *pcred;
60287 +#endif
60288
60289 if (!futex_cmpxchg_enabled)
60290 return -ENOSYS;
60291 @@ -2447,6 +2459,10 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
60292 if (!p)
60293 goto err_unlock;
60294 ret = -EPERM;
60295 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
60296 + if (!ptrace_may_access(p, PTRACE_MODE_READ))
60297 + goto err_unlock;
60298 +#else
60299 pcred = __task_cred(p);
60300 /* If victim is in different user_ns, then uids are not
60301 comparable, so we must have CAP_SYS_PTRACE */
60302 @@ -2461,6 +2477,7 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
60303 !ns_capable(pcred->user->user_ns, CAP_SYS_PTRACE))
60304 goto err_unlock;
60305 ok:
60306 +#endif
60307 head = p->robust_list;
60308 rcu_read_unlock();
60309 }
60310 @@ -2712,6 +2729,7 @@ static int __init futex_init(void)
60311 {
60312 u32 curval;
60313 int i;
60314 + mm_segment_t oldfs;
60315
60316 /*
60317 * This will fail and we want it. Some arch implementations do
60318 @@ -2723,8 +2741,11 @@ static int __init futex_init(void)
60319 * implementation, the non-functional ones will return
60320 * -ENOSYS.
60321 */
60322 + oldfs = get_fs();
60323 + set_fs(USER_DS);
60324 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
60325 futex_cmpxchg_enabled = 1;
60326 + set_fs(oldfs);
60327
60328 for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
60329 plist_head_init(&futex_queues[i].chain, &futex_queues[i].lock);
60330 diff -urNp linux-3.0.4/kernel/futex_compat.c linux-3.0.4/kernel/futex_compat.c
60331 --- linux-3.0.4/kernel/futex_compat.c 2011-07-21 22:17:23.000000000 -0400
60332 +++ linux-3.0.4/kernel/futex_compat.c 2011-08-23 21:48:14.000000000 -0400
60333 @@ -10,6 +10,7 @@
60334 #include <linux/compat.h>
60335 #include <linux/nsproxy.h>
60336 #include <linux/futex.h>
60337 +#include <linux/ptrace.h>
60338
60339 #include <asm/uaccess.h>
60340
60341 @@ -136,7 +137,10 @@ compat_sys_get_robust_list(int pid, comp
60342 {
60343 struct compat_robust_list_head __user *head;
60344 unsigned long ret;
60345 - const struct cred *cred = current_cred(), *pcred;
60346 +#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
60347 + const struct cred *cred = current_cred();
60348 + const struct cred *pcred;
60349 +#endif
60350
60351 if (!futex_cmpxchg_enabled)
60352 return -ENOSYS;
60353 @@ -152,6 +156,10 @@ compat_sys_get_robust_list(int pid, comp
60354 if (!p)
60355 goto err_unlock;
60356 ret = -EPERM;
60357 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
60358 + if (!ptrace_may_access(p, PTRACE_MODE_READ))
60359 + goto err_unlock;
60360 +#else
60361 pcred = __task_cred(p);
60362 /* If victim is in different user_ns, then uids are not
60363 comparable, so we must have CAP_SYS_PTRACE */
60364 @@ -166,6 +174,7 @@ compat_sys_get_robust_list(int pid, comp
60365 !ns_capable(pcred->user->user_ns, CAP_SYS_PTRACE))
60366 goto err_unlock;
60367 ok:
60368 +#endif
60369 head = p->compat_robust_list;
60370 rcu_read_unlock();
60371 }
60372 diff -urNp linux-3.0.4/kernel/gcov/base.c linux-3.0.4/kernel/gcov/base.c
60373 --- linux-3.0.4/kernel/gcov/base.c 2011-07-21 22:17:23.000000000 -0400
60374 +++ linux-3.0.4/kernel/gcov/base.c 2011-08-23 21:47:56.000000000 -0400
60375 @@ -102,11 +102,6 @@ void gcov_enable_events(void)
60376 }
60377
60378 #ifdef CONFIG_MODULES
60379 -static inline int within(void *addr, void *start, unsigned long size)
60380 -{
60381 - return ((addr >= start) && (addr < start + size));
60382 -}
60383 -
60384 /* Update list and generate events when modules are unloaded. */
60385 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
60386 void *data)
60387 @@ -121,7 +116,7 @@ static int gcov_module_notifier(struct n
60388 prev = NULL;
60389 /* Remove entries located in module from linked list. */
60390 for (info = gcov_info_head; info; info = info->next) {
60391 - if (within(info, mod->module_core, mod->core_size)) {
60392 + if (within_module_core_rw((unsigned long)info, mod)) {
60393 if (prev)
60394 prev->next = info->next;
60395 else
60396 diff -urNp linux-3.0.4/kernel/hrtimer.c linux-3.0.4/kernel/hrtimer.c
60397 --- linux-3.0.4/kernel/hrtimer.c 2011-07-21 22:17:23.000000000 -0400
60398 +++ linux-3.0.4/kernel/hrtimer.c 2011-08-23 21:47:56.000000000 -0400
60399 @@ -1391,7 +1391,7 @@ void hrtimer_peek_ahead_timers(void)
60400 local_irq_restore(flags);
60401 }
60402
60403 -static void run_hrtimer_softirq(struct softirq_action *h)
60404 +static void run_hrtimer_softirq(void)
60405 {
60406 hrtimer_peek_ahead_timers();
60407 }
60408 diff -urNp linux-3.0.4/kernel/jump_label.c linux-3.0.4/kernel/jump_label.c
60409 --- linux-3.0.4/kernel/jump_label.c 2011-07-21 22:17:23.000000000 -0400
60410 +++ linux-3.0.4/kernel/jump_label.c 2011-08-23 21:47:56.000000000 -0400
60411 @@ -55,7 +55,9 @@ jump_label_sort_entries(struct jump_entr
60412
60413 size = (((unsigned long)stop - (unsigned long)start)
60414 / sizeof(struct jump_entry));
60415 + pax_open_kernel();
60416 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
60417 + pax_close_kernel();
60418 }
60419
60420 static void jump_label_update(struct jump_label_key *key, int enable);
60421 @@ -297,10 +299,12 @@ static void jump_label_invalidate_module
60422 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
60423 struct jump_entry *iter;
60424
60425 + pax_open_kernel();
60426 for (iter = iter_start; iter < iter_stop; iter++) {
60427 if (within_module_init(iter->code, mod))
60428 iter->code = 0;
60429 }
60430 + pax_close_kernel();
60431 }
60432
60433 static int
60434 diff -urNp linux-3.0.4/kernel/kallsyms.c linux-3.0.4/kernel/kallsyms.c
60435 --- linux-3.0.4/kernel/kallsyms.c 2011-07-21 22:17:23.000000000 -0400
60436 +++ linux-3.0.4/kernel/kallsyms.c 2011-08-23 21:48:14.000000000 -0400
60437 @@ -11,6 +11,9 @@
60438 * Changed the compression method from stem compression to "table lookup"
60439 * compression (see scripts/kallsyms.c for a more complete description)
60440 */
60441 +#ifdef CONFIG_GRKERNSEC_HIDESYM
60442 +#define __INCLUDED_BY_HIDESYM 1
60443 +#endif
60444 #include <linux/kallsyms.h>
60445 #include <linux/module.h>
60446 #include <linux/init.h>
60447 @@ -53,12 +56,33 @@ extern const unsigned long kallsyms_mark
60448
60449 static inline int is_kernel_inittext(unsigned long addr)
60450 {
60451 + if (system_state != SYSTEM_BOOTING)
60452 + return 0;
60453 +
60454 if (addr >= (unsigned long)_sinittext
60455 && addr <= (unsigned long)_einittext)
60456 return 1;
60457 return 0;
60458 }
60459
60460 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
60461 +#ifdef CONFIG_MODULES
60462 +static inline int is_module_text(unsigned long addr)
60463 +{
60464 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
60465 + return 1;
60466 +
60467 + addr = ktla_ktva(addr);
60468 + return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
60469 +}
60470 +#else
60471 +static inline int is_module_text(unsigned long addr)
60472 +{
60473 + return 0;
60474 +}
60475 +#endif
60476 +#endif
60477 +
60478 static inline int is_kernel_text(unsigned long addr)
60479 {
60480 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
60481 @@ -69,13 +93,28 @@ static inline int is_kernel_text(unsigne
60482
60483 static inline int is_kernel(unsigned long addr)
60484 {
60485 +
60486 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
60487 + if (is_kernel_text(addr) || is_kernel_inittext(addr))
60488 + return 1;
60489 +
60490 + if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
60491 +#else
60492 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
60493 +#endif
60494 +
60495 return 1;
60496 return in_gate_area_no_mm(addr);
60497 }
60498
60499 static int is_ksym_addr(unsigned long addr)
60500 {
60501 +
60502 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
60503 + if (is_module_text(addr))
60504 + return 0;
60505 +#endif
60506 +
60507 if (all_var)
60508 return is_kernel(addr);
60509
60510 @@ -454,7 +493,6 @@ static unsigned long get_ksymbol_core(st
60511
60512 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
60513 {
60514 - iter->name[0] = '\0';
60515 iter->nameoff = get_symbol_offset(new_pos);
60516 iter->pos = new_pos;
60517 }
60518 @@ -502,6 +540,11 @@ static int s_show(struct seq_file *m, vo
60519 {
60520 struct kallsym_iter *iter = m->private;
60521
60522 +#ifdef CONFIG_GRKERNSEC_HIDESYM
60523 + if (current_uid())
60524 + return 0;
60525 +#endif
60526 +
60527 /* Some debugging symbols have no name. Ignore them. */
60528 if (!iter->name[0])
60529 return 0;
60530 @@ -540,7 +583,7 @@ static int kallsyms_open(struct inode *i
60531 struct kallsym_iter *iter;
60532 int ret;
60533
60534 - iter = kmalloc(sizeof(*iter), GFP_KERNEL);
60535 + iter = kzalloc(sizeof(*iter), GFP_KERNEL);
60536 if (!iter)
60537 return -ENOMEM;
60538 reset_iter(iter, 0);
60539 diff -urNp linux-3.0.4/kernel/kmod.c linux-3.0.4/kernel/kmod.c
60540 --- linux-3.0.4/kernel/kmod.c 2011-07-21 22:17:23.000000000 -0400
60541 +++ linux-3.0.4/kernel/kmod.c 2011-08-23 21:48:14.000000000 -0400
60542 @@ -73,13 +73,12 @@ char modprobe_path[KMOD_PATH_LEN] = "/sb
60543 * If module auto-loading support is disabled then this function
60544 * becomes a no-operation.
60545 */
60546 -int __request_module(bool wait, const char *fmt, ...)
60547 +static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
60548 {
60549 - va_list args;
60550 char module_name[MODULE_NAME_LEN];
60551 unsigned int max_modprobes;
60552 int ret;
60553 - char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
60554 + char *argv[] = { modprobe_path, "-q", "--", module_name, module_param, NULL };
60555 static char *envp[] = { "HOME=/",
60556 "TERM=linux",
60557 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
60558 @@ -88,9 +87,7 @@ int __request_module(bool wait, const ch
60559 #define MAX_KMOD_CONCURRENT 50 /* Completely arbitrary value - KAO */
60560 static int kmod_loop_msg;
60561
60562 - va_start(args, fmt);
60563 - ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
60564 - va_end(args);
60565 + ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
60566 if (ret >= MODULE_NAME_LEN)
60567 return -ENAMETOOLONG;
60568
60569 @@ -98,6 +95,20 @@ int __request_module(bool wait, const ch
60570 if (ret)
60571 return ret;
60572
60573 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
60574 + if (!current_uid()) {
60575 + /* hack to workaround consolekit/udisks stupidity */
60576 + read_lock(&tasklist_lock);
60577 + if (!strcmp(current->comm, "mount") &&
60578 + current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
60579 + read_unlock(&tasklist_lock);
60580 + printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
60581 + return -EPERM;
60582 + }
60583 + read_unlock(&tasklist_lock);
60584 + }
60585 +#endif
60586 +
60587 /* If modprobe needs a service that is in a module, we get a recursive
60588 * loop. Limit the number of running kmod threads to max_threads/2 or
60589 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
60590 @@ -131,6 +142,47 @@ int __request_module(bool wait, const ch
60591 atomic_dec(&kmod_concurrent);
60592 return ret;
60593 }
60594 +
60595 +int ___request_module(bool wait, char *module_param, const char *fmt, ...)
60596 +{
60597 + va_list args;
60598 + int ret;
60599 +
60600 + va_start(args, fmt);
60601 + ret = ____request_module(wait, module_param, fmt, args);
60602 + va_end(args);
60603 +
60604 + return ret;
60605 +}
60606 +
60607 +int __request_module(bool wait, const char *fmt, ...)
60608 +{
60609 + va_list args;
60610 + int ret;
60611 +
60612 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
60613 + if (current_uid()) {
60614 + char module_param[MODULE_NAME_LEN];
60615 +
60616 + memset(module_param, 0, sizeof(module_param));
60617 +
60618 + snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
60619 +
60620 + va_start(args, fmt);
60621 + ret = ____request_module(wait, module_param, fmt, args);
60622 + va_end(args);
60623 +
60624 + return ret;
60625 + }
60626 +#endif
60627 +
60628 + va_start(args, fmt);
60629 + ret = ____request_module(wait, NULL, fmt, args);
60630 + va_end(args);
60631 +
60632 + return ret;
60633 +}
60634 +
60635 EXPORT_SYMBOL(__request_module);
60636 #endif /* CONFIG_MODULES */
60637
60638 diff -urNp linux-3.0.4/kernel/kprobes.c linux-3.0.4/kernel/kprobes.c
60639 --- linux-3.0.4/kernel/kprobes.c 2011-07-21 22:17:23.000000000 -0400
60640 +++ linux-3.0.4/kernel/kprobes.c 2011-08-23 21:47:56.000000000 -0400
60641 @@ -185,7 +185,7 @@ static kprobe_opcode_t __kprobes *__get_
60642 * kernel image and loaded module images reside. This is required
60643 * so x86_64 can correctly handle the %rip-relative fixups.
60644 */
60645 - kip->insns = module_alloc(PAGE_SIZE);
60646 + kip->insns = module_alloc_exec(PAGE_SIZE);
60647 if (!kip->insns) {
60648 kfree(kip);
60649 return NULL;
60650 @@ -225,7 +225,7 @@ static int __kprobes collect_one_slot(st
60651 */
60652 if (!list_is_singular(&kip->list)) {
60653 list_del(&kip->list);
60654 - module_free(NULL, kip->insns);
60655 + module_free_exec(NULL, kip->insns);
60656 kfree(kip);
60657 }
60658 return 1;
60659 @@ -1936,7 +1936,7 @@ static int __init init_kprobes(void)
60660 {
60661 int i, err = 0;
60662 unsigned long offset = 0, size = 0;
60663 - char *modname, namebuf[128];
60664 + char *modname, namebuf[KSYM_NAME_LEN];
60665 const char *symbol_name;
60666 void *addr;
60667 struct kprobe_blackpoint *kb;
60668 @@ -2062,7 +2062,7 @@ static int __kprobes show_kprobe_addr(st
60669 const char *sym = NULL;
60670 unsigned int i = *(loff_t *) v;
60671 unsigned long offset = 0;
60672 - char *modname, namebuf[128];
60673 + char *modname, namebuf[KSYM_NAME_LEN];
60674
60675 head = &kprobe_table[i];
60676 preempt_disable();
60677 diff -urNp linux-3.0.4/kernel/lockdep.c linux-3.0.4/kernel/lockdep.c
60678 --- linux-3.0.4/kernel/lockdep.c 2011-07-21 22:17:23.000000000 -0400
60679 +++ linux-3.0.4/kernel/lockdep.c 2011-08-23 21:47:56.000000000 -0400
60680 @@ -583,6 +583,10 @@ static int static_obj(void *obj)
60681 end = (unsigned long) &_end,
60682 addr = (unsigned long) obj;
60683
60684 +#ifdef CONFIG_PAX_KERNEXEC
60685 + start = ktla_ktva(start);
60686 +#endif
60687 +
60688 /*
60689 * static variable?
60690 */
60691 @@ -718,6 +722,7 @@ register_lock_class(struct lockdep_map *
60692 if (!static_obj(lock->key)) {
60693 debug_locks_off();
60694 printk("INFO: trying to register non-static key.\n");
60695 + printk("lock:%pS key:%pS.\n", lock, lock->key);
60696 printk("the code is fine but needs lockdep annotation.\n");
60697 printk("turning off the locking correctness validator.\n");
60698 dump_stack();
60699 @@ -2936,7 +2941,7 @@ static int __lock_acquire(struct lockdep
60700 if (!class)
60701 return 0;
60702 }
60703 - atomic_inc((atomic_t *)&class->ops);
60704 + atomic_inc_unchecked((atomic_unchecked_t *)&class->ops);
60705 if (very_verbose(class)) {
60706 printk("\nacquire class [%p] %s", class->key, class->name);
60707 if (class->name_version > 1)
60708 diff -urNp linux-3.0.4/kernel/lockdep_proc.c linux-3.0.4/kernel/lockdep_proc.c
60709 --- linux-3.0.4/kernel/lockdep_proc.c 2011-07-21 22:17:23.000000000 -0400
60710 +++ linux-3.0.4/kernel/lockdep_proc.c 2011-08-23 21:47:56.000000000 -0400
60711 @@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, v
60712
60713 static void print_name(struct seq_file *m, struct lock_class *class)
60714 {
60715 - char str[128];
60716 + char str[KSYM_NAME_LEN];
60717 const char *name = class->name;
60718
60719 if (!name) {
60720 diff -urNp linux-3.0.4/kernel/module.c linux-3.0.4/kernel/module.c
60721 --- linux-3.0.4/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
60722 +++ linux-3.0.4/kernel/module.c 2011-08-23 21:48:14.000000000 -0400
60723 @@ -58,6 +58,7 @@
60724 #include <linux/jump_label.h>
60725 #include <linux/pfn.h>
60726 #include <linux/bsearch.h>
60727 +#include <linux/grsecurity.h>
60728
60729 #define CREATE_TRACE_POINTS
60730 #include <trace/events/module.h>
60731 @@ -119,7 +120,8 @@ static BLOCKING_NOTIFIER_HEAD(module_not
60732
60733 /* Bounds of module allocation, for speeding __module_address.
60734 * Protected by module_mutex. */
60735 -static unsigned long module_addr_min = -1UL, module_addr_max = 0;
60736 +static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
60737 +static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
60738
60739 int register_module_notifier(struct notifier_block * nb)
60740 {
60741 @@ -284,7 +286,7 @@ bool each_symbol_section(bool (*fn)(cons
60742 return true;
60743
60744 list_for_each_entry_rcu(mod, &modules, list) {
60745 - struct symsearch arr[] = {
60746 + struct symsearch modarr[] = {
60747 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
60748 NOT_GPL_ONLY, false },
60749 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
60750 @@ -306,7 +308,7 @@ bool each_symbol_section(bool (*fn)(cons
60751 #endif
60752 };
60753
60754 - if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
60755 + if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
60756 return true;
60757 }
60758 return false;
60759 @@ -438,7 +440,7 @@ static inline void __percpu *mod_percpu(
60760 static int percpu_modalloc(struct module *mod,
60761 unsigned long size, unsigned long align)
60762 {
60763 - if (align > PAGE_SIZE) {
60764 + if (align-1 >= PAGE_SIZE) {
60765 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
60766 mod->name, align, PAGE_SIZE);
60767 align = PAGE_SIZE;
60768 @@ -1166,7 +1168,7 @@ resolve_symbol_wait(struct module *mod,
60769 */
60770 #ifdef CONFIG_SYSFS
60771
60772 -#ifdef CONFIG_KALLSYMS
60773 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
60774 static inline bool sect_empty(const Elf_Shdr *sect)
60775 {
60776 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
60777 @@ -1632,21 +1634,21 @@ static void set_section_ro_nx(void *base
60778
60779 static void unset_module_core_ro_nx(struct module *mod)
60780 {
60781 - set_page_attributes(mod->module_core + mod->core_text_size,
60782 - mod->module_core + mod->core_size,
60783 + set_page_attributes(mod->module_core_rw,
60784 + mod->module_core_rw + mod->core_size_rw,
60785 set_memory_x);
60786 - set_page_attributes(mod->module_core,
60787 - mod->module_core + mod->core_ro_size,
60788 + set_page_attributes(mod->module_core_rx,
60789 + mod->module_core_rx + mod->core_size_rx,
60790 set_memory_rw);
60791 }
60792
60793 static void unset_module_init_ro_nx(struct module *mod)
60794 {
60795 - set_page_attributes(mod->module_init + mod->init_text_size,
60796 - mod->module_init + mod->init_size,
60797 + set_page_attributes(mod->module_init_rw,
60798 + mod->module_init_rw + mod->init_size_rw,
60799 set_memory_x);
60800 - set_page_attributes(mod->module_init,
60801 - mod->module_init + mod->init_ro_size,
60802 + set_page_attributes(mod->module_init_rx,
60803 + mod->module_init_rx + mod->init_size_rx,
60804 set_memory_rw);
60805 }
60806
60807 @@ -1657,14 +1659,14 @@ void set_all_modules_text_rw(void)
60808
60809 mutex_lock(&module_mutex);
60810 list_for_each_entry_rcu(mod, &modules, list) {
60811 - if ((mod->module_core) && (mod->core_text_size)) {
60812 - set_page_attributes(mod->module_core,
60813 - mod->module_core + mod->core_text_size,
60814 + if ((mod->module_core_rx) && (mod->core_size_rx)) {
60815 + set_page_attributes(mod->module_core_rx,
60816 + mod->module_core_rx + mod->core_size_rx,
60817 set_memory_rw);
60818 }
60819 - if ((mod->module_init) && (mod->init_text_size)) {
60820 - set_page_attributes(mod->module_init,
60821 - mod->module_init + mod->init_text_size,
60822 + if ((mod->module_init_rx) && (mod->init_size_rx)) {
60823 + set_page_attributes(mod->module_init_rx,
60824 + mod->module_init_rx + mod->init_size_rx,
60825 set_memory_rw);
60826 }
60827 }
60828 @@ -1678,14 +1680,14 @@ void set_all_modules_text_ro(void)
60829
60830 mutex_lock(&module_mutex);
60831 list_for_each_entry_rcu(mod, &modules, list) {
60832 - if ((mod->module_core) && (mod->core_text_size)) {
60833 - set_page_attributes(mod->module_core,
60834 - mod->module_core + mod->core_text_size,
60835 + if ((mod->module_core_rx) && (mod->core_size_rx)) {
60836 + set_page_attributes(mod->module_core_rx,
60837 + mod->module_core_rx + mod->core_size_rx,
60838 set_memory_ro);
60839 }
60840 - if ((mod->module_init) && (mod->init_text_size)) {
60841 - set_page_attributes(mod->module_init,
60842 - mod->module_init + mod->init_text_size,
60843 + if ((mod->module_init_rx) && (mod->init_size_rx)) {
60844 + set_page_attributes(mod->module_init_rx,
60845 + mod->module_init_rx + mod->init_size_rx,
60846 set_memory_ro);
60847 }
60848 }
60849 @@ -1722,16 +1724,19 @@ static void free_module(struct module *m
60850
60851 /* This may be NULL, but that's OK */
60852 unset_module_init_ro_nx(mod);
60853 - module_free(mod, mod->module_init);
60854 + module_free(mod, mod->module_init_rw);
60855 + module_free_exec(mod, mod->module_init_rx);
60856 kfree(mod->args);
60857 percpu_modfree(mod);
60858
60859 /* Free lock-classes: */
60860 - lockdep_free_key_range(mod->module_core, mod->core_size);
60861 + lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
60862 + lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
60863
60864 /* Finally, free the core (containing the module structure) */
60865 unset_module_core_ro_nx(mod);
60866 - module_free(mod, mod->module_core);
60867 + module_free_exec(mod, mod->module_core_rx);
60868 + module_free(mod, mod->module_core_rw);
60869
60870 #ifdef CONFIG_MPU
60871 update_protections(current->mm);
60872 @@ -1800,10 +1805,31 @@ static int simplify_symbols(struct modul
60873 unsigned int i;
60874 int ret = 0;
60875 const struct kernel_symbol *ksym;
60876 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
60877 + int is_fs_load = 0;
60878 + int register_filesystem_found = 0;
60879 + char *p;
60880 +
60881 + p = strstr(mod->args, "grsec_modharden_fs");
60882 + if (p) {
60883 + char *endptr = p + strlen("grsec_modharden_fs");
60884 + /* copy \0 as well */
60885 + memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
60886 + is_fs_load = 1;
60887 + }
60888 +#endif
60889
60890 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
60891 const char *name = info->strtab + sym[i].st_name;
60892
60893 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
60894 + /* it's a real shame this will never get ripped and copied
60895 + upstream! ;(
60896 + */
60897 + if (is_fs_load && !strcmp(name, "register_filesystem"))
60898 + register_filesystem_found = 1;
60899 +#endif
60900 +
60901 switch (sym[i].st_shndx) {
60902 case SHN_COMMON:
60903 /* We compiled with -fno-common. These are not
60904 @@ -1824,7 +1850,9 @@ static int simplify_symbols(struct modul
60905 ksym = resolve_symbol_wait(mod, info, name);
60906 /* Ok if resolved. */
60907 if (ksym && !IS_ERR(ksym)) {
60908 + pax_open_kernel();
60909 sym[i].st_value = ksym->value;
60910 + pax_close_kernel();
60911 break;
60912 }
60913
60914 @@ -1843,11 +1871,20 @@ static int simplify_symbols(struct modul
60915 secbase = (unsigned long)mod_percpu(mod);
60916 else
60917 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
60918 + pax_open_kernel();
60919 sym[i].st_value += secbase;
60920 + pax_close_kernel();
60921 break;
60922 }
60923 }
60924
60925 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
60926 + if (is_fs_load && !register_filesystem_found) {
60927 + printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
60928 + ret = -EPERM;
60929 + }
60930 +#endif
60931 +
60932 return ret;
60933 }
60934
60935 @@ -1931,22 +1968,12 @@ static void layout_sections(struct modul
60936 || s->sh_entsize != ~0UL
60937 || strstarts(sname, ".init"))
60938 continue;
60939 - s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
60940 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
60941 + s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
60942 + else
60943 + s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
60944 DEBUGP("\t%s\n", name);
60945 }
60946 - switch (m) {
60947 - case 0: /* executable */
60948 - mod->core_size = debug_align(mod->core_size);
60949 - mod->core_text_size = mod->core_size;
60950 - break;
60951 - case 1: /* RO: text and ro-data */
60952 - mod->core_size = debug_align(mod->core_size);
60953 - mod->core_ro_size = mod->core_size;
60954 - break;
60955 - case 3: /* whole core */
60956 - mod->core_size = debug_align(mod->core_size);
60957 - break;
60958 - }
60959 }
60960
60961 DEBUGP("Init section allocation order:\n");
60962 @@ -1960,23 +1987,13 @@ static void layout_sections(struct modul
60963 || s->sh_entsize != ~0UL
60964 || !strstarts(sname, ".init"))
60965 continue;
60966 - s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
60967 - | INIT_OFFSET_MASK);
60968 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
60969 + s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
60970 + else
60971 + s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
60972 + s->sh_entsize |= INIT_OFFSET_MASK;
60973 DEBUGP("\t%s\n", sname);
60974 }
60975 - switch (m) {
60976 - case 0: /* executable */
60977 - mod->init_size = debug_align(mod->init_size);
60978 - mod->init_text_size = mod->init_size;
60979 - break;
60980 - case 1: /* RO: text and ro-data */
60981 - mod->init_size = debug_align(mod->init_size);
60982 - mod->init_ro_size = mod->init_size;
60983 - break;
60984 - case 3: /* whole init */
60985 - mod->init_size = debug_align(mod->init_size);
60986 - break;
60987 - }
60988 }
60989 }
60990
60991 @@ -2141,7 +2158,7 @@ static void layout_symtab(struct module
60992
60993 /* Put symbol section at end of init part of module. */
60994 symsect->sh_flags |= SHF_ALLOC;
60995 - symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
60996 + symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
60997 info->index.sym) | INIT_OFFSET_MASK;
60998 DEBUGP("\t%s\n", info->secstrings + symsect->sh_name);
60999
61000 @@ -2158,19 +2175,19 @@ static void layout_symtab(struct module
61001 }
61002
61003 /* Append room for core symbols at end of core part. */
61004 - info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
61005 - mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
61006 + info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
61007 + mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
61008
61009 /* Put string table section at end of init part of module. */
61010 strsect->sh_flags |= SHF_ALLOC;
61011 - strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
61012 + strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
61013 info->index.str) | INIT_OFFSET_MASK;
61014 DEBUGP("\t%s\n", info->secstrings + strsect->sh_name);
61015
61016 /* Append room for core symbols' strings at end of core part. */
61017 - info->stroffs = mod->core_size;
61018 + info->stroffs = mod->core_size_rx;
61019 __set_bit(0, info->strmap);
61020 - mod->core_size += bitmap_weight(info->strmap, strsect->sh_size);
61021 + mod->core_size_rx += bitmap_weight(info->strmap, strsect->sh_size);
61022 }
61023
61024 static void add_kallsyms(struct module *mod, const struct load_info *info)
61025 @@ -2186,11 +2203,13 @@ static void add_kallsyms(struct module *
61026 /* Make sure we get permanent strtab: don't use info->strtab. */
61027 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
61028
61029 + pax_open_kernel();
61030 +
61031 /* Set types up while we still have access to sections. */
61032 for (i = 0; i < mod->num_symtab; i++)
61033 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
61034
61035 - mod->core_symtab = dst = mod->module_core + info->symoffs;
61036 + mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
61037 src = mod->symtab;
61038 *dst = *src;
61039 for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) {
61040 @@ -2203,10 +2222,12 @@ static void add_kallsyms(struct module *
61041 }
61042 mod->core_num_syms = ndst;
61043
61044 - mod->core_strtab = s = mod->module_core + info->stroffs;
61045 + mod->core_strtab = s = mod->module_core_rx + info->stroffs;
61046 for (*s = 0, i = 1; i < info->sechdrs[info->index.str].sh_size; ++i)
61047 if (test_bit(i, info->strmap))
61048 *++s = mod->strtab[i];
61049 +
61050 + pax_close_kernel();
61051 }
61052 #else
61053 static inline void layout_symtab(struct module *mod, struct load_info *info)
61054 @@ -2235,17 +2256,33 @@ static void dynamic_debug_remove(struct
61055 ddebug_remove_module(debug->modname);
61056 }
61057
61058 -static void *module_alloc_update_bounds(unsigned long size)
61059 +static void *module_alloc_update_bounds_rw(unsigned long size)
61060 {
61061 void *ret = module_alloc(size);
61062
61063 if (ret) {
61064 mutex_lock(&module_mutex);
61065 /* Update module bounds. */
61066 - if ((unsigned long)ret < module_addr_min)
61067 - module_addr_min = (unsigned long)ret;
61068 - if ((unsigned long)ret + size > module_addr_max)
61069 - module_addr_max = (unsigned long)ret + size;
61070 + if ((unsigned long)ret < module_addr_min_rw)
61071 + module_addr_min_rw = (unsigned long)ret;
61072 + if ((unsigned long)ret + size > module_addr_max_rw)
61073 + module_addr_max_rw = (unsigned long)ret + size;
61074 + mutex_unlock(&module_mutex);
61075 + }
61076 + return ret;
61077 +}
61078 +
61079 +static void *module_alloc_update_bounds_rx(unsigned long size)
61080 +{
61081 + void *ret = module_alloc_exec(size);
61082 +
61083 + if (ret) {
61084 + mutex_lock(&module_mutex);
61085 + /* Update module bounds. */
61086 + if ((unsigned long)ret < module_addr_min_rx)
61087 + module_addr_min_rx = (unsigned long)ret;
61088 + if ((unsigned long)ret + size > module_addr_max_rx)
61089 + module_addr_max_rx = (unsigned long)ret + size;
61090 mutex_unlock(&module_mutex);
61091 }
61092 return ret;
61093 @@ -2538,7 +2575,7 @@ static int move_module(struct module *mo
61094 void *ptr;
61095
61096 /* Do the allocs. */
61097 - ptr = module_alloc_update_bounds(mod->core_size);
61098 + ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
61099 /*
61100 * The pointer to this block is stored in the module structure
61101 * which is inside the block. Just mark it as not being a
61102 @@ -2548,23 +2585,50 @@ static int move_module(struct module *mo
61103 if (!ptr)
61104 return -ENOMEM;
61105
61106 - memset(ptr, 0, mod->core_size);
61107 - mod->module_core = ptr;
61108 + memset(ptr, 0, mod->core_size_rw);
61109 + mod->module_core_rw = ptr;
61110
61111 - ptr = module_alloc_update_bounds(mod->init_size);
61112 + ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
61113 /*
61114 * The pointer to this block is stored in the module structure
61115 * which is inside the block. This block doesn't need to be
61116 * scanned as it contains data and code that will be freed
61117 * after the module is initialized.
61118 */
61119 - kmemleak_ignore(ptr);
61120 - if (!ptr && mod->init_size) {
61121 - module_free(mod, mod->module_core);
61122 + kmemleak_not_leak(ptr);
61123 + if (!ptr && mod->init_size_rw) {
61124 + module_free(mod, mod->module_core_rw);
61125 return -ENOMEM;
61126 }
61127 - memset(ptr, 0, mod->init_size);
61128 - mod->module_init = ptr;
61129 + memset(ptr, 0, mod->init_size_rw);
61130 + mod->module_init_rw = ptr;
61131 +
61132 + ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
61133 + kmemleak_not_leak(ptr);
61134 + if (!ptr) {
61135 + module_free(mod, mod->module_init_rw);
61136 + module_free(mod, mod->module_core_rw);
61137 + return -ENOMEM;
61138 + }
61139 +
61140 + pax_open_kernel();
61141 + memset(ptr, 0, mod->core_size_rx);
61142 + pax_close_kernel();
61143 + mod->module_core_rx = ptr;
61144 +
61145 + ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
61146 + kmemleak_not_leak(ptr);
61147 + if (!ptr && mod->init_size_rx) {
61148 + module_free_exec(mod, mod->module_core_rx);
61149 + module_free(mod, mod->module_init_rw);
61150 + module_free(mod, mod->module_core_rw);
61151 + return -ENOMEM;
61152 + }
61153 +
61154 + pax_open_kernel();
61155 + memset(ptr, 0, mod->init_size_rx);
61156 + pax_close_kernel();
61157 + mod->module_init_rx = ptr;
61158
61159 /* Transfer each section which specifies SHF_ALLOC */
61160 DEBUGP("final section addresses:\n");
61161 @@ -2575,16 +2639,45 @@ static int move_module(struct module *mo
61162 if (!(shdr->sh_flags & SHF_ALLOC))
61163 continue;
61164
61165 - if (shdr->sh_entsize & INIT_OFFSET_MASK)
61166 - dest = mod->module_init
61167 - + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
61168 - else
61169 - dest = mod->module_core + shdr->sh_entsize;
61170 + if (shdr->sh_entsize & INIT_OFFSET_MASK) {
61171 + if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
61172 + dest = mod->module_init_rw
61173 + + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
61174 + else
61175 + dest = mod->module_init_rx
61176 + + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
61177 + } else {
61178 + if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
61179 + dest = mod->module_core_rw + shdr->sh_entsize;
61180 + else
61181 + dest = mod->module_core_rx + shdr->sh_entsize;
61182 + }
61183 +
61184 + if (shdr->sh_type != SHT_NOBITS) {
61185 +
61186 +#ifdef CONFIG_PAX_KERNEXEC
61187 +#ifdef CONFIG_X86_64
61188 + if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
61189 + set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
61190 +#endif
61191 + if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
61192 + pax_open_kernel();
61193 + memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
61194 + pax_close_kernel();
61195 + } else
61196 +#endif
61197
61198 - if (shdr->sh_type != SHT_NOBITS)
61199 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
61200 + }
61201 /* Update sh_addr to point to copy in image. */
61202 - shdr->sh_addr = (unsigned long)dest;
61203 +
61204 +#ifdef CONFIG_PAX_KERNEXEC
61205 + if (shdr->sh_flags & SHF_EXECINSTR)
61206 + shdr->sh_addr = ktva_ktla((unsigned long)dest);
61207 + else
61208 +#endif
61209 +
61210 + shdr->sh_addr = (unsigned long)dest;
61211 DEBUGP("\t0x%lx %s\n",
61212 shdr->sh_addr, info->secstrings + shdr->sh_name);
61213 }
61214 @@ -2635,12 +2728,12 @@ static void flush_module_icache(const st
61215 * Do it before processing of module parameters, so the module
61216 * can provide parameter accessor functions of its own.
61217 */
61218 - if (mod->module_init)
61219 - flush_icache_range((unsigned long)mod->module_init,
61220 - (unsigned long)mod->module_init
61221 - + mod->init_size);
61222 - flush_icache_range((unsigned long)mod->module_core,
61223 - (unsigned long)mod->module_core + mod->core_size);
61224 + if (mod->module_init_rx)
61225 + flush_icache_range((unsigned long)mod->module_init_rx,
61226 + (unsigned long)mod->module_init_rx
61227 + + mod->init_size_rx);
61228 + flush_icache_range((unsigned long)mod->module_core_rx,
61229 + (unsigned long)mod->module_core_rx + mod->core_size_rx);
61230
61231 set_fs(old_fs);
61232 }
61233 @@ -2712,8 +2805,10 @@ static void module_deallocate(struct mod
61234 {
61235 kfree(info->strmap);
61236 percpu_modfree(mod);
61237 - module_free(mod, mod->module_init);
61238 - module_free(mod, mod->module_core);
61239 + module_free_exec(mod, mod->module_init_rx);
61240 + module_free_exec(mod, mod->module_core_rx);
61241 + module_free(mod, mod->module_init_rw);
61242 + module_free(mod, mod->module_core_rw);
61243 }
61244
61245 static int post_relocation(struct module *mod, const struct load_info *info)
61246 @@ -2770,9 +2865,38 @@ static struct module *load_module(void _
61247 if (err)
61248 goto free_unload;
61249
61250 + /* Now copy in args */
61251 + mod->args = strndup_user(uargs, ~0UL >> 1);
61252 + if (IS_ERR(mod->args)) {
61253 + err = PTR_ERR(mod->args);
61254 + goto free_unload;
61255 + }
61256 +
61257 /* Set up MODINFO_ATTR fields */
61258 setup_modinfo(mod, &info);
61259
61260 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
61261 + {
61262 + char *p, *p2;
61263 +
61264 + if (strstr(mod->args, "grsec_modharden_netdev")) {
61265 + printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
61266 + err = -EPERM;
61267 + goto free_modinfo;
61268 + } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
61269 + p += strlen("grsec_modharden_normal");
61270 + p2 = strstr(p, "_");
61271 + if (p2) {
61272 + *p2 = '\0';
61273 + printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
61274 + *p2 = '_';
61275 + }
61276 + err = -EPERM;
61277 + goto free_modinfo;
61278 + }
61279 + }
61280 +#endif
61281 +
61282 /* Fix up syms, so that st_value is a pointer to location. */
61283 err = simplify_symbols(mod, &info);
61284 if (err < 0)
61285 @@ -2788,13 +2912,6 @@ static struct module *load_module(void _
61286
61287 flush_module_icache(mod);
61288
61289 - /* Now copy in args */
61290 - mod->args = strndup_user(uargs, ~0UL >> 1);
61291 - if (IS_ERR(mod->args)) {
61292 - err = PTR_ERR(mod->args);
61293 - goto free_arch_cleanup;
61294 - }
61295 -
61296 /* Mark state as coming so strong_try_module_get() ignores us. */
61297 mod->state = MODULE_STATE_COMING;
61298
61299 @@ -2854,11 +2971,10 @@ static struct module *load_module(void _
61300 unlock:
61301 mutex_unlock(&module_mutex);
61302 synchronize_sched();
61303 - kfree(mod->args);
61304 - free_arch_cleanup:
61305 module_arch_cleanup(mod);
61306 free_modinfo:
61307 free_modinfo(mod);
61308 + kfree(mod->args);
61309 free_unload:
61310 module_unload_free(mod);
61311 free_module:
61312 @@ -2899,16 +3015,16 @@ SYSCALL_DEFINE3(init_module, void __user
61313 MODULE_STATE_COMING, mod);
61314
61315 /* Set RO and NX regions for core */
61316 - set_section_ro_nx(mod->module_core,
61317 - mod->core_text_size,
61318 - mod->core_ro_size,
61319 - mod->core_size);
61320 + set_section_ro_nx(mod->module_core_rx,
61321 + mod->core_size_rx,
61322 + mod->core_size_rx,
61323 + mod->core_size_rx);
61324
61325 /* Set RO and NX regions for init */
61326 - set_section_ro_nx(mod->module_init,
61327 - mod->init_text_size,
61328 - mod->init_ro_size,
61329 - mod->init_size);
61330 + set_section_ro_nx(mod->module_init_rx,
61331 + mod->init_size_rx,
61332 + mod->init_size_rx,
61333 + mod->init_size_rx);
61334
61335 do_mod_ctors(mod);
61336 /* Start the module */
61337 @@ -2954,11 +3070,12 @@ SYSCALL_DEFINE3(init_module, void __user
61338 mod->strtab = mod->core_strtab;
61339 #endif
61340 unset_module_init_ro_nx(mod);
61341 - module_free(mod, mod->module_init);
61342 - mod->module_init = NULL;
61343 - mod->init_size = 0;
61344 - mod->init_ro_size = 0;
61345 - mod->init_text_size = 0;
61346 + module_free(mod, mod->module_init_rw);
61347 + module_free_exec(mod, mod->module_init_rx);
61348 + mod->module_init_rw = NULL;
61349 + mod->module_init_rx = NULL;
61350 + mod->init_size_rw = 0;
61351 + mod->init_size_rx = 0;
61352 mutex_unlock(&module_mutex);
61353
61354 return 0;
61355 @@ -2989,10 +3106,16 @@ static const char *get_ksymbol(struct mo
61356 unsigned long nextval;
61357
61358 /* At worse, next value is at end of module */
61359 - if (within_module_init(addr, mod))
61360 - nextval = (unsigned long)mod->module_init+mod->init_text_size;
61361 + if (within_module_init_rx(addr, mod))
61362 + nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
61363 + else if (within_module_init_rw(addr, mod))
61364 + nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
61365 + else if (within_module_core_rx(addr, mod))
61366 + nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
61367 + else if (within_module_core_rw(addr, mod))
61368 + nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
61369 else
61370 - nextval = (unsigned long)mod->module_core+mod->core_text_size;
61371 + return NULL;
61372
61373 /* Scan for closest preceding symbol, and next symbol. (ELF
61374 starts real symbols at 1). */
61375 @@ -3238,7 +3361,7 @@ static int m_show(struct seq_file *m, vo
61376 char buf[8];
61377
61378 seq_printf(m, "%s %u",
61379 - mod->name, mod->init_size + mod->core_size);
61380 + mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
61381 print_unload_info(m, mod);
61382
61383 /* Informative for users. */
61384 @@ -3247,7 +3370,7 @@ static int m_show(struct seq_file *m, vo
61385 mod->state == MODULE_STATE_COMING ? "Loading":
61386 "Live");
61387 /* Used by oprofile and other similar tools. */
61388 - seq_printf(m, " 0x%pK", mod->module_core);
61389 + seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
61390
61391 /* Taints info */
61392 if (mod->taints)
61393 @@ -3283,7 +3406,17 @@ static const struct file_operations proc
61394
61395 static int __init proc_modules_init(void)
61396 {
61397 +#ifndef CONFIG_GRKERNSEC_HIDESYM
61398 +#ifdef CONFIG_GRKERNSEC_PROC_USER
61399 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
61400 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
61401 + proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
61402 +#else
61403 proc_create("modules", 0, NULL, &proc_modules_operations);
61404 +#endif
61405 +#else
61406 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
61407 +#endif
61408 return 0;
61409 }
61410 module_init(proc_modules_init);
61411 @@ -3342,12 +3475,12 @@ struct module *__module_address(unsigned
61412 {
61413 struct module *mod;
61414
61415 - if (addr < module_addr_min || addr > module_addr_max)
61416 + if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
61417 + (addr < module_addr_min_rw || addr > module_addr_max_rw))
61418 return NULL;
61419
61420 list_for_each_entry_rcu(mod, &modules, list)
61421 - if (within_module_core(addr, mod)
61422 - || within_module_init(addr, mod))
61423 + if (within_module_init(addr, mod) || within_module_core(addr, mod))
61424 return mod;
61425 return NULL;
61426 }
61427 @@ -3381,11 +3514,20 @@ bool is_module_text_address(unsigned lon
61428 */
61429 struct module *__module_text_address(unsigned long addr)
61430 {
61431 - struct module *mod = __module_address(addr);
61432 + struct module *mod;
61433 +
61434 +#ifdef CONFIG_X86_32
61435 + addr = ktla_ktva(addr);
61436 +#endif
61437 +
61438 + if (addr < module_addr_min_rx || addr > module_addr_max_rx)
61439 + return NULL;
61440 +
61441 + mod = __module_address(addr);
61442 +
61443 if (mod) {
61444 /* Make sure it's within the text section. */
61445 - if (!within(addr, mod->module_init, mod->init_text_size)
61446 - && !within(addr, mod->module_core, mod->core_text_size))
61447 + if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
61448 mod = NULL;
61449 }
61450 return mod;
61451 diff -urNp linux-3.0.4/kernel/mutex.c linux-3.0.4/kernel/mutex.c
61452 --- linux-3.0.4/kernel/mutex.c 2011-07-21 22:17:23.000000000 -0400
61453 +++ linux-3.0.4/kernel/mutex.c 2011-08-23 21:47:56.000000000 -0400
61454 @@ -198,7 +198,7 @@ __mutex_lock_common(struct mutex *lock,
61455 spin_lock_mutex(&lock->wait_lock, flags);
61456
61457 debug_mutex_lock_common(lock, &waiter);
61458 - debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
61459 + debug_mutex_add_waiter(lock, &waiter, task);
61460
61461 /* add waiting tasks to the end of the waitqueue (FIFO): */
61462 list_add_tail(&waiter.list, &lock->wait_list);
61463 @@ -227,8 +227,7 @@ __mutex_lock_common(struct mutex *lock,
61464 * TASK_UNINTERRUPTIBLE case.)
61465 */
61466 if (unlikely(signal_pending_state(state, task))) {
61467 - mutex_remove_waiter(lock, &waiter,
61468 - task_thread_info(task));
61469 + mutex_remove_waiter(lock, &waiter, task);
61470 mutex_release(&lock->dep_map, 1, ip);
61471 spin_unlock_mutex(&lock->wait_lock, flags);
61472
61473 @@ -249,7 +248,7 @@ __mutex_lock_common(struct mutex *lock,
61474 done:
61475 lock_acquired(&lock->dep_map, ip);
61476 /* got the lock - rejoice! */
61477 - mutex_remove_waiter(lock, &waiter, current_thread_info());
61478 + mutex_remove_waiter(lock, &waiter, task);
61479 mutex_set_owner(lock);
61480
61481 /* set it to 0 if there are no waiters left: */
61482 diff -urNp linux-3.0.4/kernel/mutex-debug.c linux-3.0.4/kernel/mutex-debug.c
61483 --- linux-3.0.4/kernel/mutex-debug.c 2011-07-21 22:17:23.000000000 -0400
61484 +++ linux-3.0.4/kernel/mutex-debug.c 2011-08-23 21:47:56.000000000 -0400
61485 @@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mute
61486 }
61487
61488 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
61489 - struct thread_info *ti)
61490 + struct task_struct *task)
61491 {
61492 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
61493
61494 /* Mark the current thread as blocked on the lock: */
61495 - ti->task->blocked_on = waiter;
61496 + task->blocked_on = waiter;
61497 }
61498
61499 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
61500 - struct thread_info *ti)
61501 + struct task_struct *task)
61502 {
61503 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
61504 - DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
61505 - DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
61506 - ti->task->blocked_on = NULL;
61507 + DEBUG_LOCKS_WARN_ON(waiter->task != task);
61508 + DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
61509 + task->blocked_on = NULL;
61510
61511 list_del_init(&waiter->list);
61512 waiter->task = NULL;
61513 diff -urNp linux-3.0.4/kernel/mutex-debug.h linux-3.0.4/kernel/mutex-debug.h
61514 --- linux-3.0.4/kernel/mutex-debug.h 2011-07-21 22:17:23.000000000 -0400
61515 +++ linux-3.0.4/kernel/mutex-debug.h 2011-08-23 21:47:56.000000000 -0400
61516 @@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(stru
61517 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
61518 extern void debug_mutex_add_waiter(struct mutex *lock,
61519 struct mutex_waiter *waiter,
61520 - struct thread_info *ti);
61521 + struct task_struct *task);
61522 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
61523 - struct thread_info *ti);
61524 + struct task_struct *task);
61525 extern void debug_mutex_unlock(struct mutex *lock);
61526 extern void debug_mutex_init(struct mutex *lock, const char *name,
61527 struct lock_class_key *key);
61528 diff -urNp linux-3.0.4/kernel/padata.c linux-3.0.4/kernel/padata.c
61529 --- linux-3.0.4/kernel/padata.c 2011-07-21 22:17:23.000000000 -0400
61530 +++ linux-3.0.4/kernel/padata.c 2011-08-23 21:47:56.000000000 -0400
61531 @@ -132,10 +132,10 @@ int padata_do_parallel(struct padata_ins
61532 padata->pd = pd;
61533 padata->cb_cpu = cb_cpu;
61534
61535 - if (unlikely(atomic_read(&pd->seq_nr) == pd->max_seq_nr))
61536 - atomic_set(&pd->seq_nr, -1);
61537 + if (unlikely(atomic_read_unchecked(&pd->seq_nr) == pd->max_seq_nr))
61538 + atomic_set_unchecked(&pd->seq_nr, -1);
61539
61540 - padata->seq_nr = atomic_inc_return(&pd->seq_nr);
61541 + padata->seq_nr = atomic_inc_return_unchecked(&pd->seq_nr);
61542
61543 target_cpu = padata_cpu_hash(padata);
61544 queue = per_cpu_ptr(pd->pqueue, target_cpu);
61545 @@ -444,7 +444,7 @@ static struct parallel_data *padata_allo
61546 padata_init_pqueues(pd);
61547 padata_init_squeues(pd);
61548 setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
61549 - atomic_set(&pd->seq_nr, -1);
61550 + atomic_set_unchecked(&pd->seq_nr, -1);
61551 atomic_set(&pd->reorder_objects, 0);
61552 atomic_set(&pd->refcnt, 0);
61553 pd->pinst = pinst;
61554 diff -urNp linux-3.0.4/kernel/panic.c linux-3.0.4/kernel/panic.c
61555 --- linux-3.0.4/kernel/panic.c 2011-07-21 22:17:23.000000000 -0400
61556 +++ linux-3.0.4/kernel/panic.c 2011-08-23 21:48:14.000000000 -0400
61557 @@ -369,7 +369,7 @@ static void warn_slowpath_common(const c
61558 const char *board;
61559
61560 printk(KERN_WARNING "------------[ cut here ]------------\n");
61561 - printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
61562 + printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
61563 board = dmi_get_system_info(DMI_PRODUCT_NAME);
61564 if (board)
61565 printk(KERN_WARNING "Hardware name: %s\n", board);
61566 @@ -424,7 +424,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
61567 */
61568 void __stack_chk_fail(void)
61569 {
61570 - panic("stack-protector: Kernel stack is corrupted in: %p\n",
61571 + dump_stack();
61572 + panic("stack-protector: Kernel stack is corrupted in: %pA\n",
61573 __builtin_return_address(0));
61574 }
61575 EXPORT_SYMBOL(__stack_chk_fail);
61576 diff -urNp linux-3.0.4/kernel/pid.c linux-3.0.4/kernel/pid.c
61577 --- linux-3.0.4/kernel/pid.c 2011-07-21 22:17:23.000000000 -0400
61578 +++ linux-3.0.4/kernel/pid.c 2011-08-23 21:48:14.000000000 -0400
61579 @@ -33,6 +33,7 @@
61580 #include <linux/rculist.h>
61581 #include <linux/bootmem.h>
61582 #include <linux/hash.h>
61583 +#include <linux/security.h>
61584 #include <linux/pid_namespace.h>
61585 #include <linux/init_task.h>
61586 #include <linux/syscalls.h>
61587 @@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT
61588
61589 int pid_max = PID_MAX_DEFAULT;
61590
61591 -#define RESERVED_PIDS 300
61592 +#define RESERVED_PIDS 500
61593
61594 int pid_max_min = RESERVED_PIDS + 1;
61595 int pid_max_max = PID_MAX_LIMIT;
61596 @@ -419,8 +420,15 @@ EXPORT_SYMBOL(pid_task);
61597 */
61598 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
61599 {
61600 + struct task_struct *task;
61601 +
61602 rcu_lockdep_assert(rcu_read_lock_held());
61603 - return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
61604 + task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
61605 +
61606 + if (gr_pid_is_chrooted(task))
61607 + return NULL;
61608 +
61609 + return task;
61610 }
61611
61612 struct task_struct *find_task_by_vpid(pid_t vnr)
61613 @@ -428,6 +436,12 @@ struct task_struct *find_task_by_vpid(pi
61614 return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
61615 }
61616
61617 +struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
61618 +{
61619 + rcu_lockdep_assert(rcu_read_lock_held());
61620 + return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
61621 +}
61622 +
61623 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
61624 {
61625 struct pid *pid;
61626 diff -urNp linux-3.0.4/kernel/posix-cpu-timers.c linux-3.0.4/kernel/posix-cpu-timers.c
61627 --- linux-3.0.4/kernel/posix-cpu-timers.c 2011-07-21 22:17:23.000000000 -0400
61628 +++ linux-3.0.4/kernel/posix-cpu-timers.c 2011-08-23 21:48:14.000000000 -0400
61629 @@ -6,6 +6,7 @@
61630 #include <linux/posix-timers.h>
61631 #include <linux/errno.h>
61632 #include <linux/math64.h>
61633 +#include <linux/security.h>
61634 #include <asm/uaccess.h>
61635 #include <linux/kernel_stat.h>
61636 #include <trace/events/timer.h>
61637 @@ -1604,14 +1605,14 @@ struct k_clock clock_posix_cpu = {
61638
61639 static __init int init_posix_cpu_timers(void)
61640 {
61641 - struct k_clock process = {
61642 + static struct k_clock process = {
61643 .clock_getres = process_cpu_clock_getres,
61644 .clock_get = process_cpu_clock_get,
61645 .timer_create = process_cpu_timer_create,
61646 .nsleep = process_cpu_nsleep,
61647 .nsleep_restart = process_cpu_nsleep_restart,
61648 };
61649 - struct k_clock thread = {
61650 + static struct k_clock thread = {
61651 .clock_getres = thread_cpu_clock_getres,
61652 .clock_get = thread_cpu_clock_get,
61653 .timer_create = thread_cpu_timer_create,
61654 diff -urNp linux-3.0.4/kernel/posix-timers.c linux-3.0.4/kernel/posix-timers.c
61655 --- linux-3.0.4/kernel/posix-timers.c 2011-07-21 22:17:23.000000000 -0400
61656 +++ linux-3.0.4/kernel/posix-timers.c 2011-08-23 21:48:14.000000000 -0400
61657 @@ -43,6 +43,7 @@
61658 #include <linux/idr.h>
61659 #include <linux/posix-clock.h>
61660 #include <linux/posix-timers.h>
61661 +#include <linux/grsecurity.h>
61662 #include <linux/syscalls.h>
61663 #include <linux/wait.h>
61664 #include <linux/workqueue.h>
61665 @@ -129,7 +130,7 @@ static DEFINE_SPINLOCK(idr_lock);
61666 * which we beg off on and pass to do_sys_settimeofday().
61667 */
61668
61669 -static struct k_clock posix_clocks[MAX_CLOCKS];
61670 +static struct k_clock *posix_clocks[MAX_CLOCKS];
61671
61672 /*
61673 * These ones are defined below.
61674 @@ -227,7 +228,7 @@ static int posix_get_boottime(const cloc
61675 */
61676 static __init int init_posix_timers(void)
61677 {
61678 - struct k_clock clock_realtime = {
61679 + static struct k_clock clock_realtime = {
61680 .clock_getres = hrtimer_get_res,
61681 .clock_get = posix_clock_realtime_get,
61682 .clock_set = posix_clock_realtime_set,
61683 @@ -239,7 +240,7 @@ static __init int init_posix_timers(void
61684 .timer_get = common_timer_get,
61685 .timer_del = common_timer_del,
61686 };
61687 - struct k_clock clock_monotonic = {
61688 + static struct k_clock clock_monotonic = {
61689 .clock_getres = hrtimer_get_res,
61690 .clock_get = posix_ktime_get_ts,
61691 .nsleep = common_nsleep,
61692 @@ -249,19 +250,19 @@ static __init int init_posix_timers(void
61693 .timer_get = common_timer_get,
61694 .timer_del = common_timer_del,
61695 };
61696 - struct k_clock clock_monotonic_raw = {
61697 + static struct k_clock clock_monotonic_raw = {
61698 .clock_getres = hrtimer_get_res,
61699 .clock_get = posix_get_monotonic_raw,
61700 };
61701 - struct k_clock clock_realtime_coarse = {
61702 + static struct k_clock clock_realtime_coarse = {
61703 .clock_getres = posix_get_coarse_res,
61704 .clock_get = posix_get_realtime_coarse,
61705 };
61706 - struct k_clock clock_monotonic_coarse = {
61707 + static struct k_clock clock_monotonic_coarse = {
61708 .clock_getres = posix_get_coarse_res,
61709 .clock_get = posix_get_monotonic_coarse,
61710 };
61711 - struct k_clock clock_boottime = {
61712 + static struct k_clock clock_boottime = {
61713 .clock_getres = hrtimer_get_res,
61714 .clock_get = posix_get_boottime,
61715 .nsleep = common_nsleep,
61716 @@ -272,6 +273,8 @@ static __init int init_posix_timers(void
61717 .timer_del = common_timer_del,
61718 };
61719
61720 + pax_track_stack();
61721 +
61722 posix_timers_register_clock(CLOCK_REALTIME, &clock_realtime);
61723 posix_timers_register_clock(CLOCK_MONOTONIC, &clock_monotonic);
61724 posix_timers_register_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
61725 @@ -473,7 +476,7 @@ void posix_timers_register_clock(const c
61726 return;
61727 }
61728
61729 - posix_clocks[clock_id] = *new_clock;
61730 + posix_clocks[clock_id] = new_clock;
61731 }
61732 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
61733
61734 @@ -519,9 +522,9 @@ static struct k_clock *clockid_to_kclock
61735 return (id & CLOCKFD_MASK) == CLOCKFD ?
61736 &clock_posix_dynamic : &clock_posix_cpu;
61737
61738 - if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
61739 + if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
61740 return NULL;
61741 - return &posix_clocks[id];
61742 + return posix_clocks[id];
61743 }
61744
61745 static int common_timer_create(struct k_itimer *new_timer)
61746 @@ -959,6 +962,13 @@ SYSCALL_DEFINE2(clock_settime, const clo
61747 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
61748 return -EFAULT;
61749
61750 + /* only the CLOCK_REALTIME clock can be set, all other clocks
61751 + have their clock_set fptr set to a nosettime dummy function
61752 + CLOCK_REALTIME has a NULL clock_set fptr which causes it to
61753 + call common_clock_set, which calls do_sys_settimeofday, which
61754 + we hook
61755 + */
61756 +
61757 return kc->clock_set(which_clock, &new_tp);
61758 }
61759
61760 diff -urNp linux-3.0.4/kernel/power/poweroff.c linux-3.0.4/kernel/power/poweroff.c
61761 --- linux-3.0.4/kernel/power/poweroff.c 2011-07-21 22:17:23.000000000 -0400
61762 +++ linux-3.0.4/kernel/power/poweroff.c 2011-08-23 21:47:56.000000000 -0400
61763 @@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_powerof
61764 .enable_mask = SYSRQ_ENABLE_BOOT,
61765 };
61766
61767 -static int pm_sysrq_init(void)
61768 +static int __init pm_sysrq_init(void)
61769 {
61770 register_sysrq_key('o', &sysrq_poweroff_op);
61771 return 0;
61772 diff -urNp linux-3.0.4/kernel/power/process.c linux-3.0.4/kernel/power/process.c
61773 --- linux-3.0.4/kernel/power/process.c 2011-07-21 22:17:23.000000000 -0400
61774 +++ linux-3.0.4/kernel/power/process.c 2011-08-23 21:47:56.000000000 -0400
61775 @@ -41,6 +41,7 @@ static int try_to_freeze_tasks(bool sig_
61776 u64 elapsed_csecs64;
61777 unsigned int elapsed_csecs;
61778 bool wakeup = false;
61779 + bool timedout = false;
61780
61781 do_gettimeofday(&start);
61782
61783 @@ -51,6 +52,8 @@ static int try_to_freeze_tasks(bool sig_
61784
61785 while (true) {
61786 todo = 0;
61787 + if (time_after(jiffies, end_time))
61788 + timedout = true;
61789 read_lock(&tasklist_lock);
61790 do_each_thread(g, p) {
61791 if (frozen(p) || !freezable(p))
61792 @@ -71,9 +74,13 @@ static int try_to_freeze_tasks(bool sig_
61793 * try_to_stop() after schedule() in ptrace/signal
61794 * stop sees TIF_FREEZE.
61795 */
61796 - if (!task_is_stopped_or_traced(p) &&
61797 - !freezer_should_skip(p))
61798 + if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
61799 todo++;
61800 + if (timedout) {
61801 + printk(KERN_ERR "Task refusing to freeze:\n");
61802 + sched_show_task(p);
61803 + }
61804 + }
61805 } while_each_thread(g, p);
61806 read_unlock(&tasklist_lock);
61807
61808 @@ -82,7 +89,7 @@ static int try_to_freeze_tasks(bool sig_
61809 todo += wq_busy;
61810 }
61811
61812 - if (!todo || time_after(jiffies, end_time))
61813 + if (!todo || timedout)
61814 break;
61815
61816 if (pm_wakeup_pending()) {
61817 diff -urNp linux-3.0.4/kernel/printk.c linux-3.0.4/kernel/printk.c
61818 --- linux-3.0.4/kernel/printk.c 2011-07-21 22:17:23.000000000 -0400
61819 +++ linux-3.0.4/kernel/printk.c 2011-08-23 21:48:14.000000000 -0400
61820 @@ -313,12 +313,17 @@ static int check_syslog_permissions(int
61821 if (from_file && type != SYSLOG_ACTION_OPEN)
61822 return 0;
61823
61824 +#ifdef CONFIG_GRKERNSEC_DMESG
61825 + if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
61826 + return -EPERM;
61827 +#endif
61828 +
61829 if (syslog_action_restricted(type)) {
61830 if (capable(CAP_SYSLOG))
61831 return 0;
61832 /* For historical reasons, accept CAP_SYS_ADMIN too, with a warning */
61833 if (capable(CAP_SYS_ADMIN)) {
61834 - WARN_ONCE(1, "Attempt to access syslog with CAP_SYS_ADMIN "
61835 + printk_once(KERN_WARNING "Attempt to access syslog with CAP_SYS_ADMIN "
61836 "but no CAP_SYSLOG (deprecated).\n");
61837 return 0;
61838 }
61839 diff -urNp linux-3.0.4/kernel/profile.c linux-3.0.4/kernel/profile.c
61840 --- linux-3.0.4/kernel/profile.c 2011-07-21 22:17:23.000000000 -0400
61841 +++ linux-3.0.4/kernel/profile.c 2011-08-23 21:47:56.000000000 -0400
61842 @@ -39,7 +39,7 @@ struct profile_hit {
61843 /* Oprofile timer tick hook */
61844 static int (*timer_hook)(struct pt_regs *) __read_mostly;
61845
61846 -static atomic_t *prof_buffer;
61847 +static atomic_unchecked_t *prof_buffer;
61848 static unsigned long prof_len, prof_shift;
61849
61850 int prof_on __read_mostly;
61851 @@ -281,7 +281,7 @@ static void profile_flip_buffers(void)
61852 hits[i].pc = 0;
61853 continue;
61854 }
61855 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
61856 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
61857 hits[i].hits = hits[i].pc = 0;
61858 }
61859 }
61860 @@ -342,9 +342,9 @@ static void do_profile_hits(int type, vo
61861 * Add the current hit(s) and flush the write-queue out
61862 * to the global buffer:
61863 */
61864 - atomic_add(nr_hits, &prof_buffer[pc]);
61865 + atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
61866 for (i = 0; i < NR_PROFILE_HIT; ++i) {
61867 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
61868 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
61869 hits[i].pc = hits[i].hits = 0;
61870 }
61871 out:
61872 @@ -419,7 +419,7 @@ static void do_profile_hits(int type, vo
61873 {
61874 unsigned long pc;
61875 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
61876 - atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
61877 + atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
61878 }
61879 #endif /* !CONFIG_SMP */
61880
61881 @@ -517,7 +517,7 @@ read_profile(struct file *file, char __u
61882 return -EFAULT;
61883 buf++; p++; count--; read++;
61884 }
61885 - pnt = (char *)prof_buffer + p - sizeof(atomic_t);
61886 + pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
61887 if (copy_to_user(buf, (void *)pnt, count))
61888 return -EFAULT;
61889 read += count;
61890 @@ -548,7 +548,7 @@ static ssize_t write_profile(struct file
61891 }
61892 #endif
61893 profile_discard_flip_buffers();
61894 - memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
61895 + memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
61896 return count;
61897 }
61898
61899 diff -urNp linux-3.0.4/kernel/ptrace.c linux-3.0.4/kernel/ptrace.c
61900 --- linux-3.0.4/kernel/ptrace.c 2011-07-21 22:17:23.000000000 -0400
61901 +++ linux-3.0.4/kernel/ptrace.c 2011-08-23 21:48:14.000000000 -0400
61902 @@ -132,7 +132,8 @@ int ptrace_check_attach(struct task_stru
61903 return ret;
61904 }
61905
61906 -int __ptrace_may_access(struct task_struct *task, unsigned int mode)
61907 +static int __ptrace_may_access(struct task_struct *task, unsigned int mode,
61908 + unsigned int log)
61909 {
61910 const struct cred *cred = current_cred(), *tcred;
61911
61912 @@ -158,7 +159,8 @@ int __ptrace_may_access(struct task_stru
61913 cred->gid == tcred->sgid &&
61914 cred->gid == tcred->gid))
61915 goto ok;
61916 - if (ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE))
61917 + if ((!log && ns_capable_nolog(tcred->user->user_ns, CAP_SYS_PTRACE)) ||
61918 + (log && ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE)))
61919 goto ok;
61920 rcu_read_unlock();
61921 return -EPERM;
61922 @@ -167,7 +169,9 @@ ok:
61923 smp_rmb();
61924 if (task->mm)
61925 dumpable = get_dumpable(task->mm);
61926 - if (!dumpable && !task_ns_capable(task, CAP_SYS_PTRACE))
61927 + if (!dumpable &&
61928 + ((!log && !task_ns_capable_nolog(task, CAP_SYS_PTRACE)) ||
61929 + (log && !task_ns_capable(task, CAP_SYS_PTRACE))))
61930 return -EPERM;
61931
61932 return security_ptrace_access_check(task, mode);
61933 @@ -177,7 +181,16 @@ bool ptrace_may_access(struct task_struc
61934 {
61935 int err;
61936 task_lock(task);
61937 - err = __ptrace_may_access(task, mode);
61938 + err = __ptrace_may_access(task, mode, 0);
61939 + task_unlock(task);
61940 + return !err;
61941 +}
61942 +
61943 +bool ptrace_may_access_log(struct task_struct *task, unsigned int mode)
61944 +{
61945 + int err;
61946 + task_lock(task);
61947 + err = __ptrace_may_access(task, mode, 1);
61948 task_unlock(task);
61949 return !err;
61950 }
61951 @@ -205,7 +218,7 @@ static int ptrace_attach(struct task_str
61952 goto out;
61953
61954 task_lock(task);
61955 - retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
61956 + retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH, 1);
61957 task_unlock(task);
61958 if (retval)
61959 goto unlock_creds;
61960 @@ -218,7 +231,7 @@ static int ptrace_attach(struct task_str
61961 goto unlock_tasklist;
61962
61963 task->ptrace = PT_PTRACED;
61964 - if (task_ns_capable(task, CAP_SYS_PTRACE))
61965 + if (task_ns_capable_nolog(task, CAP_SYS_PTRACE))
61966 task->ptrace |= PT_PTRACE_CAP;
61967
61968 __ptrace_link(task, current);
61969 @@ -406,6 +419,8 @@ int ptrace_readdata(struct task_struct *
61970 {
61971 int copied = 0;
61972
61973 + pax_track_stack();
61974 +
61975 while (len > 0) {
61976 char buf[128];
61977 int this_len, retval;
61978 @@ -417,7 +432,7 @@ int ptrace_readdata(struct task_struct *
61979 break;
61980 return -EIO;
61981 }
61982 - if (copy_to_user(dst, buf, retval))
61983 + if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
61984 return -EFAULT;
61985 copied += retval;
61986 src += retval;
61987 @@ -431,6 +446,8 @@ int ptrace_writedata(struct task_struct
61988 {
61989 int copied = 0;
61990
61991 + pax_track_stack();
61992 +
61993 while (len > 0) {
61994 char buf[128];
61995 int this_len, retval;
61996 @@ -613,9 +630,11 @@ int ptrace_request(struct task_struct *c
61997 {
61998 int ret = -EIO;
61999 siginfo_t siginfo;
62000 - void __user *datavp = (void __user *) data;
62001 + void __user *datavp = (__force void __user *) data;
62002 unsigned long __user *datalp = datavp;
62003
62004 + pax_track_stack();
62005 +
62006 switch (request) {
62007 case PTRACE_PEEKTEXT:
62008 case PTRACE_PEEKDATA:
62009 @@ -761,14 +780,21 @@ SYSCALL_DEFINE4(ptrace, long, request, l
62010 goto out;
62011 }
62012
62013 + if (gr_handle_ptrace(child, request)) {
62014 + ret = -EPERM;
62015 + goto out_put_task_struct;
62016 + }
62017 +
62018 if (request == PTRACE_ATTACH) {
62019 ret = ptrace_attach(child);
62020 /*
62021 * Some architectures need to do book-keeping after
62022 * a ptrace attach.
62023 */
62024 - if (!ret)
62025 + if (!ret) {
62026 arch_ptrace_attach(child);
62027 + gr_audit_ptrace(child);
62028 + }
62029 goto out_put_task_struct;
62030 }
62031
62032 @@ -793,7 +819,7 @@ int generic_ptrace_peekdata(struct task_
62033 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
62034 if (copied != sizeof(tmp))
62035 return -EIO;
62036 - return put_user(tmp, (unsigned long __user *)data);
62037 + return put_user(tmp, (__force unsigned long __user *)data);
62038 }
62039
62040 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
62041 @@ -816,6 +842,8 @@ int compat_ptrace_request(struct task_st
62042 siginfo_t siginfo;
62043 int ret;
62044
62045 + pax_track_stack();
62046 +
62047 switch (request) {
62048 case PTRACE_PEEKTEXT:
62049 case PTRACE_PEEKDATA:
62050 @@ -903,14 +931,21 @@ asmlinkage long compat_sys_ptrace(compat
62051 goto out;
62052 }
62053
62054 + if (gr_handle_ptrace(child, request)) {
62055 + ret = -EPERM;
62056 + goto out_put_task_struct;
62057 + }
62058 +
62059 if (request == PTRACE_ATTACH) {
62060 ret = ptrace_attach(child);
62061 /*
62062 * Some architectures need to do book-keeping after
62063 * a ptrace attach.
62064 */
62065 - if (!ret)
62066 + if (!ret) {
62067 arch_ptrace_attach(child);
62068 + gr_audit_ptrace(child);
62069 + }
62070 goto out_put_task_struct;
62071 }
62072
62073 diff -urNp linux-3.0.4/kernel/rcutorture.c linux-3.0.4/kernel/rcutorture.c
62074 --- linux-3.0.4/kernel/rcutorture.c 2011-07-21 22:17:23.000000000 -0400
62075 +++ linux-3.0.4/kernel/rcutorture.c 2011-08-23 21:47:56.000000000 -0400
62076 @@ -138,12 +138,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_
62077 { 0 };
62078 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
62079 { 0 };
62080 -static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
62081 -static atomic_t n_rcu_torture_alloc;
62082 -static atomic_t n_rcu_torture_alloc_fail;
62083 -static atomic_t n_rcu_torture_free;
62084 -static atomic_t n_rcu_torture_mberror;
62085 -static atomic_t n_rcu_torture_error;
62086 +static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
62087 +static atomic_unchecked_t n_rcu_torture_alloc;
62088 +static atomic_unchecked_t n_rcu_torture_alloc_fail;
62089 +static atomic_unchecked_t n_rcu_torture_free;
62090 +static atomic_unchecked_t n_rcu_torture_mberror;
62091 +static atomic_unchecked_t n_rcu_torture_error;
62092 static long n_rcu_torture_boost_ktrerror;
62093 static long n_rcu_torture_boost_rterror;
62094 static long n_rcu_torture_boost_failure;
62095 @@ -223,11 +223,11 @@ rcu_torture_alloc(void)
62096
62097 spin_lock_bh(&rcu_torture_lock);
62098 if (list_empty(&rcu_torture_freelist)) {
62099 - atomic_inc(&n_rcu_torture_alloc_fail);
62100 + atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
62101 spin_unlock_bh(&rcu_torture_lock);
62102 return NULL;
62103 }
62104 - atomic_inc(&n_rcu_torture_alloc);
62105 + atomic_inc_unchecked(&n_rcu_torture_alloc);
62106 p = rcu_torture_freelist.next;
62107 list_del_init(p);
62108 spin_unlock_bh(&rcu_torture_lock);
62109 @@ -240,7 +240,7 @@ rcu_torture_alloc(void)
62110 static void
62111 rcu_torture_free(struct rcu_torture *p)
62112 {
62113 - atomic_inc(&n_rcu_torture_free);
62114 + atomic_inc_unchecked(&n_rcu_torture_free);
62115 spin_lock_bh(&rcu_torture_lock);
62116 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
62117 spin_unlock_bh(&rcu_torture_lock);
62118 @@ -360,7 +360,7 @@ rcu_torture_cb(struct rcu_head *p)
62119 i = rp->rtort_pipe_count;
62120 if (i > RCU_TORTURE_PIPE_LEN)
62121 i = RCU_TORTURE_PIPE_LEN;
62122 - atomic_inc(&rcu_torture_wcount[i]);
62123 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
62124 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
62125 rp->rtort_mbtest = 0;
62126 rcu_torture_free(rp);
62127 @@ -407,7 +407,7 @@ static void rcu_sync_torture_deferred_fr
62128 i = rp->rtort_pipe_count;
62129 if (i > RCU_TORTURE_PIPE_LEN)
62130 i = RCU_TORTURE_PIPE_LEN;
62131 - atomic_inc(&rcu_torture_wcount[i]);
62132 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
62133 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
62134 rp->rtort_mbtest = 0;
62135 list_del(&rp->rtort_free);
62136 @@ -882,7 +882,7 @@ rcu_torture_writer(void *arg)
62137 i = old_rp->rtort_pipe_count;
62138 if (i > RCU_TORTURE_PIPE_LEN)
62139 i = RCU_TORTURE_PIPE_LEN;
62140 - atomic_inc(&rcu_torture_wcount[i]);
62141 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
62142 old_rp->rtort_pipe_count++;
62143 cur_ops->deferred_free(old_rp);
62144 }
62145 @@ -951,7 +951,7 @@ static void rcu_torture_timer(unsigned l
62146 return;
62147 }
62148 if (p->rtort_mbtest == 0)
62149 - atomic_inc(&n_rcu_torture_mberror);
62150 + atomic_inc_unchecked(&n_rcu_torture_mberror);
62151 spin_lock(&rand_lock);
62152 cur_ops->read_delay(&rand);
62153 n_rcu_torture_timers++;
62154 @@ -1013,7 +1013,7 @@ rcu_torture_reader(void *arg)
62155 continue;
62156 }
62157 if (p->rtort_mbtest == 0)
62158 - atomic_inc(&n_rcu_torture_mberror);
62159 + atomic_inc_unchecked(&n_rcu_torture_mberror);
62160 cur_ops->read_delay(&rand);
62161 preempt_disable();
62162 pipe_count = p->rtort_pipe_count;
62163 @@ -1072,16 +1072,16 @@ rcu_torture_printk(char *page)
62164 rcu_torture_current,
62165 rcu_torture_current_version,
62166 list_empty(&rcu_torture_freelist),
62167 - atomic_read(&n_rcu_torture_alloc),
62168 - atomic_read(&n_rcu_torture_alloc_fail),
62169 - atomic_read(&n_rcu_torture_free),
62170 - atomic_read(&n_rcu_torture_mberror),
62171 + atomic_read_unchecked(&n_rcu_torture_alloc),
62172 + atomic_read_unchecked(&n_rcu_torture_alloc_fail),
62173 + atomic_read_unchecked(&n_rcu_torture_free),
62174 + atomic_read_unchecked(&n_rcu_torture_mberror),
62175 n_rcu_torture_boost_ktrerror,
62176 n_rcu_torture_boost_rterror,
62177 n_rcu_torture_boost_failure,
62178 n_rcu_torture_boosts,
62179 n_rcu_torture_timers);
62180 - if (atomic_read(&n_rcu_torture_mberror) != 0 ||
62181 + if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
62182 n_rcu_torture_boost_ktrerror != 0 ||
62183 n_rcu_torture_boost_rterror != 0 ||
62184 n_rcu_torture_boost_failure != 0)
62185 @@ -1089,7 +1089,7 @@ rcu_torture_printk(char *page)
62186 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
62187 if (i > 1) {
62188 cnt += sprintf(&page[cnt], "!!! ");
62189 - atomic_inc(&n_rcu_torture_error);
62190 + atomic_inc_unchecked(&n_rcu_torture_error);
62191 WARN_ON_ONCE(1);
62192 }
62193 cnt += sprintf(&page[cnt], "Reader Pipe: ");
62194 @@ -1103,7 +1103,7 @@ rcu_torture_printk(char *page)
62195 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
62196 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
62197 cnt += sprintf(&page[cnt], " %d",
62198 - atomic_read(&rcu_torture_wcount[i]));
62199 + atomic_read_unchecked(&rcu_torture_wcount[i]));
62200 }
62201 cnt += sprintf(&page[cnt], "\n");
62202 if (cur_ops->stats)
62203 @@ -1412,7 +1412,7 @@ rcu_torture_cleanup(void)
62204
62205 if (cur_ops->cleanup)
62206 cur_ops->cleanup();
62207 - if (atomic_read(&n_rcu_torture_error))
62208 + if (atomic_read_unchecked(&n_rcu_torture_error))
62209 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
62210 else
62211 rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
62212 @@ -1476,17 +1476,17 @@ rcu_torture_init(void)
62213
62214 rcu_torture_current = NULL;
62215 rcu_torture_current_version = 0;
62216 - atomic_set(&n_rcu_torture_alloc, 0);
62217 - atomic_set(&n_rcu_torture_alloc_fail, 0);
62218 - atomic_set(&n_rcu_torture_free, 0);
62219 - atomic_set(&n_rcu_torture_mberror, 0);
62220 - atomic_set(&n_rcu_torture_error, 0);
62221 + atomic_set_unchecked(&n_rcu_torture_alloc, 0);
62222 + atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
62223 + atomic_set_unchecked(&n_rcu_torture_free, 0);
62224 + atomic_set_unchecked(&n_rcu_torture_mberror, 0);
62225 + atomic_set_unchecked(&n_rcu_torture_error, 0);
62226 n_rcu_torture_boost_ktrerror = 0;
62227 n_rcu_torture_boost_rterror = 0;
62228 n_rcu_torture_boost_failure = 0;
62229 n_rcu_torture_boosts = 0;
62230 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
62231 - atomic_set(&rcu_torture_wcount[i], 0);
62232 + atomic_set_unchecked(&rcu_torture_wcount[i], 0);
62233 for_each_possible_cpu(cpu) {
62234 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
62235 per_cpu(rcu_torture_count, cpu)[i] = 0;
62236 diff -urNp linux-3.0.4/kernel/rcutree.c linux-3.0.4/kernel/rcutree.c
62237 --- linux-3.0.4/kernel/rcutree.c 2011-07-21 22:17:23.000000000 -0400
62238 +++ linux-3.0.4/kernel/rcutree.c 2011-09-14 09:08:05.000000000 -0400
62239 @@ -356,9 +356,9 @@ void rcu_enter_nohz(void)
62240 }
62241 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
62242 smp_mb__before_atomic_inc(); /* See above. */
62243 - atomic_inc(&rdtp->dynticks);
62244 + atomic_inc_unchecked(&rdtp->dynticks);
62245 smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */
62246 - WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
62247 + WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
62248 local_irq_restore(flags);
62249
62250 /* If the interrupt queued a callback, get out of dyntick mode. */
62251 @@ -387,10 +387,10 @@ void rcu_exit_nohz(void)
62252 return;
62253 }
62254 smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */
62255 - atomic_inc(&rdtp->dynticks);
62256 + atomic_inc_unchecked(&rdtp->dynticks);
62257 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
62258 smp_mb__after_atomic_inc(); /* See above. */
62259 - WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
62260 + WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
62261 local_irq_restore(flags);
62262 }
62263
62264 @@ -406,14 +406,14 @@ void rcu_nmi_enter(void)
62265 struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
62266
62267 if (rdtp->dynticks_nmi_nesting == 0 &&
62268 - (atomic_read(&rdtp->dynticks) & 0x1))
62269 + (atomic_read_unchecked(&rdtp->dynticks) & 0x1))
62270 return;
62271 rdtp->dynticks_nmi_nesting++;
62272 smp_mb__before_atomic_inc(); /* Force delay from prior write. */
62273 - atomic_inc(&rdtp->dynticks);
62274 + atomic_inc_unchecked(&rdtp->dynticks);
62275 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
62276 smp_mb__after_atomic_inc(); /* See above. */
62277 - WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
62278 + WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
62279 }
62280
62281 /**
62282 @@ -432,9 +432,9 @@ void rcu_nmi_exit(void)
62283 return;
62284 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
62285 smp_mb__before_atomic_inc(); /* See above. */
62286 - atomic_inc(&rdtp->dynticks);
62287 + atomic_inc_unchecked(&rdtp->dynticks);
62288 smp_mb__after_atomic_inc(); /* Force delay to next write. */
62289 - WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
62290 + WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
62291 }
62292
62293 /**
62294 @@ -469,7 +469,7 @@ void rcu_irq_exit(void)
62295 */
62296 static int dyntick_save_progress_counter(struct rcu_data *rdp)
62297 {
62298 - rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
62299 + rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
62300 return 0;
62301 }
62302
62303 @@ -484,7 +484,7 @@ static int rcu_implicit_dynticks_qs(stru
62304 unsigned long curr;
62305 unsigned long snap;
62306
62307 - curr = (unsigned long)atomic_add_return(0, &rdp->dynticks->dynticks);
62308 + curr = (unsigned long)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
62309 snap = (unsigned long)rdp->dynticks_snap;
62310
62311 /*
62312 @@ -1470,7 +1470,7 @@ __rcu_process_callbacks(struct rcu_state
62313 /*
62314 * Do softirq processing for the current CPU.
62315 */
62316 -static void rcu_process_callbacks(struct softirq_action *unused)
62317 +static void rcu_process_callbacks(void)
62318 {
62319 __rcu_process_callbacks(&rcu_sched_state,
62320 &__get_cpu_var(rcu_sched_data));
62321 diff -urNp linux-3.0.4/kernel/rcutree.h linux-3.0.4/kernel/rcutree.h
62322 --- linux-3.0.4/kernel/rcutree.h 2011-07-21 22:17:23.000000000 -0400
62323 +++ linux-3.0.4/kernel/rcutree.h 2011-09-14 09:08:05.000000000 -0400
62324 @@ -86,7 +86,7 @@
62325 struct rcu_dynticks {
62326 int dynticks_nesting; /* Track irq/process nesting level. */
62327 int dynticks_nmi_nesting; /* Track NMI nesting level. */
62328 - atomic_t dynticks; /* Even value for dynticks-idle, else odd. */
62329 + atomic_unchecked_t dynticks; /* Even value for dynticks-idle, else odd. */
62330 };
62331
62332 /* RCU's kthread states for tracing. */
62333 diff -urNp linux-3.0.4/kernel/rcutree_plugin.h linux-3.0.4/kernel/rcutree_plugin.h
62334 --- linux-3.0.4/kernel/rcutree_plugin.h 2011-07-21 22:17:23.000000000 -0400
62335 +++ linux-3.0.4/kernel/rcutree_plugin.h 2011-08-23 21:47:56.000000000 -0400
62336 @@ -822,7 +822,7 @@ void synchronize_rcu_expedited(void)
62337
62338 /* Clean up and exit. */
62339 smp_mb(); /* ensure expedited GP seen before counter increment. */
62340 - ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
62341 + ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
62342 unlock_mb_ret:
62343 mutex_unlock(&sync_rcu_preempt_exp_mutex);
62344 mb_ret:
62345 @@ -1774,8 +1774,8 @@ EXPORT_SYMBOL_GPL(synchronize_sched_expe
62346
62347 #else /* #ifndef CONFIG_SMP */
62348
62349 -static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
62350 -static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);
62351 +static atomic_unchecked_t sync_sched_expedited_started = ATOMIC_INIT(0);
62352 +static atomic_unchecked_t sync_sched_expedited_done = ATOMIC_INIT(0);
62353
62354 static int synchronize_sched_expedited_cpu_stop(void *data)
62355 {
62356 @@ -1830,7 +1830,7 @@ void synchronize_sched_expedited(void)
62357 int firstsnap, s, snap, trycount = 0;
62358
62359 /* Note that atomic_inc_return() implies full memory barrier. */
62360 - firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
62361 + firstsnap = snap = atomic_inc_return_unchecked(&sync_sched_expedited_started);
62362 get_online_cpus();
62363
62364 /*
62365 @@ -1851,7 +1851,7 @@ void synchronize_sched_expedited(void)
62366 }
62367
62368 /* Check to see if someone else did our work for us. */
62369 - s = atomic_read(&sync_sched_expedited_done);
62370 + s = atomic_read_unchecked(&sync_sched_expedited_done);
62371 if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) {
62372 smp_mb(); /* ensure test happens before caller kfree */
62373 return;
62374 @@ -1866,7 +1866,7 @@ void synchronize_sched_expedited(void)
62375 * grace period works for us.
62376 */
62377 get_online_cpus();
62378 - snap = atomic_read(&sync_sched_expedited_started) - 1;
62379 + snap = atomic_read_unchecked(&sync_sched_expedited_started) - 1;
62380 smp_mb(); /* ensure read is before try_stop_cpus(). */
62381 }
62382
62383 @@ -1877,12 +1877,12 @@ void synchronize_sched_expedited(void)
62384 * than we did beat us to the punch.
62385 */
62386 do {
62387 - s = atomic_read(&sync_sched_expedited_done);
62388 + s = atomic_read_unchecked(&sync_sched_expedited_done);
62389 if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) {
62390 smp_mb(); /* ensure test happens before caller kfree */
62391 break;
62392 }
62393 - } while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s);
62394 + } while (atomic_cmpxchg_unchecked(&sync_sched_expedited_done, s, snap) != s);
62395
62396 put_online_cpus();
62397 }
62398 diff -urNp linux-3.0.4/kernel/relay.c linux-3.0.4/kernel/relay.c
62399 --- linux-3.0.4/kernel/relay.c 2011-07-21 22:17:23.000000000 -0400
62400 +++ linux-3.0.4/kernel/relay.c 2011-08-23 21:48:14.000000000 -0400
62401 @@ -1236,6 +1236,8 @@ static ssize_t subbuf_splice_actor(struc
62402 };
62403 ssize_t ret;
62404
62405 + pax_track_stack();
62406 +
62407 if (rbuf->subbufs_produced == rbuf->subbufs_consumed)
62408 return 0;
62409 if (splice_grow_spd(pipe, &spd))
62410 diff -urNp linux-3.0.4/kernel/resource.c linux-3.0.4/kernel/resource.c
62411 --- linux-3.0.4/kernel/resource.c 2011-07-21 22:17:23.000000000 -0400
62412 +++ linux-3.0.4/kernel/resource.c 2011-08-23 21:48:14.000000000 -0400
62413 @@ -141,8 +141,18 @@ static const struct file_operations proc
62414
62415 static int __init ioresources_init(void)
62416 {
62417 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
62418 +#ifdef CONFIG_GRKERNSEC_PROC_USER
62419 + proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
62420 + proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
62421 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
62422 + proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
62423 + proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
62424 +#endif
62425 +#else
62426 proc_create("ioports", 0, NULL, &proc_ioports_operations);
62427 proc_create("iomem", 0, NULL, &proc_iomem_operations);
62428 +#endif
62429 return 0;
62430 }
62431 __initcall(ioresources_init);
62432 diff -urNp linux-3.0.4/kernel/rtmutex-tester.c linux-3.0.4/kernel/rtmutex-tester.c
62433 --- linux-3.0.4/kernel/rtmutex-tester.c 2011-07-21 22:17:23.000000000 -0400
62434 +++ linux-3.0.4/kernel/rtmutex-tester.c 2011-08-23 21:47:56.000000000 -0400
62435 @@ -20,7 +20,7 @@
62436 #define MAX_RT_TEST_MUTEXES 8
62437
62438 static spinlock_t rttest_lock;
62439 -static atomic_t rttest_event;
62440 +static atomic_unchecked_t rttest_event;
62441
62442 struct test_thread_data {
62443 int opcode;
62444 @@ -61,7 +61,7 @@ static int handle_op(struct test_thread_
62445
62446 case RTTEST_LOCKCONT:
62447 td->mutexes[td->opdata] = 1;
62448 - td->event = atomic_add_return(1, &rttest_event);
62449 + td->event = atomic_add_return_unchecked(1, &rttest_event);
62450 return 0;
62451
62452 case RTTEST_RESET:
62453 @@ -74,7 +74,7 @@ static int handle_op(struct test_thread_
62454 return 0;
62455
62456 case RTTEST_RESETEVENT:
62457 - atomic_set(&rttest_event, 0);
62458 + atomic_set_unchecked(&rttest_event, 0);
62459 return 0;
62460
62461 default:
62462 @@ -91,9 +91,9 @@ static int handle_op(struct test_thread_
62463 return ret;
62464
62465 td->mutexes[id] = 1;
62466 - td->event = atomic_add_return(1, &rttest_event);
62467 + td->event = atomic_add_return_unchecked(1, &rttest_event);
62468 rt_mutex_lock(&mutexes[id]);
62469 - td->event = atomic_add_return(1, &rttest_event);
62470 + td->event = atomic_add_return_unchecked(1, &rttest_event);
62471 td->mutexes[id] = 4;
62472 return 0;
62473
62474 @@ -104,9 +104,9 @@ static int handle_op(struct test_thread_
62475 return ret;
62476
62477 td->mutexes[id] = 1;
62478 - td->event = atomic_add_return(1, &rttest_event);
62479 + td->event = atomic_add_return_unchecked(1, &rttest_event);
62480 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
62481 - td->event = atomic_add_return(1, &rttest_event);
62482 + td->event = atomic_add_return_unchecked(1, &rttest_event);
62483 td->mutexes[id] = ret ? 0 : 4;
62484 return ret ? -EINTR : 0;
62485
62486 @@ -115,9 +115,9 @@ static int handle_op(struct test_thread_
62487 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
62488 return ret;
62489
62490 - td->event = atomic_add_return(1, &rttest_event);
62491 + td->event = atomic_add_return_unchecked(1, &rttest_event);
62492 rt_mutex_unlock(&mutexes[id]);
62493 - td->event = atomic_add_return(1, &rttest_event);
62494 + td->event = atomic_add_return_unchecked(1, &rttest_event);
62495 td->mutexes[id] = 0;
62496 return 0;
62497
62498 @@ -164,7 +164,7 @@ void schedule_rt_mutex_test(struct rt_mu
62499 break;
62500
62501 td->mutexes[dat] = 2;
62502 - td->event = atomic_add_return(1, &rttest_event);
62503 + td->event = atomic_add_return_unchecked(1, &rttest_event);
62504 break;
62505
62506 default:
62507 @@ -184,7 +184,7 @@ void schedule_rt_mutex_test(struct rt_mu
62508 return;
62509
62510 td->mutexes[dat] = 3;
62511 - td->event = atomic_add_return(1, &rttest_event);
62512 + td->event = atomic_add_return_unchecked(1, &rttest_event);
62513 break;
62514
62515 case RTTEST_LOCKNOWAIT:
62516 @@ -196,7 +196,7 @@ void schedule_rt_mutex_test(struct rt_mu
62517 return;
62518
62519 td->mutexes[dat] = 1;
62520 - td->event = atomic_add_return(1, &rttest_event);
62521 + td->event = atomic_add_return_unchecked(1, &rttest_event);
62522 return;
62523
62524 default:
62525 diff -urNp linux-3.0.4/kernel/sched_autogroup.c linux-3.0.4/kernel/sched_autogroup.c
62526 --- linux-3.0.4/kernel/sched_autogroup.c 2011-07-21 22:17:23.000000000 -0400
62527 +++ linux-3.0.4/kernel/sched_autogroup.c 2011-08-23 21:47:56.000000000 -0400
62528 @@ -7,7 +7,7 @@
62529
62530 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
62531 static struct autogroup autogroup_default;
62532 -static atomic_t autogroup_seq_nr;
62533 +static atomic_unchecked_t autogroup_seq_nr;
62534
62535 static void __init autogroup_init(struct task_struct *init_task)
62536 {
62537 @@ -78,7 +78,7 @@ static inline struct autogroup *autogrou
62538
62539 kref_init(&ag->kref);
62540 init_rwsem(&ag->lock);
62541 - ag->id = atomic_inc_return(&autogroup_seq_nr);
62542 + ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
62543 ag->tg = tg;
62544 #ifdef CONFIG_RT_GROUP_SCHED
62545 /*
62546 diff -urNp linux-3.0.4/kernel/sched.c linux-3.0.4/kernel/sched.c
62547 --- linux-3.0.4/kernel/sched.c 2011-07-21 22:17:23.000000000 -0400
62548 +++ linux-3.0.4/kernel/sched.c 2011-08-23 21:48:14.000000000 -0400
62549 @@ -4251,6 +4251,8 @@ asmlinkage void __sched schedule(void)
62550 struct rq *rq;
62551 int cpu;
62552
62553 + pax_track_stack();
62554 +
62555 need_resched:
62556 preempt_disable();
62557 cpu = smp_processor_id();
62558 @@ -4934,6 +4936,8 @@ int can_nice(const struct task_struct *p
62559 /* convert nice value [19,-20] to rlimit style value [1,40] */
62560 int nice_rlim = 20 - nice;
62561
62562 + gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
62563 +
62564 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
62565 capable(CAP_SYS_NICE));
62566 }
62567 @@ -4967,7 +4971,8 @@ SYSCALL_DEFINE1(nice, int, increment)
62568 if (nice > 19)
62569 nice = 19;
62570
62571 - if (increment < 0 && !can_nice(current, nice))
62572 + if (increment < 0 && (!can_nice(current, nice) ||
62573 + gr_handle_chroot_nice()))
62574 return -EPERM;
62575
62576 retval = security_task_setnice(current, nice);
62577 @@ -5111,6 +5116,7 @@ recheck:
62578 unsigned long rlim_rtprio =
62579 task_rlimit(p, RLIMIT_RTPRIO);
62580
62581 + gr_learn_resource(p, RLIMIT_RTPRIO, param->sched_priority, 1);
62582 /* can't set/change the rt policy */
62583 if (policy != p->policy && !rlim_rtprio)
62584 return -EPERM;
62585 diff -urNp linux-3.0.4/kernel/sched_fair.c linux-3.0.4/kernel/sched_fair.c
62586 --- linux-3.0.4/kernel/sched_fair.c 2011-07-21 22:17:23.000000000 -0400
62587 +++ linux-3.0.4/kernel/sched_fair.c 2011-08-23 21:47:56.000000000 -0400
62588 @@ -4050,7 +4050,7 @@ static void nohz_idle_balance(int this_c
62589 * run_rebalance_domains is triggered when needed from the scheduler tick.
62590 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
62591 */
62592 -static void run_rebalance_domains(struct softirq_action *h)
62593 +static void run_rebalance_domains(void)
62594 {
62595 int this_cpu = smp_processor_id();
62596 struct rq *this_rq = cpu_rq(this_cpu);
62597 diff -urNp linux-3.0.4/kernel/signal.c linux-3.0.4/kernel/signal.c
62598 --- linux-3.0.4/kernel/signal.c 2011-07-21 22:17:23.000000000 -0400
62599 +++ linux-3.0.4/kernel/signal.c 2011-08-23 21:48:14.000000000 -0400
62600 @@ -45,12 +45,12 @@ static struct kmem_cache *sigqueue_cache
62601
62602 int print_fatal_signals __read_mostly;
62603
62604 -static void __user *sig_handler(struct task_struct *t, int sig)
62605 +static __sighandler_t sig_handler(struct task_struct *t, int sig)
62606 {
62607 return t->sighand->action[sig - 1].sa.sa_handler;
62608 }
62609
62610 -static int sig_handler_ignored(void __user *handler, int sig)
62611 +static int sig_handler_ignored(__sighandler_t handler, int sig)
62612 {
62613 /* Is it explicitly or implicitly ignored? */
62614 return handler == SIG_IGN ||
62615 @@ -60,7 +60,7 @@ static int sig_handler_ignored(void __us
62616 static int sig_task_ignored(struct task_struct *t, int sig,
62617 int from_ancestor_ns)
62618 {
62619 - void __user *handler;
62620 + __sighandler_t handler;
62621
62622 handler = sig_handler(t, sig);
62623
62624 @@ -320,6 +320,9 @@ __sigqueue_alloc(int sig, struct task_st
62625 atomic_inc(&user->sigpending);
62626 rcu_read_unlock();
62627
62628 + if (!override_rlimit)
62629 + gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
62630 +
62631 if (override_rlimit ||
62632 atomic_read(&user->sigpending) <=
62633 task_rlimit(t, RLIMIT_SIGPENDING)) {
62634 @@ -444,7 +447,7 @@ flush_signal_handlers(struct task_struct
62635
62636 int unhandled_signal(struct task_struct *tsk, int sig)
62637 {
62638 - void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
62639 + __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
62640 if (is_global_init(tsk))
62641 return 1;
62642 if (handler != SIG_IGN && handler != SIG_DFL)
62643 @@ -770,6 +773,13 @@ static int check_kill_permission(int sig
62644 }
62645 }
62646
62647 + /* allow glibc communication via tgkill to other threads in our
62648 + thread group */
62649 + if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
62650 + sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
62651 + && gr_handle_signal(t, sig))
62652 + return -EPERM;
62653 +
62654 return security_task_kill(t, info, sig, 0);
62655 }
62656
62657 @@ -1092,7 +1102,7 @@ __group_send_sig_info(int sig, struct si
62658 return send_signal(sig, info, p, 1);
62659 }
62660
62661 -static int
62662 +int
62663 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
62664 {
62665 return send_signal(sig, info, t, 0);
62666 @@ -1129,6 +1139,7 @@ force_sig_info(int sig, struct siginfo *
62667 unsigned long int flags;
62668 int ret, blocked, ignored;
62669 struct k_sigaction *action;
62670 + int is_unhandled = 0;
62671
62672 spin_lock_irqsave(&t->sighand->siglock, flags);
62673 action = &t->sighand->action[sig-1];
62674 @@ -1143,9 +1154,18 @@ force_sig_info(int sig, struct siginfo *
62675 }
62676 if (action->sa.sa_handler == SIG_DFL)
62677 t->signal->flags &= ~SIGNAL_UNKILLABLE;
62678 + if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
62679 + is_unhandled = 1;
62680 ret = specific_send_sig_info(sig, info, t);
62681 spin_unlock_irqrestore(&t->sighand->siglock, flags);
62682
62683 + /* only deal with unhandled signals, java etc trigger SIGSEGV during
62684 + normal operation */
62685 + if (is_unhandled) {
62686 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
62687 + gr_handle_crash(t, sig);
62688 + }
62689 +
62690 return ret;
62691 }
62692
62693 @@ -1212,8 +1232,11 @@ int group_send_sig_info(int sig, struct
62694 ret = check_kill_permission(sig, info, p);
62695 rcu_read_unlock();
62696
62697 - if (!ret && sig)
62698 + if (!ret && sig) {
62699 ret = do_send_sig_info(sig, info, p, true);
62700 + if (!ret)
62701 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
62702 + }
62703
62704 return ret;
62705 }
62706 @@ -1839,6 +1862,8 @@ void ptrace_notify(int exit_code)
62707 {
62708 siginfo_t info;
62709
62710 + pax_track_stack();
62711 +
62712 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
62713
62714 memset(&info, 0, sizeof info);
62715 @@ -2639,7 +2664,15 @@ do_send_specific(pid_t tgid, pid_t pid,
62716 int error = -ESRCH;
62717
62718 rcu_read_lock();
62719 - p = find_task_by_vpid(pid);
62720 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
62721 + /* allow glibc communication via tgkill to other threads in our
62722 + thread group */
62723 + if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
62724 + sig == (SIGRTMIN+1) && tgid == info->si_pid)
62725 + p = find_task_by_vpid_unrestricted(pid);
62726 + else
62727 +#endif
62728 + p = find_task_by_vpid(pid);
62729 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
62730 error = check_kill_permission(sig, info, p);
62731 /*
62732 diff -urNp linux-3.0.4/kernel/smp.c linux-3.0.4/kernel/smp.c
62733 --- linux-3.0.4/kernel/smp.c 2011-07-21 22:17:23.000000000 -0400
62734 +++ linux-3.0.4/kernel/smp.c 2011-08-23 21:47:56.000000000 -0400
62735 @@ -580,22 +580,22 @@ int smp_call_function(smp_call_func_t fu
62736 }
62737 EXPORT_SYMBOL(smp_call_function);
62738
62739 -void ipi_call_lock(void)
62740 +void ipi_call_lock(void) __acquires(call_function.lock)
62741 {
62742 raw_spin_lock(&call_function.lock);
62743 }
62744
62745 -void ipi_call_unlock(void)
62746 +void ipi_call_unlock(void) __releases(call_function.lock)
62747 {
62748 raw_spin_unlock(&call_function.lock);
62749 }
62750
62751 -void ipi_call_lock_irq(void)
62752 +void ipi_call_lock_irq(void) __acquires(call_function.lock)
62753 {
62754 raw_spin_lock_irq(&call_function.lock);
62755 }
62756
62757 -void ipi_call_unlock_irq(void)
62758 +void ipi_call_unlock_irq(void) __releases(call_function.lock)
62759 {
62760 raw_spin_unlock_irq(&call_function.lock);
62761 }
62762 diff -urNp linux-3.0.4/kernel/softirq.c linux-3.0.4/kernel/softirq.c
62763 --- linux-3.0.4/kernel/softirq.c 2011-07-21 22:17:23.000000000 -0400
62764 +++ linux-3.0.4/kernel/softirq.c 2011-08-23 21:47:56.000000000 -0400
62765 @@ -56,7 +56,7 @@ static struct softirq_action softirq_vec
62766
62767 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
62768
62769 -char *softirq_to_name[NR_SOFTIRQS] = {
62770 +const char * const softirq_to_name[NR_SOFTIRQS] = {
62771 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
62772 "TASKLET", "SCHED", "HRTIMER", "RCU"
62773 };
62774 @@ -235,7 +235,7 @@ restart:
62775 kstat_incr_softirqs_this_cpu(vec_nr);
62776
62777 trace_softirq_entry(vec_nr);
62778 - h->action(h);
62779 + h->action();
62780 trace_softirq_exit(vec_nr);
62781 if (unlikely(prev_count != preempt_count())) {
62782 printk(KERN_ERR "huh, entered softirq %u %s %p"
62783 @@ -385,9 +385,11 @@ void raise_softirq(unsigned int nr)
62784 local_irq_restore(flags);
62785 }
62786
62787 -void open_softirq(int nr, void (*action)(struct softirq_action *))
62788 +void open_softirq(int nr, void (*action)(void))
62789 {
62790 - softirq_vec[nr].action = action;
62791 + pax_open_kernel();
62792 + *(void **)&softirq_vec[nr].action = action;
62793 + pax_close_kernel();
62794 }
62795
62796 /*
62797 @@ -441,7 +443,7 @@ void __tasklet_hi_schedule_first(struct
62798
62799 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
62800
62801 -static void tasklet_action(struct softirq_action *a)
62802 +static void tasklet_action(void)
62803 {
62804 struct tasklet_struct *list;
62805
62806 @@ -476,7 +478,7 @@ static void tasklet_action(struct softir
62807 }
62808 }
62809
62810 -static void tasklet_hi_action(struct softirq_action *a)
62811 +static void tasklet_hi_action(void)
62812 {
62813 struct tasklet_struct *list;
62814
62815 diff -urNp linux-3.0.4/kernel/sys.c linux-3.0.4/kernel/sys.c
62816 --- linux-3.0.4/kernel/sys.c 2011-09-02 18:11:26.000000000 -0400
62817 +++ linux-3.0.4/kernel/sys.c 2011-08-29 23:26:27.000000000 -0400
62818 @@ -158,6 +158,12 @@ static int set_one_prio(struct task_stru
62819 error = -EACCES;
62820 goto out;
62821 }
62822 +
62823 + if (gr_handle_chroot_setpriority(p, niceval)) {
62824 + error = -EACCES;
62825 + goto out;
62826 + }
62827 +
62828 no_nice = security_task_setnice(p, niceval);
62829 if (no_nice) {
62830 error = no_nice;
62831 @@ -541,6 +547,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, g
62832 goto error;
62833 }
62834
62835 + if (gr_check_group_change(new->gid, new->egid, -1))
62836 + goto error;
62837 +
62838 if (rgid != (gid_t) -1 ||
62839 (egid != (gid_t) -1 && egid != old->gid))
62840 new->sgid = new->egid;
62841 @@ -570,6 +579,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
62842 old = current_cred();
62843
62844 retval = -EPERM;
62845 +
62846 + if (gr_check_group_change(gid, gid, gid))
62847 + goto error;
62848 +
62849 if (nsown_capable(CAP_SETGID))
62850 new->gid = new->egid = new->sgid = new->fsgid = gid;
62851 else if (gid == old->gid || gid == old->sgid)
62852 @@ -595,11 +608,18 @@ static int set_user(struct cred *new)
62853 if (!new_user)
62854 return -EAGAIN;
62855
62856 + /*
62857 + * We don't fail in case of NPROC limit excess here because too many
62858 + * poorly written programs don't check set*uid() return code, assuming
62859 + * it never fails if called by root. We may still enforce NPROC limit
62860 + * for programs doing set*uid()+execve() by harmlessly deferring the
62861 + * failure to the execve() stage.
62862 + */
62863 if (atomic_read(&new_user->processes) >= rlimit(RLIMIT_NPROC) &&
62864 - new_user != INIT_USER) {
62865 - free_uid(new_user);
62866 - return -EAGAIN;
62867 - }
62868 + new_user != INIT_USER)
62869 + current->flags |= PF_NPROC_EXCEEDED;
62870 + else
62871 + current->flags &= ~PF_NPROC_EXCEEDED;
62872
62873 free_uid(new->user);
62874 new->user = new_user;
62875 @@ -650,6 +670,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, u
62876 goto error;
62877 }
62878
62879 + if (gr_check_user_change(new->uid, new->euid, -1))
62880 + goto error;
62881 +
62882 if (new->uid != old->uid) {
62883 retval = set_user(new);
62884 if (retval < 0)
62885 @@ -694,6 +717,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
62886 old = current_cred();
62887
62888 retval = -EPERM;
62889 +
62890 + if (gr_check_crash_uid(uid))
62891 + goto error;
62892 + if (gr_check_user_change(uid, uid, uid))
62893 + goto error;
62894 +
62895 if (nsown_capable(CAP_SETUID)) {
62896 new->suid = new->uid = uid;
62897 if (uid != old->uid) {
62898 @@ -748,6 +777,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid,
62899 goto error;
62900 }
62901
62902 + if (gr_check_user_change(ruid, euid, -1))
62903 + goto error;
62904 +
62905 if (ruid != (uid_t) -1) {
62906 new->uid = ruid;
62907 if (ruid != old->uid) {
62908 @@ -812,6 +844,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid,
62909 goto error;
62910 }
62911
62912 + if (gr_check_group_change(rgid, egid, -1))
62913 + goto error;
62914 +
62915 if (rgid != (gid_t) -1)
62916 new->gid = rgid;
62917 if (egid != (gid_t) -1)
62918 @@ -858,6 +893,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
62919 old = current_cred();
62920 old_fsuid = old->fsuid;
62921
62922 + if (gr_check_user_change(-1, -1, uid))
62923 + goto error;
62924 +
62925 if (uid == old->uid || uid == old->euid ||
62926 uid == old->suid || uid == old->fsuid ||
62927 nsown_capable(CAP_SETUID)) {
62928 @@ -868,6 +906,7 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
62929 }
62930 }
62931
62932 +error:
62933 abort_creds(new);
62934 return old_fsuid;
62935
62936 @@ -894,12 +933,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
62937 if (gid == old->gid || gid == old->egid ||
62938 gid == old->sgid || gid == old->fsgid ||
62939 nsown_capable(CAP_SETGID)) {
62940 + if (gr_check_group_change(-1, -1, gid))
62941 + goto error;
62942 +
62943 if (gid != old_fsgid) {
62944 new->fsgid = gid;
62945 goto change_okay;
62946 }
62947 }
62948
62949 +error:
62950 abort_creds(new);
62951 return old_fsgid;
62952
62953 @@ -1680,7 +1723,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsi
62954 error = get_dumpable(me->mm);
62955 break;
62956 case PR_SET_DUMPABLE:
62957 - if (arg2 < 0 || arg2 > 1) {
62958 + if (arg2 > 1) {
62959 error = -EINVAL;
62960 break;
62961 }
62962 diff -urNp linux-3.0.4/kernel/sysctl.c linux-3.0.4/kernel/sysctl.c
62963 --- linux-3.0.4/kernel/sysctl.c 2011-07-21 22:17:23.000000000 -0400
62964 +++ linux-3.0.4/kernel/sysctl.c 2011-08-23 21:48:14.000000000 -0400
62965 @@ -85,6 +85,13 @@
62966
62967
62968 #if defined(CONFIG_SYSCTL)
62969 +#include <linux/grsecurity.h>
62970 +#include <linux/grinternal.h>
62971 +
62972 +extern __u32 gr_handle_sysctl(const ctl_table *table, const int op);
62973 +extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
62974 + const int op);
62975 +extern int gr_handle_chroot_sysctl(const int op);
62976
62977 /* External variables not in a header file. */
62978 extern int sysctl_overcommit_memory;
62979 @@ -197,6 +204,7 @@ static int sysrq_sysctl_handler(ctl_tabl
62980 }
62981
62982 #endif
62983 +extern struct ctl_table grsecurity_table[];
62984
62985 static struct ctl_table root_table[];
62986 static struct ctl_table_root sysctl_table_root;
62987 @@ -226,6 +234,20 @@ extern struct ctl_table epoll_table[];
62988 int sysctl_legacy_va_layout;
62989 #endif
62990
62991 +#ifdef CONFIG_PAX_SOFTMODE
62992 +static ctl_table pax_table[] = {
62993 + {
62994 + .procname = "softmode",
62995 + .data = &pax_softmode,
62996 + .maxlen = sizeof(unsigned int),
62997 + .mode = 0600,
62998 + .proc_handler = &proc_dointvec,
62999 + },
63000 +
63001 + { }
63002 +};
63003 +#endif
63004 +
63005 /* The default sysctl tables: */
63006
63007 static struct ctl_table root_table[] = {
63008 @@ -272,6 +294,22 @@ static int max_extfrag_threshold = 1000;
63009 #endif
63010
63011 static struct ctl_table kern_table[] = {
63012 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
63013 + {
63014 + .procname = "grsecurity",
63015 + .mode = 0500,
63016 + .child = grsecurity_table,
63017 + },
63018 +#endif
63019 +
63020 +#ifdef CONFIG_PAX_SOFTMODE
63021 + {
63022 + .procname = "pax",
63023 + .mode = 0500,
63024 + .child = pax_table,
63025 + },
63026 +#endif
63027 +
63028 {
63029 .procname = "sched_child_runs_first",
63030 .data = &sysctl_sched_child_runs_first,
63031 @@ -546,7 +584,7 @@ static struct ctl_table kern_table[] = {
63032 .data = &modprobe_path,
63033 .maxlen = KMOD_PATH_LEN,
63034 .mode = 0644,
63035 - .proc_handler = proc_dostring,
63036 + .proc_handler = proc_dostring_modpriv,
63037 },
63038 {
63039 .procname = "modules_disabled",
63040 @@ -713,16 +751,20 @@ static struct ctl_table kern_table[] = {
63041 .extra1 = &zero,
63042 .extra2 = &one,
63043 },
63044 +#endif
63045 {
63046 .procname = "kptr_restrict",
63047 .data = &kptr_restrict,
63048 .maxlen = sizeof(int),
63049 .mode = 0644,
63050 .proc_handler = proc_dmesg_restrict,
63051 +#ifdef CONFIG_GRKERNSEC_HIDESYM
63052 + .extra1 = &two,
63053 +#else
63054 .extra1 = &zero,
63055 +#endif
63056 .extra2 = &two,
63057 },
63058 -#endif
63059 {
63060 .procname = "ngroups_max",
63061 .data = &ngroups_max,
63062 @@ -1205,6 +1247,13 @@ static struct ctl_table vm_table[] = {
63063 .proc_handler = proc_dointvec_minmax,
63064 .extra1 = &zero,
63065 },
63066 + {
63067 + .procname = "heap_stack_gap",
63068 + .data = &sysctl_heap_stack_gap,
63069 + .maxlen = sizeof(sysctl_heap_stack_gap),
63070 + .mode = 0644,
63071 + .proc_handler = proc_doulongvec_minmax,
63072 + },
63073 #else
63074 {
63075 .procname = "nr_trim_pages",
63076 @@ -1714,6 +1763,17 @@ static int test_perm(int mode, int op)
63077 int sysctl_perm(struct ctl_table_root *root, struct ctl_table *table, int op)
63078 {
63079 int mode;
63080 + int error;
63081 +
63082 + if (table->parent != NULL && table->parent->procname != NULL &&
63083 + table->procname != NULL &&
63084 + gr_handle_sysctl_mod(table->parent->procname, table->procname, op))
63085 + return -EACCES;
63086 + if (gr_handle_chroot_sysctl(op))
63087 + return -EACCES;
63088 + error = gr_handle_sysctl(table, op);
63089 + if (error)
63090 + return error;
63091
63092 if (root->permissions)
63093 mode = root->permissions(root, current->nsproxy, table);
63094 @@ -2118,6 +2178,16 @@ int proc_dostring(struct ctl_table *tabl
63095 buffer, lenp, ppos);
63096 }
63097
63098 +int proc_dostring_modpriv(struct ctl_table *table, int write,
63099 + void __user *buffer, size_t *lenp, loff_t *ppos)
63100 +{
63101 + if (write && !capable(CAP_SYS_MODULE))
63102 + return -EPERM;
63103 +
63104 + return _proc_do_string(table->data, table->maxlen, write,
63105 + buffer, lenp, ppos);
63106 +}
63107 +
63108 static size_t proc_skip_spaces(char **buf)
63109 {
63110 size_t ret;
63111 @@ -2223,6 +2293,8 @@ static int proc_put_long(void __user **b
63112 len = strlen(tmp);
63113 if (len > *size)
63114 len = *size;
63115 + if (len > sizeof(tmp))
63116 + len = sizeof(tmp);
63117 if (copy_to_user(*buf, tmp, len))
63118 return -EFAULT;
63119 *size -= len;
63120 @@ -2539,8 +2611,11 @@ static int __do_proc_doulongvec_minmax(v
63121 *i = val;
63122 } else {
63123 val = convdiv * (*i) / convmul;
63124 - if (!first)
63125 + if (!first) {
63126 err = proc_put_char(&buffer, &left, '\t');
63127 + if (err)
63128 + break;
63129 + }
63130 err = proc_put_long(&buffer, &left, val, false);
63131 if (err)
63132 break;
63133 @@ -2935,6 +3010,12 @@ int proc_dostring(struct ctl_table *tabl
63134 return -ENOSYS;
63135 }
63136
63137 +int proc_dostring_modpriv(struct ctl_table *table, int write,
63138 + void __user *buffer, size_t *lenp, loff_t *ppos)
63139 +{
63140 + return -ENOSYS;
63141 +}
63142 +
63143 int proc_dointvec(struct ctl_table *table, int write,
63144 void __user *buffer, size_t *lenp, loff_t *ppos)
63145 {
63146 @@ -2991,6 +3072,7 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
63147 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
63148 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
63149 EXPORT_SYMBOL(proc_dostring);
63150 +EXPORT_SYMBOL(proc_dostring_modpriv);
63151 EXPORT_SYMBOL(proc_doulongvec_minmax);
63152 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
63153 EXPORT_SYMBOL(register_sysctl_table);
63154 diff -urNp linux-3.0.4/kernel/sysctl_check.c linux-3.0.4/kernel/sysctl_check.c
63155 --- linux-3.0.4/kernel/sysctl_check.c 2011-07-21 22:17:23.000000000 -0400
63156 +++ linux-3.0.4/kernel/sysctl_check.c 2011-08-23 21:48:14.000000000 -0400
63157 @@ -129,6 +129,7 @@ int sysctl_check_table(struct nsproxy *n
63158 set_fail(&fail, table, "Directory with extra2");
63159 } else {
63160 if ((table->proc_handler == proc_dostring) ||
63161 + (table->proc_handler == proc_dostring_modpriv) ||
63162 (table->proc_handler == proc_dointvec) ||
63163 (table->proc_handler == proc_dointvec_minmax) ||
63164 (table->proc_handler == proc_dointvec_jiffies) ||
63165 diff -urNp linux-3.0.4/kernel/taskstats.c linux-3.0.4/kernel/taskstats.c
63166 --- linux-3.0.4/kernel/taskstats.c 2011-07-21 22:17:23.000000000 -0400
63167 +++ linux-3.0.4/kernel/taskstats.c 2011-08-23 21:48:14.000000000 -0400
63168 @@ -27,9 +27,12 @@
63169 #include <linux/cgroup.h>
63170 #include <linux/fs.h>
63171 #include <linux/file.h>
63172 +#include <linux/grsecurity.h>
63173 #include <net/genetlink.h>
63174 #include <asm/atomic.h>
63175
63176 +extern int gr_is_taskstats_denied(int pid);
63177 +
63178 /*
63179 * Maximum length of a cpumask that can be specified in
63180 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
63181 @@ -558,6 +561,9 @@ err:
63182
63183 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
63184 {
63185 + if (gr_is_taskstats_denied(current->pid))
63186 + return -EACCES;
63187 +
63188 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
63189 return cmd_attr_register_cpumask(info);
63190 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
63191 diff -urNp linux-3.0.4/kernel/time/alarmtimer.c linux-3.0.4/kernel/time/alarmtimer.c
63192 --- linux-3.0.4/kernel/time/alarmtimer.c 2011-07-21 22:17:23.000000000 -0400
63193 +++ linux-3.0.4/kernel/time/alarmtimer.c 2011-08-23 21:47:56.000000000 -0400
63194 @@ -685,7 +685,7 @@ static int __init alarmtimer_init(void)
63195 {
63196 int error = 0;
63197 int i;
63198 - struct k_clock alarm_clock = {
63199 + static struct k_clock alarm_clock = {
63200 .clock_getres = alarm_clock_getres,
63201 .clock_get = alarm_clock_get,
63202 .timer_create = alarm_timer_create,
63203 diff -urNp linux-3.0.4/kernel/time/tick-broadcast.c linux-3.0.4/kernel/time/tick-broadcast.c
63204 --- linux-3.0.4/kernel/time/tick-broadcast.c 2011-07-21 22:17:23.000000000 -0400
63205 +++ linux-3.0.4/kernel/time/tick-broadcast.c 2011-08-23 21:47:56.000000000 -0400
63206 @@ -115,7 +115,7 @@ int tick_device_uses_broadcast(struct cl
63207 * then clear the broadcast bit.
63208 */
63209 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
63210 - int cpu = smp_processor_id();
63211 + cpu = smp_processor_id();
63212
63213 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
63214 tick_broadcast_clear_oneshot(cpu);
63215 diff -urNp linux-3.0.4/kernel/time/timekeeping.c linux-3.0.4/kernel/time/timekeeping.c
63216 --- linux-3.0.4/kernel/time/timekeeping.c 2011-07-21 22:17:23.000000000 -0400
63217 +++ linux-3.0.4/kernel/time/timekeeping.c 2011-08-23 21:48:14.000000000 -0400
63218 @@ -14,6 +14,7 @@
63219 #include <linux/init.h>
63220 #include <linux/mm.h>
63221 #include <linux/sched.h>
63222 +#include <linux/grsecurity.h>
63223 #include <linux/syscore_ops.h>
63224 #include <linux/clocksource.h>
63225 #include <linux/jiffies.h>
63226 @@ -361,6 +362,8 @@ int do_settimeofday(const struct timespe
63227 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
63228 return -EINVAL;
63229
63230 + gr_log_timechange();
63231 +
63232 write_seqlock_irqsave(&xtime_lock, flags);
63233
63234 timekeeping_forward_now();
63235 diff -urNp linux-3.0.4/kernel/time/timer_list.c linux-3.0.4/kernel/time/timer_list.c
63236 --- linux-3.0.4/kernel/time/timer_list.c 2011-07-21 22:17:23.000000000 -0400
63237 +++ linux-3.0.4/kernel/time/timer_list.c 2011-08-23 21:48:14.000000000 -0400
63238 @@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base,
63239
63240 static void print_name_offset(struct seq_file *m, void *sym)
63241 {
63242 +#ifdef CONFIG_GRKERNSEC_HIDESYM
63243 + SEQ_printf(m, "<%p>", NULL);
63244 +#else
63245 char symname[KSYM_NAME_LEN];
63246
63247 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
63248 SEQ_printf(m, "<%pK>", sym);
63249 else
63250 SEQ_printf(m, "%s", symname);
63251 +#endif
63252 }
63253
63254 static void
63255 @@ -112,7 +116,11 @@ next_one:
63256 static void
63257 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
63258 {
63259 +#ifdef CONFIG_GRKERNSEC_HIDESYM
63260 + SEQ_printf(m, " .base: %p\n", NULL);
63261 +#else
63262 SEQ_printf(m, " .base: %pK\n", base);
63263 +#endif
63264 SEQ_printf(m, " .index: %d\n",
63265 base->index);
63266 SEQ_printf(m, " .resolution: %Lu nsecs\n",
63267 @@ -293,7 +301,11 @@ static int __init init_timer_list_procfs
63268 {
63269 struct proc_dir_entry *pe;
63270
63271 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
63272 + pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
63273 +#else
63274 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
63275 +#endif
63276 if (!pe)
63277 return -ENOMEM;
63278 return 0;
63279 diff -urNp linux-3.0.4/kernel/time/timer_stats.c linux-3.0.4/kernel/time/timer_stats.c
63280 --- linux-3.0.4/kernel/time/timer_stats.c 2011-07-21 22:17:23.000000000 -0400
63281 +++ linux-3.0.4/kernel/time/timer_stats.c 2011-08-23 21:48:14.000000000 -0400
63282 @@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
63283 static unsigned long nr_entries;
63284 static struct entry entries[MAX_ENTRIES];
63285
63286 -static atomic_t overflow_count;
63287 +static atomic_unchecked_t overflow_count;
63288
63289 /*
63290 * The entries are in a hash-table, for fast lookup:
63291 @@ -140,7 +140,7 @@ static void reset_entries(void)
63292 nr_entries = 0;
63293 memset(entries, 0, sizeof(entries));
63294 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
63295 - atomic_set(&overflow_count, 0);
63296 + atomic_set_unchecked(&overflow_count, 0);
63297 }
63298
63299 static struct entry *alloc_entry(void)
63300 @@ -261,7 +261,7 @@ void timer_stats_update_stats(void *time
63301 if (likely(entry))
63302 entry->count++;
63303 else
63304 - atomic_inc(&overflow_count);
63305 + atomic_inc_unchecked(&overflow_count);
63306
63307 out_unlock:
63308 raw_spin_unlock_irqrestore(lock, flags);
63309 @@ -269,12 +269,16 @@ void timer_stats_update_stats(void *time
63310
63311 static void print_name_offset(struct seq_file *m, unsigned long addr)
63312 {
63313 +#ifdef CONFIG_GRKERNSEC_HIDESYM
63314 + seq_printf(m, "<%p>", NULL);
63315 +#else
63316 char symname[KSYM_NAME_LEN];
63317
63318 if (lookup_symbol_name(addr, symname) < 0)
63319 seq_printf(m, "<%p>", (void *)addr);
63320 else
63321 seq_printf(m, "%s", symname);
63322 +#endif
63323 }
63324
63325 static int tstats_show(struct seq_file *m, void *v)
63326 @@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *
63327
63328 seq_puts(m, "Timer Stats Version: v0.2\n");
63329 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
63330 - if (atomic_read(&overflow_count))
63331 + if (atomic_read_unchecked(&overflow_count))
63332 seq_printf(m, "Overflow: %d entries\n",
63333 - atomic_read(&overflow_count));
63334 + atomic_read_unchecked(&overflow_count));
63335
63336 for (i = 0; i < nr_entries; i++) {
63337 entry = entries + i;
63338 @@ -417,7 +421,11 @@ static int __init init_tstats_procfs(voi
63339 {
63340 struct proc_dir_entry *pe;
63341
63342 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
63343 + pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
63344 +#else
63345 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
63346 +#endif
63347 if (!pe)
63348 return -ENOMEM;
63349 return 0;
63350 diff -urNp linux-3.0.4/kernel/time.c linux-3.0.4/kernel/time.c
63351 --- linux-3.0.4/kernel/time.c 2011-07-21 22:17:23.000000000 -0400
63352 +++ linux-3.0.4/kernel/time.c 2011-08-23 21:48:14.000000000 -0400
63353 @@ -163,6 +163,11 @@ int do_sys_settimeofday(const struct tim
63354 return error;
63355
63356 if (tz) {
63357 + /* we log in do_settimeofday called below, so don't log twice
63358 + */
63359 + if (!tv)
63360 + gr_log_timechange();
63361 +
63362 /* SMP safe, global irq locking makes it work. */
63363 sys_tz = *tz;
63364 update_vsyscall_tz();
63365 diff -urNp linux-3.0.4/kernel/timer.c linux-3.0.4/kernel/timer.c
63366 --- linux-3.0.4/kernel/timer.c 2011-07-21 22:17:23.000000000 -0400
63367 +++ linux-3.0.4/kernel/timer.c 2011-08-23 21:47:56.000000000 -0400
63368 @@ -1304,7 +1304,7 @@ void update_process_times(int user_tick)
63369 /*
63370 * This function runs timers and the timer-tq in bottom half context.
63371 */
63372 -static void run_timer_softirq(struct softirq_action *h)
63373 +static void run_timer_softirq(void)
63374 {
63375 struct tvec_base *base = __this_cpu_read(tvec_bases);
63376
63377 diff -urNp linux-3.0.4/kernel/trace/blktrace.c linux-3.0.4/kernel/trace/blktrace.c
63378 --- linux-3.0.4/kernel/trace/blktrace.c 2011-07-21 22:17:23.000000000 -0400
63379 +++ linux-3.0.4/kernel/trace/blktrace.c 2011-08-23 21:47:56.000000000 -0400
63380 @@ -321,7 +321,7 @@ static ssize_t blk_dropped_read(struct f
63381 struct blk_trace *bt = filp->private_data;
63382 char buf[16];
63383
63384 - snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
63385 + snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
63386
63387 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
63388 }
63389 @@ -386,7 +386,7 @@ static int blk_subbuf_start_callback(str
63390 return 1;
63391
63392 bt = buf->chan->private_data;
63393 - atomic_inc(&bt->dropped);
63394 + atomic_inc_unchecked(&bt->dropped);
63395 return 0;
63396 }
63397
63398 @@ -487,7 +487,7 @@ int do_blk_trace_setup(struct request_qu
63399
63400 bt->dir = dir;
63401 bt->dev = dev;
63402 - atomic_set(&bt->dropped, 0);
63403 + atomic_set_unchecked(&bt->dropped, 0);
63404
63405 ret = -EIO;
63406 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
63407 diff -urNp linux-3.0.4/kernel/trace/ftrace.c linux-3.0.4/kernel/trace/ftrace.c
63408 --- linux-3.0.4/kernel/trace/ftrace.c 2011-07-21 22:17:23.000000000 -0400
63409 +++ linux-3.0.4/kernel/trace/ftrace.c 2011-08-23 21:47:56.000000000 -0400
63410 @@ -1566,12 +1566,17 @@ ftrace_code_disable(struct module *mod,
63411 if (unlikely(ftrace_disabled))
63412 return 0;
63413
63414 + ret = ftrace_arch_code_modify_prepare();
63415 + FTRACE_WARN_ON(ret);
63416 + if (ret)
63417 + return 0;
63418 +
63419 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
63420 + FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
63421 if (ret) {
63422 ftrace_bug(ret, ip);
63423 - return 0;
63424 }
63425 - return 1;
63426 + return ret ? 0 : 1;
63427 }
63428
63429 /*
63430 @@ -2550,7 +2555,7 @@ static void ftrace_free_entry_rcu(struct
63431
63432 int
63433 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
63434 - void *data)
63435 + void *data)
63436 {
63437 struct ftrace_func_probe *entry;
63438 struct ftrace_page *pg;
63439 diff -urNp linux-3.0.4/kernel/trace/trace.c linux-3.0.4/kernel/trace/trace.c
63440 --- linux-3.0.4/kernel/trace/trace.c 2011-07-21 22:17:23.000000000 -0400
63441 +++ linux-3.0.4/kernel/trace/trace.c 2011-08-23 21:48:14.000000000 -0400
63442 @@ -3339,6 +3339,8 @@ static ssize_t tracing_splice_read_pipe(
63443 size_t rem;
63444 unsigned int i;
63445
63446 + pax_track_stack();
63447 +
63448 if (splice_grow_spd(pipe, &spd))
63449 return -ENOMEM;
63450
63451 @@ -3822,6 +3824,8 @@ tracing_buffers_splice_read(struct file
63452 int entries, size, i;
63453 size_t ret;
63454
63455 + pax_track_stack();
63456 +
63457 if (splice_grow_spd(pipe, &spd))
63458 return -ENOMEM;
63459
63460 @@ -3990,10 +3994,9 @@ static const struct file_operations trac
63461 };
63462 #endif
63463
63464 -static struct dentry *d_tracer;
63465 -
63466 struct dentry *tracing_init_dentry(void)
63467 {
63468 + static struct dentry *d_tracer;
63469 static int once;
63470
63471 if (d_tracer)
63472 @@ -4013,10 +4016,9 @@ struct dentry *tracing_init_dentry(void)
63473 return d_tracer;
63474 }
63475
63476 -static struct dentry *d_percpu;
63477 -
63478 struct dentry *tracing_dentry_percpu(void)
63479 {
63480 + static struct dentry *d_percpu;
63481 static int once;
63482 struct dentry *d_tracer;
63483
63484 diff -urNp linux-3.0.4/kernel/trace/trace_events.c linux-3.0.4/kernel/trace/trace_events.c
63485 --- linux-3.0.4/kernel/trace/trace_events.c 2011-09-02 18:11:21.000000000 -0400
63486 +++ linux-3.0.4/kernel/trace/trace_events.c 2011-08-23 21:47:56.000000000 -0400
63487 @@ -1318,10 +1318,6 @@ static LIST_HEAD(ftrace_module_file_list
63488 struct ftrace_module_file_ops {
63489 struct list_head list;
63490 struct module *mod;
63491 - struct file_operations id;
63492 - struct file_operations enable;
63493 - struct file_operations format;
63494 - struct file_operations filter;
63495 };
63496
63497 static struct ftrace_module_file_ops *
63498 @@ -1342,17 +1338,12 @@ trace_create_file_ops(struct module *mod
63499
63500 file_ops->mod = mod;
63501
63502 - file_ops->id = ftrace_event_id_fops;
63503 - file_ops->id.owner = mod;
63504 -
63505 - file_ops->enable = ftrace_enable_fops;
63506 - file_ops->enable.owner = mod;
63507 -
63508 - file_ops->filter = ftrace_event_filter_fops;
63509 - file_ops->filter.owner = mod;
63510 -
63511 - file_ops->format = ftrace_event_format_fops;
63512 - file_ops->format.owner = mod;
63513 + pax_open_kernel();
63514 + *(void **)&mod->trace_id.owner = mod;
63515 + *(void **)&mod->trace_enable.owner = mod;
63516 + *(void **)&mod->trace_filter.owner = mod;
63517 + *(void **)&mod->trace_format.owner = mod;
63518 + pax_close_kernel();
63519
63520 list_add(&file_ops->list, &ftrace_module_file_list);
63521
63522 @@ -1376,8 +1367,8 @@ static void trace_module_add_events(stru
63523
63524 for_each_event(call, start, end) {
63525 __trace_add_event_call(*call, mod,
63526 - &file_ops->id, &file_ops->enable,
63527 - &file_ops->filter, &file_ops->format);
63528 + &mod->trace_id, &mod->trace_enable,
63529 + &mod->trace_filter, &mod->trace_format);
63530 }
63531 }
63532
63533 diff -urNp linux-3.0.4/kernel/trace/trace_mmiotrace.c linux-3.0.4/kernel/trace/trace_mmiotrace.c
63534 --- linux-3.0.4/kernel/trace/trace_mmiotrace.c 2011-07-21 22:17:23.000000000 -0400
63535 +++ linux-3.0.4/kernel/trace/trace_mmiotrace.c 2011-08-23 21:47:56.000000000 -0400
63536 @@ -24,7 +24,7 @@ struct header_iter {
63537 static struct trace_array *mmio_trace_array;
63538 static bool overrun_detected;
63539 static unsigned long prev_overruns;
63540 -static atomic_t dropped_count;
63541 +static atomic_unchecked_t dropped_count;
63542
63543 static void mmio_reset_data(struct trace_array *tr)
63544 {
63545 @@ -127,7 +127,7 @@ static void mmio_close(struct trace_iter
63546
63547 static unsigned long count_overruns(struct trace_iterator *iter)
63548 {
63549 - unsigned long cnt = atomic_xchg(&dropped_count, 0);
63550 + unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
63551 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
63552
63553 if (over > prev_overruns)
63554 @@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct
63555 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
63556 sizeof(*entry), 0, pc);
63557 if (!event) {
63558 - atomic_inc(&dropped_count);
63559 + atomic_inc_unchecked(&dropped_count);
63560 return;
63561 }
63562 entry = ring_buffer_event_data(event);
63563 @@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct
63564 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
63565 sizeof(*entry), 0, pc);
63566 if (!event) {
63567 - atomic_inc(&dropped_count);
63568 + atomic_inc_unchecked(&dropped_count);
63569 return;
63570 }
63571 entry = ring_buffer_event_data(event);
63572 diff -urNp linux-3.0.4/kernel/trace/trace_output.c linux-3.0.4/kernel/trace/trace_output.c
63573 --- linux-3.0.4/kernel/trace/trace_output.c 2011-07-21 22:17:23.000000000 -0400
63574 +++ linux-3.0.4/kernel/trace/trace_output.c 2011-08-23 21:47:56.000000000 -0400
63575 @@ -278,7 +278,7 @@ int trace_seq_path(struct trace_seq *s,
63576
63577 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
63578 if (!IS_ERR(p)) {
63579 - p = mangle_path(s->buffer + s->len, p, "\n");
63580 + p = mangle_path(s->buffer + s->len, p, "\n\\");
63581 if (p) {
63582 s->len = p - s->buffer;
63583 return 1;
63584 diff -urNp linux-3.0.4/kernel/trace/trace_stack.c linux-3.0.4/kernel/trace/trace_stack.c
63585 --- linux-3.0.4/kernel/trace/trace_stack.c 2011-07-21 22:17:23.000000000 -0400
63586 +++ linux-3.0.4/kernel/trace/trace_stack.c 2011-08-23 21:47:56.000000000 -0400
63587 @@ -50,7 +50,7 @@ static inline void check_stack(void)
63588 return;
63589
63590 /* we do not handle interrupt stacks yet */
63591 - if (!object_is_on_stack(&this_size))
63592 + if (!object_starts_on_stack(&this_size))
63593 return;
63594
63595 local_irq_save(flags);
63596 diff -urNp linux-3.0.4/kernel/trace/trace_workqueue.c linux-3.0.4/kernel/trace/trace_workqueue.c
63597 --- linux-3.0.4/kernel/trace/trace_workqueue.c 2011-07-21 22:17:23.000000000 -0400
63598 +++ linux-3.0.4/kernel/trace/trace_workqueue.c 2011-08-23 21:47:56.000000000 -0400
63599 @@ -22,7 +22,7 @@ struct cpu_workqueue_stats {
63600 int cpu;
63601 pid_t pid;
63602 /* Can be inserted from interrupt or user context, need to be atomic */
63603 - atomic_t inserted;
63604 + atomic_unchecked_t inserted;
63605 /*
63606 * Don't need to be atomic, works are serialized in a single workqueue thread
63607 * on a single CPU.
63608 @@ -60,7 +60,7 @@ probe_workqueue_insertion(void *ignore,
63609 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
63610 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
63611 if (node->pid == wq_thread->pid) {
63612 - atomic_inc(&node->inserted);
63613 + atomic_inc_unchecked(&node->inserted);
63614 goto found;
63615 }
63616 }
63617 @@ -210,7 +210,7 @@ static int workqueue_stat_show(struct se
63618 tsk = get_pid_task(pid, PIDTYPE_PID);
63619 if (tsk) {
63620 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
63621 - atomic_read(&cws->inserted), cws->executed,
63622 + atomic_read_unchecked(&cws->inserted), cws->executed,
63623 tsk->comm);
63624 put_task_struct(tsk);
63625 }
63626 diff -urNp linux-3.0.4/lib/bug.c linux-3.0.4/lib/bug.c
63627 --- linux-3.0.4/lib/bug.c 2011-07-21 22:17:23.000000000 -0400
63628 +++ linux-3.0.4/lib/bug.c 2011-08-23 21:47:56.000000000 -0400
63629 @@ -133,6 +133,8 @@ enum bug_trap_type report_bug(unsigned l
63630 return BUG_TRAP_TYPE_NONE;
63631
63632 bug = find_bug(bugaddr);
63633 + if (!bug)
63634 + return BUG_TRAP_TYPE_NONE;
63635
63636 file = NULL;
63637 line = 0;
63638 diff -urNp linux-3.0.4/lib/debugobjects.c linux-3.0.4/lib/debugobjects.c
63639 --- linux-3.0.4/lib/debugobjects.c 2011-07-21 22:17:23.000000000 -0400
63640 +++ linux-3.0.4/lib/debugobjects.c 2011-08-23 21:47:56.000000000 -0400
63641 @@ -284,7 +284,7 @@ static void debug_object_is_on_stack(voi
63642 if (limit > 4)
63643 return;
63644
63645 - is_on_stack = object_is_on_stack(addr);
63646 + is_on_stack = object_starts_on_stack(addr);
63647 if (is_on_stack == onstack)
63648 return;
63649
63650 diff -urNp linux-3.0.4/lib/dma-debug.c linux-3.0.4/lib/dma-debug.c
63651 --- linux-3.0.4/lib/dma-debug.c 2011-07-21 22:17:23.000000000 -0400
63652 +++ linux-3.0.4/lib/dma-debug.c 2011-08-23 21:47:56.000000000 -0400
63653 @@ -870,7 +870,7 @@ out:
63654
63655 static void check_for_stack(struct device *dev, void *addr)
63656 {
63657 - if (object_is_on_stack(addr))
63658 + if (object_starts_on_stack(addr))
63659 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
63660 "stack [addr=%p]\n", addr);
63661 }
63662 diff -urNp linux-3.0.4/lib/extable.c linux-3.0.4/lib/extable.c
63663 --- linux-3.0.4/lib/extable.c 2011-07-21 22:17:23.000000000 -0400
63664 +++ linux-3.0.4/lib/extable.c 2011-08-23 21:47:56.000000000 -0400
63665 @@ -13,6 +13,7 @@
63666 #include <linux/init.h>
63667 #include <linux/sort.h>
63668 #include <asm/uaccess.h>
63669 +#include <asm/pgtable.h>
63670
63671 #ifndef ARCH_HAS_SORT_EXTABLE
63672 /*
63673 @@ -36,8 +37,10 @@ static int cmp_ex(const void *a, const v
63674 void sort_extable(struct exception_table_entry *start,
63675 struct exception_table_entry *finish)
63676 {
63677 + pax_open_kernel();
63678 sort(start, finish - start, sizeof(struct exception_table_entry),
63679 cmp_ex, NULL);
63680 + pax_close_kernel();
63681 }
63682
63683 #ifdef CONFIG_MODULES
63684 diff -urNp linux-3.0.4/lib/inflate.c linux-3.0.4/lib/inflate.c
63685 --- linux-3.0.4/lib/inflate.c 2011-07-21 22:17:23.000000000 -0400
63686 +++ linux-3.0.4/lib/inflate.c 2011-08-23 21:47:56.000000000 -0400
63687 @@ -269,7 +269,7 @@ static void free(void *where)
63688 malloc_ptr = free_mem_ptr;
63689 }
63690 #else
63691 -#define malloc(a) kmalloc(a, GFP_KERNEL)
63692 +#define malloc(a) kmalloc((a), GFP_KERNEL)
63693 #define free(a) kfree(a)
63694 #endif
63695
63696 diff -urNp linux-3.0.4/lib/Kconfig.debug linux-3.0.4/lib/Kconfig.debug
63697 --- linux-3.0.4/lib/Kconfig.debug 2011-07-21 22:17:23.000000000 -0400
63698 +++ linux-3.0.4/lib/Kconfig.debug 2011-08-23 21:48:14.000000000 -0400
63699 @@ -1088,6 +1088,7 @@ config LATENCYTOP
63700 depends on DEBUG_KERNEL
63701 depends on STACKTRACE_SUPPORT
63702 depends on PROC_FS
63703 + depends on !GRKERNSEC_HIDESYM
63704 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE
63705 select KALLSYMS
63706 select KALLSYMS_ALL
63707 diff -urNp linux-3.0.4/lib/kref.c linux-3.0.4/lib/kref.c
63708 --- linux-3.0.4/lib/kref.c 2011-07-21 22:17:23.000000000 -0400
63709 +++ linux-3.0.4/lib/kref.c 2011-08-23 21:47:56.000000000 -0400
63710 @@ -52,7 +52,7 @@ void kref_get(struct kref *kref)
63711 */
63712 int kref_put(struct kref *kref, void (*release)(struct kref *kref))
63713 {
63714 - WARN_ON(release == NULL);
63715 + BUG_ON(release == NULL);
63716 WARN_ON(release == (void (*)(struct kref *))kfree);
63717
63718 if (atomic_dec_and_test(&kref->refcount)) {
63719 diff -urNp linux-3.0.4/lib/radix-tree.c linux-3.0.4/lib/radix-tree.c
63720 --- linux-3.0.4/lib/radix-tree.c 2011-07-21 22:17:23.000000000 -0400
63721 +++ linux-3.0.4/lib/radix-tree.c 2011-08-23 21:47:56.000000000 -0400
63722 @@ -80,7 +80,7 @@ struct radix_tree_preload {
63723 int nr;
63724 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
63725 };
63726 -static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
63727 +static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
63728
63729 static inline void *ptr_to_indirect(void *ptr)
63730 {
63731 diff -urNp linux-3.0.4/lib/vsprintf.c linux-3.0.4/lib/vsprintf.c
63732 --- linux-3.0.4/lib/vsprintf.c 2011-07-21 22:17:23.000000000 -0400
63733 +++ linux-3.0.4/lib/vsprintf.c 2011-08-23 21:48:14.000000000 -0400
63734 @@ -16,6 +16,9 @@
63735 * - scnprintf and vscnprintf
63736 */
63737
63738 +#ifdef CONFIG_GRKERNSEC_HIDESYM
63739 +#define __INCLUDED_BY_HIDESYM 1
63740 +#endif
63741 #include <stdarg.h>
63742 #include <linux/module.h>
63743 #include <linux/types.h>
63744 @@ -435,7 +438,7 @@ char *symbol_string(char *buf, char *end
63745 char sym[KSYM_SYMBOL_LEN];
63746 if (ext == 'B')
63747 sprint_backtrace(sym, value);
63748 - else if (ext != 'f' && ext != 's')
63749 + else if (ext != 'f' && ext != 's' && ext != 'a')
63750 sprint_symbol(sym, value);
63751 else
63752 kallsyms_lookup(value, NULL, NULL, NULL, sym);
63753 @@ -799,7 +802,11 @@ char *uuid_string(char *buf, char *end,
63754 return string(buf, end, uuid, spec);
63755 }
63756
63757 +#ifdef CONFIG_GRKERNSEC_HIDESYM
63758 +int kptr_restrict __read_mostly = 2;
63759 +#else
63760 int kptr_restrict __read_mostly;
63761 +#endif
63762
63763 /*
63764 * Show a '%p' thing. A kernel extension is that the '%p' is followed
63765 @@ -813,6 +820,8 @@ int kptr_restrict __read_mostly;
63766 * - 'S' For symbolic direct pointers with offset
63767 * - 's' For symbolic direct pointers without offset
63768 * - 'B' For backtraced symbolic direct pointers with offset
63769 + * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
63770 + * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
63771 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
63772 * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
63773 * - 'M' For a 6-byte MAC address, it prints the address in the
63774 @@ -857,12 +866,12 @@ char *pointer(const char *fmt, char *buf
63775 {
63776 if (!ptr && *fmt != 'K') {
63777 /*
63778 - * Print (null) with the same width as a pointer so it makes
63779 + * Print (nil) with the same width as a pointer so it makes
63780 * tabular output look nice.
63781 */
63782 if (spec.field_width == -1)
63783 spec.field_width = 2 * sizeof(void *);
63784 - return string(buf, end, "(null)", spec);
63785 + return string(buf, end, "(nil)", spec);
63786 }
63787
63788 switch (*fmt) {
63789 @@ -872,6 +881,13 @@ char *pointer(const char *fmt, char *buf
63790 /* Fallthrough */
63791 case 'S':
63792 case 's':
63793 +#ifdef CONFIG_GRKERNSEC_HIDESYM
63794 + break;
63795 +#else
63796 + return symbol_string(buf, end, ptr, spec, *fmt);
63797 +#endif
63798 + case 'A':
63799 + case 'a':
63800 case 'B':
63801 return symbol_string(buf, end, ptr, spec, *fmt);
63802 case 'R':
63803 @@ -1631,11 +1647,11 @@ int bstr_printf(char *buf, size_t size,
63804 typeof(type) value; \
63805 if (sizeof(type) == 8) { \
63806 args = PTR_ALIGN(args, sizeof(u32)); \
63807 - *(u32 *)&value = *(u32 *)args; \
63808 - *((u32 *)&value + 1) = *(u32 *)(args + 4); \
63809 + *(u32 *)&value = *(const u32 *)args; \
63810 + *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
63811 } else { \
63812 args = PTR_ALIGN(args, sizeof(type)); \
63813 - value = *(typeof(type) *)args; \
63814 + value = *(const typeof(type) *)args; \
63815 } \
63816 args += sizeof(type); \
63817 value; \
63818 @@ -1698,7 +1714,7 @@ int bstr_printf(char *buf, size_t size,
63819 case FORMAT_TYPE_STR: {
63820 const char *str_arg = args;
63821 args += strlen(str_arg) + 1;
63822 - str = string(str, end, (char *)str_arg, spec);
63823 + str = string(str, end, str_arg, spec);
63824 break;
63825 }
63826
63827 diff -urNp linux-3.0.4/localversion-grsec linux-3.0.4/localversion-grsec
63828 --- linux-3.0.4/localversion-grsec 1969-12-31 19:00:00.000000000 -0500
63829 +++ linux-3.0.4/localversion-grsec 2011-08-23 21:48:14.000000000 -0400
63830 @@ -0,0 +1 @@
63831 +-grsec
63832 diff -urNp linux-3.0.4/Makefile linux-3.0.4/Makefile
63833 --- linux-3.0.4/Makefile 2011-09-02 18:11:26.000000000 -0400
63834 +++ linux-3.0.4/Makefile 2011-09-17 00:56:07.000000000 -0400
63835 @@ -245,8 +245,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH"
63836
63837 HOSTCC = gcc
63838 HOSTCXX = g++
63839 -HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
63840 -HOSTCXXFLAGS = -O2
63841 +HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
63842 +HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
63843 +HOSTCXXFLAGS = -O2 -fno-delete-null-pointer-checks
63844
63845 # Decide whether to build built-in, modular, or both.
63846 # Normally, just do built-in.
63847 @@ -365,10 +366,12 @@ LINUXINCLUDE := -I$(srctree)/arch/$(h
63848 KBUILD_CPPFLAGS := -D__KERNEL__
63849
63850 KBUILD_CFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
63851 + -W -Wno-unused-parameter -Wno-missing-field-initializers \
63852 -fno-strict-aliasing -fno-common \
63853 -Werror-implicit-function-declaration \
63854 -Wno-format-security \
63855 -fno-delete-null-pointer-checks
63856 +KBUILD_CFLAGS += $(call cc-option, -Wno-empty-body)
63857 KBUILD_AFLAGS_KERNEL :=
63858 KBUILD_CFLAGS_KERNEL :=
63859 KBUILD_AFLAGS := -D__ASSEMBLY__
63860 @@ -407,8 +410,8 @@ export RCS_TAR_IGNORE := --exclude SCCS
63861 # Rules shared between *config targets and build targets
63862
63863 # Basic helpers built in scripts/
63864 -PHONY += scripts_basic
63865 -scripts_basic:
63866 +PHONY += scripts_basic gcc-plugins
63867 +scripts_basic: gcc-plugins
63868 $(Q)$(MAKE) $(build)=scripts/basic
63869 $(Q)rm -f .tmp_quiet_recordmcount
63870
63871 @@ -564,6 +567,31 @@ else
63872 KBUILD_CFLAGS += -O2
63873 endif
63874
63875 +ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(CC)"), y)
63876 +CONSTIFY_PLUGIN := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
63877 +ifdef CONFIG_PAX_KERNEXEC_PLUGIN
63878 +KERNEXEC_PLUGIN := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
63879 +endif
63880 +ifdef CONFIG_KALLOCSTAT_PLUGIN
63881 +KALLOCSTAT_PLUGIN := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
63882 +endif
63883 +ifdef CONFIG_PAX_MEMORY_STACKLEAK
63884 +STACKLEAK_PLUGIN := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -fplugin-arg-stackleak_plugin-track-lowest-sp=100
63885 +endif
63886 +GCC_PLUGINS := $(CONSTIFY_PLUGIN) $(STACKLEAK_PLUGIN) $(KALLOCSTAT_PLUGIN) $(KERNEXEC_PLUGIN)
63887 +export CONSTIFY_PLUGIN STACKLEAK_PLUGIN KERNEXEC_PLUGIN
63888 +gcc-plugins:
63889 + $(Q)$(MAKE) $(build)=tools/gcc
63890 +else
63891 +gcc-plugins:
63892 +ifeq ($(call cc-ifversion, -ge, 0405, y), y)
63893 + $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev.))
63894 +else
63895 + $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
63896 +endif
63897 + $(Q)echo "PAX_MEMORY_STACKLEAK and constification will be less secure"
63898 +endif
63899 +
63900 include $(srctree)/arch/$(SRCARCH)/Makefile
63901
63902 ifneq ($(CONFIG_FRAME_WARN),0)
63903 @@ -708,7 +736,7 @@ export mod_strip_cmd
63904
63905
63906 ifeq ($(KBUILD_EXTMOD),)
63907 -core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
63908 +core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
63909
63910 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
63911 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
63912 @@ -907,6 +935,8 @@ define rule_vmlinux-modpost
63913 endef
63914
63915 # vmlinux image - including updated kernel symbols
63916 +$(vmlinux-all): KBUILD_CFLAGS += $(GCC_PLUGINS)
63917 +$(vmlinux-all): gcc-plugins
63918 vmlinux: $(vmlinux-lds) $(vmlinux-init) $(vmlinux-main) vmlinux.o $(kallsyms.o) FORCE
63919 ifdef CONFIG_HEADERS_CHECK
63920 $(Q)$(MAKE) -f $(srctree)/Makefile headers_check
63921 @@ -941,7 +971,8 @@ $(sort $(vmlinux-init) $(vmlinux-main))
63922 # Error messages still appears in the original language
63923
63924 PHONY += $(vmlinux-dirs)
63925 -$(vmlinux-dirs): prepare scripts
63926 +$(vmlinux-dirs): KBUILD_CFLAGS += $(GCC_PLUGINS)
63927 +$(vmlinux-dirs): gcc-plugins prepare scripts
63928 $(Q)$(MAKE) $(build)=$@
63929
63930 # Store (new) KERNELRELASE string in include/config/kernel.release
63931 @@ -986,6 +1017,7 @@ prepare0: archprepare FORCE
63932 $(Q)$(MAKE) $(build)=. missing-syscalls
63933
63934 # All the preparing..
63935 +prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS),$(KBUILD_CFLAGS))
63936 prepare: prepare0
63937
63938 # Generate some files
63939 @@ -1102,7 +1134,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modu
63940
63941 # Target to prepare building external modules
63942 PHONY += modules_prepare
63943 -modules_prepare: prepare scripts
63944 +modules_prepare: gcc-plugins prepare scripts
63945
63946 # Target to install modules
63947 PHONY += modules_install
63948 @@ -1198,7 +1230,7 @@ distclean: mrproper
63949 @find $(srctree) $(RCS_FIND_IGNORE) \
63950 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
63951 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
63952 - -o -name '.*.rej' -o -size 0 \
63953 + -o -name '.*.rej' -o -name '*.so' -o -size 0 \
63954 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
63955 -type f -print | xargs rm -f
63956
63957 @@ -1359,6 +1391,7 @@ PHONY += $(module-dirs) modules
63958 $(module-dirs): crmodverdir $(objtree)/Module.symvers
63959 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
63960
63961 +modules: KBUILD_CFLAGS += $(GCC_PLUGINS)
63962 modules: $(module-dirs)
63963 @$(kecho) ' Building modules, stage 2.';
63964 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
63965 @@ -1485,17 +1518,19 @@ else
63966 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
63967 endif
63968
63969 -%.s: %.c prepare scripts FORCE
63970 +%.s: KBUILD_CFLAGS += $(GCC_PLUGINS)
63971 +%.s: %.c gcc-plugins prepare scripts FORCE
63972 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
63973 %.i: %.c prepare scripts FORCE
63974 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
63975 -%.o: %.c prepare scripts FORCE
63976 +%.o: KBUILD_CFLAGS += $(GCC_PLUGINS)
63977 +%.o: %.c gcc-plugins prepare scripts FORCE
63978 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
63979 %.lst: %.c prepare scripts FORCE
63980 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
63981 -%.s: %.S prepare scripts FORCE
63982 +%.s: %.S gcc-plugins prepare scripts FORCE
63983 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
63984 -%.o: %.S prepare scripts FORCE
63985 +%.o: %.S gcc-plugins prepare scripts FORCE
63986 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
63987 %.symtypes: %.c prepare scripts FORCE
63988 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
63989 @@ -1505,11 +1540,13 @@ endif
63990 $(cmd_crmodverdir)
63991 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
63992 $(build)=$(build-dir)
63993 -%/: prepare scripts FORCE
63994 +%/: KBUILD_CFLAGS += $(GCC_PLUGINS)
63995 +%/: gcc-plugins prepare scripts FORCE
63996 $(cmd_crmodverdir)
63997 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
63998 $(build)=$(build-dir)
63999 -%.ko: prepare scripts FORCE
64000 +%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS)
64001 +%.ko: gcc-plugins prepare scripts FORCE
64002 $(cmd_crmodverdir)
64003 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
64004 $(build)=$(build-dir) $(@:.ko=.o)
64005 diff -urNp linux-3.0.4/mm/filemap.c linux-3.0.4/mm/filemap.c
64006 --- linux-3.0.4/mm/filemap.c 2011-07-21 22:17:23.000000000 -0400
64007 +++ linux-3.0.4/mm/filemap.c 2011-08-23 21:48:14.000000000 -0400
64008 @@ -1763,7 +1763,7 @@ int generic_file_mmap(struct file * file
64009 struct address_space *mapping = file->f_mapping;
64010
64011 if (!mapping->a_ops->readpage)
64012 - return -ENOEXEC;
64013 + return -ENODEV;
64014 file_accessed(file);
64015 vma->vm_ops = &generic_file_vm_ops;
64016 vma->vm_flags |= VM_CAN_NONLINEAR;
64017 @@ -2169,6 +2169,7 @@ inline int generic_write_checks(struct f
64018 *pos = i_size_read(inode);
64019
64020 if (limit != RLIM_INFINITY) {
64021 + gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
64022 if (*pos >= limit) {
64023 send_sig(SIGXFSZ, current, 0);
64024 return -EFBIG;
64025 diff -urNp linux-3.0.4/mm/fremap.c linux-3.0.4/mm/fremap.c
64026 --- linux-3.0.4/mm/fremap.c 2011-07-21 22:17:23.000000000 -0400
64027 +++ linux-3.0.4/mm/fremap.c 2011-08-23 21:47:56.000000000 -0400
64028 @@ -156,6 +156,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsign
64029 retry:
64030 vma = find_vma(mm, start);
64031
64032 +#ifdef CONFIG_PAX_SEGMEXEC
64033 + if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
64034 + goto out;
64035 +#endif
64036 +
64037 /*
64038 * Make sure the vma is shared, that it supports prefaulting,
64039 * and that the remapped range is valid and fully within
64040 diff -urNp linux-3.0.4/mm/highmem.c linux-3.0.4/mm/highmem.c
64041 --- linux-3.0.4/mm/highmem.c 2011-07-21 22:17:23.000000000 -0400
64042 +++ linux-3.0.4/mm/highmem.c 2011-08-23 21:47:56.000000000 -0400
64043 @@ -125,9 +125,10 @@ static void flush_all_zero_pkmaps(void)
64044 * So no dangers, even with speculative execution.
64045 */
64046 page = pte_page(pkmap_page_table[i]);
64047 + pax_open_kernel();
64048 pte_clear(&init_mm, (unsigned long)page_address(page),
64049 &pkmap_page_table[i]);
64050 -
64051 + pax_close_kernel();
64052 set_page_address(page, NULL);
64053 need_flush = 1;
64054 }
64055 @@ -186,9 +187,11 @@ start:
64056 }
64057 }
64058 vaddr = PKMAP_ADDR(last_pkmap_nr);
64059 +
64060 + pax_open_kernel();
64061 set_pte_at(&init_mm, vaddr,
64062 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
64063 -
64064 + pax_close_kernel();
64065 pkmap_count[last_pkmap_nr] = 1;
64066 set_page_address(page, (void *)vaddr);
64067
64068 diff -urNp linux-3.0.4/mm/huge_memory.c linux-3.0.4/mm/huge_memory.c
64069 --- linux-3.0.4/mm/huge_memory.c 2011-07-21 22:17:23.000000000 -0400
64070 +++ linux-3.0.4/mm/huge_memory.c 2011-08-23 21:47:56.000000000 -0400
64071 @@ -702,7 +702,7 @@ out:
64072 * run pte_offset_map on the pmd, if an huge pmd could
64073 * materialize from under us from a different thread.
64074 */
64075 - if (unlikely(__pte_alloc(mm, vma, pmd, address)))
64076 + if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
64077 return VM_FAULT_OOM;
64078 /* if an huge pmd materialized from under us just retry later */
64079 if (unlikely(pmd_trans_huge(*pmd)))
64080 diff -urNp linux-3.0.4/mm/hugetlb.c linux-3.0.4/mm/hugetlb.c
64081 --- linux-3.0.4/mm/hugetlb.c 2011-07-21 22:17:23.000000000 -0400
64082 +++ linux-3.0.4/mm/hugetlb.c 2011-08-23 21:47:56.000000000 -0400
64083 @@ -2339,6 +2339,27 @@ static int unmap_ref_private(struct mm_s
64084 return 1;
64085 }
64086
64087 +#ifdef CONFIG_PAX_SEGMEXEC
64088 +static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
64089 +{
64090 + struct mm_struct *mm = vma->vm_mm;
64091 + struct vm_area_struct *vma_m;
64092 + unsigned long address_m;
64093 + pte_t *ptep_m;
64094 +
64095 + vma_m = pax_find_mirror_vma(vma);
64096 + if (!vma_m)
64097 + return;
64098 +
64099 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
64100 + address_m = address + SEGMEXEC_TASK_SIZE;
64101 + ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
64102 + get_page(page_m);
64103 + hugepage_add_anon_rmap(page_m, vma_m, address_m);
64104 + set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
64105 +}
64106 +#endif
64107 +
64108 /*
64109 * Hugetlb_cow() should be called with page lock of the original hugepage held.
64110 */
64111 @@ -2440,6 +2461,11 @@ retry_avoidcopy:
64112 make_huge_pte(vma, new_page, 1));
64113 page_remove_rmap(old_page);
64114 hugepage_add_new_anon_rmap(new_page, vma, address);
64115 +
64116 +#ifdef CONFIG_PAX_SEGMEXEC
64117 + pax_mirror_huge_pte(vma, address, new_page);
64118 +#endif
64119 +
64120 /* Make the old page be freed below */
64121 new_page = old_page;
64122 mmu_notifier_invalidate_range_end(mm,
64123 @@ -2591,6 +2617,10 @@ retry:
64124 && (vma->vm_flags & VM_SHARED)));
64125 set_huge_pte_at(mm, address, ptep, new_pte);
64126
64127 +#ifdef CONFIG_PAX_SEGMEXEC
64128 + pax_mirror_huge_pte(vma, address, page);
64129 +#endif
64130 +
64131 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
64132 /* Optimization, do the COW without a second fault */
64133 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
64134 @@ -2620,6 +2650,10 @@ int hugetlb_fault(struct mm_struct *mm,
64135 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
64136 struct hstate *h = hstate_vma(vma);
64137
64138 +#ifdef CONFIG_PAX_SEGMEXEC
64139 + struct vm_area_struct *vma_m;
64140 +#endif
64141 +
64142 ptep = huge_pte_offset(mm, address);
64143 if (ptep) {
64144 entry = huge_ptep_get(ptep);
64145 @@ -2631,6 +2665,26 @@ int hugetlb_fault(struct mm_struct *mm,
64146 VM_FAULT_SET_HINDEX(h - hstates);
64147 }
64148
64149 +#ifdef CONFIG_PAX_SEGMEXEC
64150 + vma_m = pax_find_mirror_vma(vma);
64151 + if (vma_m) {
64152 + unsigned long address_m;
64153 +
64154 + if (vma->vm_start > vma_m->vm_start) {
64155 + address_m = address;
64156 + address -= SEGMEXEC_TASK_SIZE;
64157 + vma = vma_m;
64158 + h = hstate_vma(vma);
64159 + } else
64160 + address_m = address + SEGMEXEC_TASK_SIZE;
64161 +
64162 + if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
64163 + return VM_FAULT_OOM;
64164 + address_m &= HPAGE_MASK;
64165 + unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
64166 + }
64167 +#endif
64168 +
64169 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
64170 if (!ptep)
64171 return VM_FAULT_OOM;
64172 diff -urNp linux-3.0.4/mm/internal.h linux-3.0.4/mm/internal.h
64173 --- linux-3.0.4/mm/internal.h 2011-07-21 22:17:23.000000000 -0400
64174 +++ linux-3.0.4/mm/internal.h 2011-08-23 21:47:56.000000000 -0400
64175 @@ -49,6 +49,7 @@ extern void putback_lru_page(struct page
64176 * in mm/page_alloc.c
64177 */
64178 extern void __free_pages_bootmem(struct page *page, unsigned int order);
64179 +extern void free_compound_page(struct page *page);
64180 extern void prep_compound_page(struct page *page, unsigned long order);
64181 #ifdef CONFIG_MEMORY_FAILURE
64182 extern bool is_free_buddy_page(struct page *page);
64183 diff -urNp linux-3.0.4/mm/Kconfig linux-3.0.4/mm/Kconfig
64184 --- linux-3.0.4/mm/Kconfig 2011-07-21 22:17:23.000000000 -0400
64185 +++ linux-3.0.4/mm/Kconfig 2011-08-23 21:48:14.000000000 -0400
64186 @@ -240,7 +240,7 @@ config KSM
64187 config DEFAULT_MMAP_MIN_ADDR
64188 int "Low address space to protect from user allocation"
64189 depends on MMU
64190 - default 4096
64191 + default 65536
64192 help
64193 This is the portion of low virtual memory which should be protected
64194 from userspace allocation. Keeping a user from writing to low pages
64195 diff -urNp linux-3.0.4/mm/kmemleak.c linux-3.0.4/mm/kmemleak.c
64196 --- linux-3.0.4/mm/kmemleak.c 2011-07-21 22:17:23.000000000 -0400
64197 +++ linux-3.0.4/mm/kmemleak.c 2011-08-23 21:48:14.000000000 -0400
64198 @@ -357,7 +357,7 @@ static void print_unreferenced(struct se
64199
64200 for (i = 0; i < object->trace_len; i++) {
64201 void *ptr = (void *)object->trace[i];
64202 - seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
64203 + seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
64204 }
64205 }
64206
64207 diff -urNp linux-3.0.4/mm/madvise.c linux-3.0.4/mm/madvise.c
64208 --- linux-3.0.4/mm/madvise.c 2011-07-21 22:17:23.000000000 -0400
64209 +++ linux-3.0.4/mm/madvise.c 2011-08-23 21:47:56.000000000 -0400
64210 @@ -45,6 +45,10 @@ static long madvise_behavior(struct vm_a
64211 pgoff_t pgoff;
64212 unsigned long new_flags = vma->vm_flags;
64213
64214 +#ifdef CONFIG_PAX_SEGMEXEC
64215 + struct vm_area_struct *vma_m;
64216 +#endif
64217 +
64218 switch (behavior) {
64219 case MADV_NORMAL:
64220 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
64221 @@ -110,6 +114,13 @@ success:
64222 /*
64223 * vm_flags is protected by the mmap_sem held in write mode.
64224 */
64225 +
64226 +#ifdef CONFIG_PAX_SEGMEXEC
64227 + vma_m = pax_find_mirror_vma(vma);
64228 + if (vma_m)
64229 + vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
64230 +#endif
64231 +
64232 vma->vm_flags = new_flags;
64233
64234 out:
64235 @@ -168,6 +179,11 @@ static long madvise_dontneed(struct vm_a
64236 struct vm_area_struct ** prev,
64237 unsigned long start, unsigned long end)
64238 {
64239 +
64240 +#ifdef CONFIG_PAX_SEGMEXEC
64241 + struct vm_area_struct *vma_m;
64242 +#endif
64243 +
64244 *prev = vma;
64245 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
64246 return -EINVAL;
64247 @@ -180,6 +196,21 @@ static long madvise_dontneed(struct vm_a
64248 zap_page_range(vma, start, end - start, &details);
64249 } else
64250 zap_page_range(vma, start, end - start, NULL);
64251 +
64252 +#ifdef CONFIG_PAX_SEGMEXEC
64253 + vma_m = pax_find_mirror_vma(vma);
64254 + if (vma_m) {
64255 + if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
64256 + struct zap_details details = {
64257 + .nonlinear_vma = vma_m,
64258 + .last_index = ULONG_MAX,
64259 + };
64260 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
64261 + } else
64262 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
64263 + }
64264 +#endif
64265 +
64266 return 0;
64267 }
64268
64269 @@ -376,6 +407,16 @@ SYSCALL_DEFINE3(madvise, unsigned long,
64270 if (end < start)
64271 goto out;
64272
64273 +#ifdef CONFIG_PAX_SEGMEXEC
64274 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
64275 + if (end > SEGMEXEC_TASK_SIZE)
64276 + goto out;
64277 + } else
64278 +#endif
64279 +
64280 + if (end > TASK_SIZE)
64281 + goto out;
64282 +
64283 error = 0;
64284 if (end == start)
64285 goto out;
64286 diff -urNp linux-3.0.4/mm/memory.c linux-3.0.4/mm/memory.c
64287 --- linux-3.0.4/mm/memory.c 2011-09-02 18:11:21.000000000 -0400
64288 +++ linux-3.0.4/mm/memory.c 2011-08-23 21:47:56.000000000 -0400
64289 @@ -457,8 +457,12 @@ static inline void free_pmd_range(struct
64290 return;
64291
64292 pmd = pmd_offset(pud, start);
64293 +
64294 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
64295 pud_clear(pud);
64296 pmd_free_tlb(tlb, pmd, start);
64297 +#endif
64298 +
64299 }
64300
64301 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
64302 @@ -489,9 +493,12 @@ static inline void free_pud_range(struct
64303 if (end - 1 > ceiling - 1)
64304 return;
64305
64306 +#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
64307 pud = pud_offset(pgd, start);
64308 pgd_clear(pgd);
64309 pud_free_tlb(tlb, pud, start);
64310 +#endif
64311 +
64312 }
64313
64314 /*
64315 @@ -1577,12 +1584,6 @@ no_page_table:
64316 return page;
64317 }
64318
64319 -static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
64320 -{
64321 - return stack_guard_page_start(vma, addr) ||
64322 - stack_guard_page_end(vma, addr+PAGE_SIZE);
64323 -}
64324 -
64325 /**
64326 * __get_user_pages() - pin user pages in memory
64327 * @tsk: task_struct of target task
64328 @@ -1655,10 +1656,10 @@ int __get_user_pages(struct task_struct
64329 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
64330 i = 0;
64331
64332 - do {
64333 + while (nr_pages) {
64334 struct vm_area_struct *vma;
64335
64336 - vma = find_extend_vma(mm, start);
64337 + vma = find_vma(mm, start);
64338 if (!vma && in_gate_area(mm, start)) {
64339 unsigned long pg = start & PAGE_MASK;
64340 pgd_t *pgd;
64341 @@ -1706,7 +1707,7 @@ int __get_user_pages(struct task_struct
64342 goto next_page;
64343 }
64344
64345 - if (!vma ||
64346 + if (!vma || start < vma->vm_start ||
64347 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
64348 !(vm_flags & vma->vm_flags))
64349 return i ? : -EFAULT;
64350 @@ -1733,11 +1734,6 @@ int __get_user_pages(struct task_struct
64351 int ret;
64352 unsigned int fault_flags = 0;
64353
64354 - /* For mlock, just skip the stack guard page. */
64355 - if (foll_flags & FOLL_MLOCK) {
64356 - if (stack_guard_page(vma, start))
64357 - goto next_page;
64358 - }
64359 if (foll_flags & FOLL_WRITE)
64360 fault_flags |= FAULT_FLAG_WRITE;
64361 if (nonblocking)
64362 @@ -1811,7 +1807,7 @@ next_page:
64363 start += PAGE_SIZE;
64364 nr_pages--;
64365 } while (nr_pages && start < vma->vm_end);
64366 - } while (nr_pages);
64367 + }
64368 return i;
64369 }
64370 EXPORT_SYMBOL(__get_user_pages);
64371 @@ -2018,6 +2014,10 @@ static int insert_page(struct vm_area_st
64372 page_add_file_rmap(page);
64373 set_pte_at(mm, addr, pte, mk_pte(page, prot));
64374
64375 +#ifdef CONFIG_PAX_SEGMEXEC
64376 + pax_mirror_file_pte(vma, addr, page, ptl);
64377 +#endif
64378 +
64379 retval = 0;
64380 pte_unmap_unlock(pte, ptl);
64381 return retval;
64382 @@ -2052,10 +2052,22 @@ out:
64383 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
64384 struct page *page)
64385 {
64386 +
64387 +#ifdef CONFIG_PAX_SEGMEXEC
64388 + struct vm_area_struct *vma_m;
64389 +#endif
64390 +
64391 if (addr < vma->vm_start || addr >= vma->vm_end)
64392 return -EFAULT;
64393 if (!page_count(page))
64394 return -EINVAL;
64395 +
64396 +#ifdef CONFIG_PAX_SEGMEXEC
64397 + vma_m = pax_find_mirror_vma(vma);
64398 + if (vma_m)
64399 + vma_m->vm_flags |= VM_INSERTPAGE;
64400 +#endif
64401 +
64402 vma->vm_flags |= VM_INSERTPAGE;
64403 return insert_page(vma, addr, page, vma->vm_page_prot);
64404 }
64405 @@ -2141,6 +2153,7 @@ int vm_insert_mixed(struct vm_area_struc
64406 unsigned long pfn)
64407 {
64408 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
64409 + BUG_ON(vma->vm_mirror);
64410
64411 if (addr < vma->vm_start || addr >= vma->vm_end)
64412 return -EFAULT;
64413 @@ -2456,6 +2469,186 @@ static inline void cow_user_page(struct
64414 copy_user_highpage(dst, src, va, vma);
64415 }
64416
64417 +#ifdef CONFIG_PAX_SEGMEXEC
64418 +static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
64419 +{
64420 + struct mm_struct *mm = vma->vm_mm;
64421 + spinlock_t *ptl;
64422 + pte_t *pte, entry;
64423 +
64424 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
64425 + entry = *pte;
64426 + if (!pte_present(entry)) {
64427 + if (!pte_none(entry)) {
64428 + BUG_ON(pte_file(entry));
64429 + free_swap_and_cache(pte_to_swp_entry(entry));
64430 + pte_clear_not_present_full(mm, address, pte, 0);
64431 + }
64432 + } else {
64433 + struct page *page;
64434 +
64435 + flush_cache_page(vma, address, pte_pfn(entry));
64436 + entry = ptep_clear_flush(vma, address, pte);
64437 + BUG_ON(pte_dirty(entry));
64438 + page = vm_normal_page(vma, address, entry);
64439 + if (page) {
64440 + update_hiwater_rss(mm);
64441 + if (PageAnon(page))
64442 + dec_mm_counter_fast(mm, MM_ANONPAGES);
64443 + else
64444 + dec_mm_counter_fast(mm, MM_FILEPAGES);
64445 + page_remove_rmap(page);
64446 + page_cache_release(page);
64447 + }
64448 + }
64449 + pte_unmap_unlock(pte, ptl);
64450 +}
64451 +
64452 +/* PaX: if vma is mirrored, synchronize the mirror's PTE
64453 + *
64454 + * the ptl of the lower mapped page is held on entry and is not released on exit
64455 + * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
64456 + */
64457 +static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
64458 +{
64459 + struct mm_struct *mm = vma->vm_mm;
64460 + unsigned long address_m;
64461 + spinlock_t *ptl_m;
64462 + struct vm_area_struct *vma_m;
64463 + pmd_t *pmd_m;
64464 + pte_t *pte_m, entry_m;
64465 +
64466 + BUG_ON(!page_m || !PageAnon(page_m));
64467 +
64468 + vma_m = pax_find_mirror_vma(vma);
64469 + if (!vma_m)
64470 + return;
64471 +
64472 + BUG_ON(!PageLocked(page_m));
64473 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
64474 + address_m = address + SEGMEXEC_TASK_SIZE;
64475 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
64476 + pte_m = pte_offset_map(pmd_m, address_m);
64477 + ptl_m = pte_lockptr(mm, pmd_m);
64478 + if (ptl != ptl_m) {
64479 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
64480 + if (!pte_none(*pte_m))
64481 + goto out;
64482 + }
64483 +
64484 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
64485 + page_cache_get(page_m);
64486 + page_add_anon_rmap(page_m, vma_m, address_m);
64487 + inc_mm_counter_fast(mm, MM_ANONPAGES);
64488 + set_pte_at(mm, address_m, pte_m, entry_m);
64489 + update_mmu_cache(vma_m, address_m, entry_m);
64490 +out:
64491 + if (ptl != ptl_m)
64492 + spin_unlock(ptl_m);
64493 + pte_unmap(pte_m);
64494 + unlock_page(page_m);
64495 +}
64496 +
64497 +void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
64498 +{
64499 + struct mm_struct *mm = vma->vm_mm;
64500 + unsigned long address_m;
64501 + spinlock_t *ptl_m;
64502 + struct vm_area_struct *vma_m;
64503 + pmd_t *pmd_m;
64504 + pte_t *pte_m, entry_m;
64505 +
64506 + BUG_ON(!page_m || PageAnon(page_m));
64507 +
64508 + vma_m = pax_find_mirror_vma(vma);
64509 + if (!vma_m)
64510 + return;
64511 +
64512 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
64513 + address_m = address + SEGMEXEC_TASK_SIZE;
64514 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
64515 + pte_m = pte_offset_map(pmd_m, address_m);
64516 + ptl_m = pte_lockptr(mm, pmd_m);
64517 + if (ptl != ptl_m) {
64518 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
64519 + if (!pte_none(*pte_m))
64520 + goto out;
64521 + }
64522 +
64523 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
64524 + page_cache_get(page_m);
64525 + page_add_file_rmap(page_m);
64526 + inc_mm_counter_fast(mm, MM_FILEPAGES);
64527 + set_pte_at(mm, address_m, pte_m, entry_m);
64528 + update_mmu_cache(vma_m, address_m, entry_m);
64529 +out:
64530 + if (ptl != ptl_m)
64531 + spin_unlock(ptl_m);
64532 + pte_unmap(pte_m);
64533 +}
64534 +
64535 +static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
64536 +{
64537 + struct mm_struct *mm = vma->vm_mm;
64538 + unsigned long address_m;
64539 + spinlock_t *ptl_m;
64540 + struct vm_area_struct *vma_m;
64541 + pmd_t *pmd_m;
64542 + pte_t *pte_m, entry_m;
64543 +
64544 + vma_m = pax_find_mirror_vma(vma);
64545 + if (!vma_m)
64546 + return;
64547 +
64548 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
64549 + address_m = address + SEGMEXEC_TASK_SIZE;
64550 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
64551 + pte_m = pte_offset_map(pmd_m, address_m);
64552 + ptl_m = pte_lockptr(mm, pmd_m);
64553 + if (ptl != ptl_m) {
64554 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
64555 + if (!pte_none(*pte_m))
64556 + goto out;
64557 + }
64558 +
64559 + entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
64560 + set_pte_at(mm, address_m, pte_m, entry_m);
64561 +out:
64562 + if (ptl != ptl_m)
64563 + spin_unlock(ptl_m);
64564 + pte_unmap(pte_m);
64565 +}
64566 +
64567 +static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
64568 +{
64569 + struct page *page_m;
64570 + pte_t entry;
64571 +
64572 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
64573 + goto out;
64574 +
64575 + entry = *pte;
64576 + page_m = vm_normal_page(vma, address, entry);
64577 + if (!page_m)
64578 + pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
64579 + else if (PageAnon(page_m)) {
64580 + if (pax_find_mirror_vma(vma)) {
64581 + pte_unmap_unlock(pte, ptl);
64582 + lock_page(page_m);
64583 + pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
64584 + if (pte_same(entry, *pte))
64585 + pax_mirror_anon_pte(vma, address, page_m, ptl);
64586 + else
64587 + unlock_page(page_m);
64588 + }
64589 + } else
64590 + pax_mirror_file_pte(vma, address, page_m, ptl);
64591 +
64592 +out:
64593 + pte_unmap_unlock(pte, ptl);
64594 +}
64595 +#endif
64596 +
64597 /*
64598 * This routine handles present pages, when users try to write
64599 * to a shared page. It is done by copying the page to a new address
64600 @@ -2667,6 +2860,12 @@ gotten:
64601 */
64602 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
64603 if (likely(pte_same(*page_table, orig_pte))) {
64604 +
64605 +#ifdef CONFIG_PAX_SEGMEXEC
64606 + if (pax_find_mirror_vma(vma))
64607 + BUG_ON(!trylock_page(new_page));
64608 +#endif
64609 +
64610 if (old_page) {
64611 if (!PageAnon(old_page)) {
64612 dec_mm_counter_fast(mm, MM_FILEPAGES);
64613 @@ -2718,6 +2917,10 @@ gotten:
64614 page_remove_rmap(old_page);
64615 }
64616
64617 +#ifdef CONFIG_PAX_SEGMEXEC
64618 + pax_mirror_anon_pte(vma, address, new_page, ptl);
64619 +#endif
64620 +
64621 /* Free the old page.. */
64622 new_page = old_page;
64623 ret |= VM_FAULT_WRITE;
64624 @@ -2997,6 +3200,11 @@ static int do_swap_page(struct mm_struct
64625 swap_free(entry);
64626 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
64627 try_to_free_swap(page);
64628 +
64629 +#ifdef CONFIG_PAX_SEGMEXEC
64630 + if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
64631 +#endif
64632 +
64633 unlock_page(page);
64634 if (swapcache) {
64635 /*
64636 @@ -3020,6 +3228,11 @@ static int do_swap_page(struct mm_struct
64637
64638 /* No need to invalidate - it was non-present before */
64639 update_mmu_cache(vma, address, page_table);
64640 +
64641 +#ifdef CONFIG_PAX_SEGMEXEC
64642 + pax_mirror_anon_pte(vma, address, page, ptl);
64643 +#endif
64644 +
64645 unlock:
64646 pte_unmap_unlock(page_table, ptl);
64647 out:
64648 @@ -3039,40 +3252,6 @@ out_release:
64649 }
64650
64651 /*
64652 - * This is like a special single-page "expand_{down|up}wards()",
64653 - * except we must first make sure that 'address{-|+}PAGE_SIZE'
64654 - * doesn't hit another vma.
64655 - */
64656 -static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
64657 -{
64658 - address &= PAGE_MASK;
64659 - if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
64660 - struct vm_area_struct *prev = vma->vm_prev;
64661 -
64662 - /*
64663 - * Is there a mapping abutting this one below?
64664 - *
64665 - * That's only ok if it's the same stack mapping
64666 - * that has gotten split..
64667 - */
64668 - if (prev && prev->vm_end == address)
64669 - return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
64670 -
64671 - expand_downwards(vma, address - PAGE_SIZE);
64672 - }
64673 - if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
64674 - struct vm_area_struct *next = vma->vm_next;
64675 -
64676 - /* As VM_GROWSDOWN but s/below/above/ */
64677 - if (next && next->vm_start == address + PAGE_SIZE)
64678 - return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
64679 -
64680 - expand_upwards(vma, address + PAGE_SIZE);
64681 - }
64682 - return 0;
64683 -}
64684 -
64685 -/*
64686 * We enter with non-exclusive mmap_sem (to exclude vma changes,
64687 * but allow concurrent faults), and pte mapped but not yet locked.
64688 * We return with mmap_sem still held, but pte unmapped and unlocked.
64689 @@ -3081,27 +3260,23 @@ static int do_anonymous_page(struct mm_s
64690 unsigned long address, pte_t *page_table, pmd_t *pmd,
64691 unsigned int flags)
64692 {
64693 - struct page *page;
64694 + struct page *page = NULL;
64695 spinlock_t *ptl;
64696 pte_t entry;
64697
64698 - pte_unmap(page_table);
64699 -
64700 - /* Check if we need to add a guard page to the stack */
64701 - if (check_stack_guard_page(vma, address) < 0)
64702 - return VM_FAULT_SIGBUS;
64703 -
64704 - /* Use the zero-page for reads */
64705 if (!(flags & FAULT_FLAG_WRITE)) {
64706 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
64707 vma->vm_page_prot));
64708 - page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
64709 + ptl = pte_lockptr(mm, pmd);
64710 + spin_lock(ptl);
64711 if (!pte_none(*page_table))
64712 goto unlock;
64713 goto setpte;
64714 }
64715
64716 /* Allocate our own private page. */
64717 + pte_unmap(page_table);
64718 +
64719 if (unlikely(anon_vma_prepare(vma)))
64720 goto oom;
64721 page = alloc_zeroed_user_highpage_movable(vma, address);
64722 @@ -3120,6 +3295,11 @@ static int do_anonymous_page(struct mm_s
64723 if (!pte_none(*page_table))
64724 goto release;
64725
64726 +#ifdef CONFIG_PAX_SEGMEXEC
64727 + if (pax_find_mirror_vma(vma))
64728 + BUG_ON(!trylock_page(page));
64729 +#endif
64730 +
64731 inc_mm_counter_fast(mm, MM_ANONPAGES);
64732 page_add_new_anon_rmap(page, vma, address);
64733 setpte:
64734 @@ -3127,6 +3307,12 @@ setpte:
64735
64736 /* No need to invalidate - it was non-present before */
64737 update_mmu_cache(vma, address, page_table);
64738 +
64739 +#ifdef CONFIG_PAX_SEGMEXEC
64740 + if (page)
64741 + pax_mirror_anon_pte(vma, address, page, ptl);
64742 +#endif
64743 +
64744 unlock:
64745 pte_unmap_unlock(page_table, ptl);
64746 return 0;
64747 @@ -3264,6 +3450,12 @@ static int __do_fault(struct mm_struct *
64748 */
64749 /* Only go through if we didn't race with anybody else... */
64750 if (likely(pte_same(*page_table, orig_pte))) {
64751 +
64752 +#ifdef CONFIG_PAX_SEGMEXEC
64753 + if (anon && pax_find_mirror_vma(vma))
64754 + BUG_ON(!trylock_page(page));
64755 +#endif
64756 +
64757 flush_icache_page(vma, page);
64758 entry = mk_pte(page, vma->vm_page_prot);
64759 if (flags & FAULT_FLAG_WRITE)
64760 @@ -3283,6 +3475,14 @@ static int __do_fault(struct mm_struct *
64761
64762 /* no need to invalidate: a not-present page won't be cached */
64763 update_mmu_cache(vma, address, page_table);
64764 +
64765 +#ifdef CONFIG_PAX_SEGMEXEC
64766 + if (anon)
64767 + pax_mirror_anon_pte(vma, address, page, ptl);
64768 + else
64769 + pax_mirror_file_pte(vma, address, page, ptl);
64770 +#endif
64771 +
64772 } else {
64773 if (charged)
64774 mem_cgroup_uncharge_page(page);
64775 @@ -3430,6 +3630,12 @@ int handle_pte_fault(struct mm_struct *m
64776 if (flags & FAULT_FLAG_WRITE)
64777 flush_tlb_fix_spurious_fault(vma, address);
64778 }
64779 +
64780 +#ifdef CONFIG_PAX_SEGMEXEC
64781 + pax_mirror_pte(vma, address, pte, pmd, ptl);
64782 + return 0;
64783 +#endif
64784 +
64785 unlock:
64786 pte_unmap_unlock(pte, ptl);
64787 return 0;
64788 @@ -3446,6 +3652,10 @@ int handle_mm_fault(struct mm_struct *mm
64789 pmd_t *pmd;
64790 pte_t *pte;
64791
64792 +#ifdef CONFIG_PAX_SEGMEXEC
64793 + struct vm_area_struct *vma_m;
64794 +#endif
64795 +
64796 __set_current_state(TASK_RUNNING);
64797
64798 count_vm_event(PGFAULT);
64799 @@ -3457,6 +3667,34 @@ int handle_mm_fault(struct mm_struct *mm
64800 if (unlikely(is_vm_hugetlb_page(vma)))
64801 return hugetlb_fault(mm, vma, address, flags);
64802
64803 +#ifdef CONFIG_PAX_SEGMEXEC
64804 + vma_m = pax_find_mirror_vma(vma);
64805 + if (vma_m) {
64806 + unsigned long address_m;
64807 + pgd_t *pgd_m;
64808 + pud_t *pud_m;
64809 + pmd_t *pmd_m;
64810 +
64811 + if (vma->vm_start > vma_m->vm_start) {
64812 + address_m = address;
64813 + address -= SEGMEXEC_TASK_SIZE;
64814 + vma = vma_m;
64815 + } else
64816 + address_m = address + SEGMEXEC_TASK_SIZE;
64817 +
64818 + pgd_m = pgd_offset(mm, address_m);
64819 + pud_m = pud_alloc(mm, pgd_m, address_m);
64820 + if (!pud_m)
64821 + return VM_FAULT_OOM;
64822 + pmd_m = pmd_alloc(mm, pud_m, address_m);
64823 + if (!pmd_m)
64824 + return VM_FAULT_OOM;
64825 + if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
64826 + return VM_FAULT_OOM;
64827 + pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
64828 + }
64829 +#endif
64830 +
64831 pgd = pgd_offset(mm, address);
64832 pud = pud_alloc(mm, pgd, address);
64833 if (!pud)
64834 @@ -3486,7 +3724,7 @@ int handle_mm_fault(struct mm_struct *mm
64835 * run pte_offset_map on the pmd, if an huge pmd could
64836 * materialize from under us from a different thread.
64837 */
64838 - if (unlikely(pmd_none(*pmd)) && __pte_alloc(mm, vma, pmd, address))
64839 + if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
64840 return VM_FAULT_OOM;
64841 /* if an huge pmd materialized from under us just retry later */
64842 if (unlikely(pmd_trans_huge(*pmd)))
64843 @@ -3590,7 +3828,7 @@ static int __init gate_vma_init(void)
64844 gate_vma.vm_start = FIXADDR_USER_START;
64845 gate_vma.vm_end = FIXADDR_USER_END;
64846 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
64847 - gate_vma.vm_page_prot = __P101;
64848 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
64849 /*
64850 * Make sure the vDSO gets into every core dump.
64851 * Dumping its contents makes post-mortem fully interpretable later
64852 diff -urNp linux-3.0.4/mm/memory-failure.c linux-3.0.4/mm/memory-failure.c
64853 --- linux-3.0.4/mm/memory-failure.c 2011-07-21 22:17:23.000000000 -0400
64854 +++ linux-3.0.4/mm/memory-failure.c 2011-08-23 21:47:56.000000000 -0400
64855 @@ -59,7 +59,7 @@ int sysctl_memory_failure_early_kill __r
64856
64857 int sysctl_memory_failure_recovery __read_mostly = 1;
64858
64859 -atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
64860 +atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
64861
64862 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
64863
64864 @@ -1008,7 +1008,7 @@ int __memory_failure(unsigned long pfn,
64865 }
64866
64867 nr_pages = 1 << compound_trans_order(hpage);
64868 - atomic_long_add(nr_pages, &mce_bad_pages);
64869 + atomic_long_add_unchecked(nr_pages, &mce_bad_pages);
64870
64871 /*
64872 * We need/can do nothing about count=0 pages.
64873 @@ -1038,7 +1038,7 @@ int __memory_failure(unsigned long pfn,
64874 if (!PageHWPoison(hpage)
64875 || (hwpoison_filter(p) && TestClearPageHWPoison(p))
64876 || (p != hpage && TestSetPageHWPoison(hpage))) {
64877 - atomic_long_sub(nr_pages, &mce_bad_pages);
64878 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
64879 return 0;
64880 }
64881 set_page_hwpoison_huge_page(hpage);
64882 @@ -1096,7 +1096,7 @@ int __memory_failure(unsigned long pfn,
64883 }
64884 if (hwpoison_filter(p)) {
64885 if (TestClearPageHWPoison(p))
64886 - atomic_long_sub(nr_pages, &mce_bad_pages);
64887 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
64888 unlock_page(hpage);
64889 put_page(hpage);
64890 return 0;
64891 @@ -1222,7 +1222,7 @@ int unpoison_memory(unsigned long pfn)
64892 return 0;
64893 }
64894 if (TestClearPageHWPoison(p))
64895 - atomic_long_sub(nr_pages, &mce_bad_pages);
64896 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
64897 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
64898 return 0;
64899 }
64900 @@ -1236,7 +1236,7 @@ int unpoison_memory(unsigned long pfn)
64901 */
64902 if (TestClearPageHWPoison(page)) {
64903 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
64904 - atomic_long_sub(nr_pages, &mce_bad_pages);
64905 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
64906 freeit = 1;
64907 if (PageHuge(page))
64908 clear_page_hwpoison_huge_page(page);
64909 @@ -1349,7 +1349,7 @@ static int soft_offline_huge_page(struct
64910 }
64911 done:
64912 if (!PageHWPoison(hpage))
64913 - atomic_long_add(1 << compound_trans_order(hpage), &mce_bad_pages);
64914 + atomic_long_add_unchecked(1 << compound_trans_order(hpage), &mce_bad_pages);
64915 set_page_hwpoison_huge_page(hpage);
64916 dequeue_hwpoisoned_huge_page(hpage);
64917 /* keep elevated page count for bad page */
64918 @@ -1480,7 +1480,7 @@ int soft_offline_page(struct page *page,
64919 return ret;
64920
64921 done:
64922 - atomic_long_add(1, &mce_bad_pages);
64923 + atomic_long_add_unchecked(1, &mce_bad_pages);
64924 SetPageHWPoison(page);
64925 /* keep elevated page count for bad page */
64926 return ret;
64927 diff -urNp linux-3.0.4/mm/mempolicy.c linux-3.0.4/mm/mempolicy.c
64928 --- linux-3.0.4/mm/mempolicy.c 2011-07-21 22:17:23.000000000 -0400
64929 +++ linux-3.0.4/mm/mempolicy.c 2011-08-23 21:48:14.000000000 -0400
64930 @@ -639,6 +639,10 @@ static int mbind_range(struct mm_struct
64931 unsigned long vmstart;
64932 unsigned long vmend;
64933
64934 +#ifdef CONFIG_PAX_SEGMEXEC
64935 + struct vm_area_struct *vma_m;
64936 +#endif
64937 +
64938 vma = find_vma_prev(mm, start, &prev);
64939 if (!vma || vma->vm_start > start)
64940 return -EFAULT;
64941 @@ -669,6 +673,16 @@ static int mbind_range(struct mm_struct
64942 err = policy_vma(vma, new_pol);
64943 if (err)
64944 goto out;
64945 +
64946 +#ifdef CONFIG_PAX_SEGMEXEC
64947 + vma_m = pax_find_mirror_vma(vma);
64948 + if (vma_m) {
64949 + err = policy_vma(vma_m, new_pol);
64950 + if (err)
64951 + goto out;
64952 + }
64953 +#endif
64954 +
64955 }
64956
64957 out:
64958 @@ -1102,6 +1116,17 @@ static long do_mbind(unsigned long start
64959
64960 if (end < start)
64961 return -EINVAL;
64962 +
64963 +#ifdef CONFIG_PAX_SEGMEXEC
64964 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
64965 + if (end > SEGMEXEC_TASK_SIZE)
64966 + return -EINVAL;
64967 + } else
64968 +#endif
64969 +
64970 + if (end > TASK_SIZE)
64971 + return -EINVAL;
64972 +
64973 if (end == start)
64974 return 0;
64975
64976 @@ -1320,6 +1345,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
64977 if (!mm)
64978 goto out;
64979
64980 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
64981 + if (mm != current->mm &&
64982 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
64983 + err = -EPERM;
64984 + goto out;
64985 + }
64986 +#endif
64987 +
64988 /*
64989 * Check if this process has the right to modify the specified
64990 * process. The right exists if the process has administrative
64991 @@ -1329,8 +1362,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
64992 rcu_read_lock();
64993 tcred = __task_cred(task);
64994 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
64995 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
64996 - !capable(CAP_SYS_NICE)) {
64997 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
64998 rcu_read_unlock();
64999 err = -EPERM;
65000 goto out;
65001 diff -urNp linux-3.0.4/mm/migrate.c linux-3.0.4/mm/migrate.c
65002 --- linux-3.0.4/mm/migrate.c 2011-07-21 22:17:23.000000000 -0400
65003 +++ linux-3.0.4/mm/migrate.c 2011-08-23 21:48:14.000000000 -0400
65004 @@ -1124,6 +1124,8 @@ static int do_pages_move(struct mm_struc
65005 unsigned long chunk_start;
65006 int err;
65007
65008 + pax_track_stack();
65009 +
65010 task_nodes = cpuset_mems_allowed(task);
65011
65012 err = -ENOMEM;
65013 @@ -1308,6 +1310,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
65014 if (!mm)
65015 return -EINVAL;
65016
65017 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65018 + if (mm != current->mm &&
65019 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
65020 + err = -EPERM;
65021 + goto out;
65022 + }
65023 +#endif
65024 +
65025 /*
65026 * Check if this process has the right to modify the specified
65027 * process. The right exists if the process has administrative
65028 @@ -1317,8 +1327,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
65029 rcu_read_lock();
65030 tcred = __task_cred(task);
65031 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
65032 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
65033 - !capable(CAP_SYS_NICE)) {
65034 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
65035 rcu_read_unlock();
65036 err = -EPERM;
65037 goto out;
65038 diff -urNp linux-3.0.4/mm/mlock.c linux-3.0.4/mm/mlock.c
65039 --- linux-3.0.4/mm/mlock.c 2011-07-21 22:17:23.000000000 -0400
65040 +++ linux-3.0.4/mm/mlock.c 2011-08-23 21:48:14.000000000 -0400
65041 @@ -13,6 +13,7 @@
65042 #include <linux/pagemap.h>
65043 #include <linux/mempolicy.h>
65044 #include <linux/syscalls.h>
65045 +#include <linux/security.h>
65046 #include <linux/sched.h>
65047 #include <linux/module.h>
65048 #include <linux/rmap.h>
65049 @@ -377,6 +378,9 @@ static int do_mlock(unsigned long start,
65050 return -EINVAL;
65051 if (end == start)
65052 return 0;
65053 + if (end > TASK_SIZE)
65054 + return -EINVAL;
65055 +
65056 vma = find_vma_prev(current->mm, start, &prev);
65057 if (!vma || vma->vm_start > start)
65058 return -ENOMEM;
65059 @@ -387,6 +391,11 @@ static int do_mlock(unsigned long start,
65060 for (nstart = start ; ; ) {
65061 vm_flags_t newflags;
65062
65063 +#ifdef CONFIG_PAX_SEGMEXEC
65064 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
65065 + break;
65066 +#endif
65067 +
65068 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
65069
65070 newflags = vma->vm_flags | VM_LOCKED;
65071 @@ -492,6 +501,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, st
65072 lock_limit >>= PAGE_SHIFT;
65073
65074 /* check against resource limits */
65075 + gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
65076 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
65077 error = do_mlock(start, len, 1);
65078 up_write(&current->mm->mmap_sem);
65079 @@ -515,17 +525,23 @@ SYSCALL_DEFINE2(munlock, unsigned long,
65080 static int do_mlockall(int flags)
65081 {
65082 struct vm_area_struct * vma, * prev = NULL;
65083 - unsigned int def_flags = 0;
65084
65085 if (flags & MCL_FUTURE)
65086 - def_flags = VM_LOCKED;
65087 - current->mm->def_flags = def_flags;
65088 + current->mm->def_flags |= VM_LOCKED;
65089 + else
65090 + current->mm->def_flags &= ~VM_LOCKED;
65091 if (flags == MCL_FUTURE)
65092 goto out;
65093
65094 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
65095 vm_flags_t newflags;
65096
65097 +#ifdef CONFIG_PAX_SEGMEXEC
65098 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
65099 + break;
65100 +#endif
65101 +
65102 + BUG_ON(vma->vm_end > TASK_SIZE);
65103 newflags = vma->vm_flags | VM_LOCKED;
65104 if (!(flags & MCL_CURRENT))
65105 newflags &= ~VM_LOCKED;
65106 @@ -557,6 +573,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
65107 lock_limit >>= PAGE_SHIFT;
65108
65109 ret = -ENOMEM;
65110 + gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
65111 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
65112 capable(CAP_IPC_LOCK))
65113 ret = do_mlockall(flags);
65114 diff -urNp linux-3.0.4/mm/mmap.c linux-3.0.4/mm/mmap.c
65115 --- linux-3.0.4/mm/mmap.c 2011-07-21 22:17:23.000000000 -0400
65116 +++ linux-3.0.4/mm/mmap.c 2011-08-23 21:48:14.000000000 -0400
65117 @@ -46,6 +46,16 @@
65118 #define arch_rebalance_pgtables(addr, len) (addr)
65119 #endif
65120
65121 +static inline void verify_mm_writelocked(struct mm_struct *mm)
65122 +{
65123 +#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
65124 + if (unlikely(down_read_trylock(&mm->mmap_sem))) {
65125 + up_read(&mm->mmap_sem);
65126 + BUG();
65127 + }
65128 +#endif
65129 +}
65130 +
65131 static void unmap_region(struct mm_struct *mm,
65132 struct vm_area_struct *vma, struct vm_area_struct *prev,
65133 unsigned long start, unsigned long end);
65134 @@ -71,22 +81,32 @@ static void unmap_region(struct mm_struc
65135 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
65136 *
65137 */
65138 -pgprot_t protection_map[16] = {
65139 +pgprot_t protection_map[16] __read_only = {
65140 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
65141 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
65142 };
65143
65144 -pgprot_t vm_get_page_prot(unsigned long vm_flags)
65145 +pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
65146 {
65147 - return __pgprot(pgprot_val(protection_map[vm_flags &
65148 + pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
65149 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
65150 pgprot_val(arch_vm_get_page_prot(vm_flags)));
65151 +
65152 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
65153 + if (!(__supported_pte_mask & _PAGE_NX) &&
65154 + (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
65155 + (vm_flags & (VM_READ | VM_WRITE)))
65156 + prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
65157 +#endif
65158 +
65159 + return prot;
65160 }
65161 EXPORT_SYMBOL(vm_get_page_prot);
65162
65163 int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS; /* heuristic overcommit */
65164 int sysctl_overcommit_ratio __read_mostly = 50; /* default is 50% */
65165 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
65166 +unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
65167 /*
65168 * Make sure vm_committed_as in one cacheline and not cacheline shared with
65169 * other variables. It can be updated by several CPUs frequently.
65170 @@ -236,6 +256,7 @@ static struct vm_area_struct *remove_vma
65171 struct vm_area_struct *next = vma->vm_next;
65172
65173 might_sleep();
65174 + BUG_ON(vma->vm_mirror);
65175 if (vma->vm_ops && vma->vm_ops->close)
65176 vma->vm_ops->close(vma);
65177 if (vma->vm_file) {
65178 @@ -280,6 +301,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
65179 * not page aligned -Ram Gupta
65180 */
65181 rlim = rlimit(RLIMIT_DATA);
65182 + gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
65183 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
65184 (mm->end_data - mm->start_data) > rlim)
65185 goto out;
65186 @@ -697,6 +719,12 @@ static int
65187 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
65188 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
65189 {
65190 +
65191 +#ifdef CONFIG_PAX_SEGMEXEC
65192 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
65193 + return 0;
65194 +#endif
65195 +
65196 if (is_mergeable_vma(vma, file, vm_flags) &&
65197 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
65198 if (vma->vm_pgoff == vm_pgoff)
65199 @@ -716,6 +744,12 @@ static int
65200 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
65201 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
65202 {
65203 +
65204 +#ifdef CONFIG_PAX_SEGMEXEC
65205 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
65206 + return 0;
65207 +#endif
65208 +
65209 if (is_mergeable_vma(vma, file, vm_flags) &&
65210 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
65211 pgoff_t vm_pglen;
65212 @@ -758,13 +792,20 @@ can_vma_merge_after(struct vm_area_struc
65213 struct vm_area_struct *vma_merge(struct mm_struct *mm,
65214 struct vm_area_struct *prev, unsigned long addr,
65215 unsigned long end, unsigned long vm_flags,
65216 - struct anon_vma *anon_vma, struct file *file,
65217 + struct anon_vma *anon_vma, struct file *file,
65218 pgoff_t pgoff, struct mempolicy *policy)
65219 {
65220 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
65221 struct vm_area_struct *area, *next;
65222 int err;
65223
65224 +#ifdef CONFIG_PAX_SEGMEXEC
65225 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
65226 + struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
65227 +
65228 + BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
65229 +#endif
65230 +
65231 /*
65232 * We later require that vma->vm_flags == vm_flags,
65233 * so this tests vma->vm_flags & VM_SPECIAL, too.
65234 @@ -780,6 +821,15 @@ struct vm_area_struct *vma_merge(struct
65235 if (next && next->vm_end == end) /* cases 6, 7, 8 */
65236 next = next->vm_next;
65237
65238 +#ifdef CONFIG_PAX_SEGMEXEC
65239 + if (prev)
65240 + prev_m = pax_find_mirror_vma(prev);
65241 + if (area)
65242 + area_m = pax_find_mirror_vma(area);
65243 + if (next)
65244 + next_m = pax_find_mirror_vma(next);
65245 +#endif
65246 +
65247 /*
65248 * Can it merge with the predecessor?
65249 */
65250 @@ -799,9 +849,24 @@ struct vm_area_struct *vma_merge(struct
65251 /* cases 1, 6 */
65252 err = vma_adjust(prev, prev->vm_start,
65253 next->vm_end, prev->vm_pgoff, NULL);
65254 - } else /* cases 2, 5, 7 */
65255 +
65256 +#ifdef CONFIG_PAX_SEGMEXEC
65257 + if (!err && prev_m)
65258 + err = vma_adjust(prev_m, prev_m->vm_start,
65259 + next_m->vm_end, prev_m->vm_pgoff, NULL);
65260 +#endif
65261 +
65262 + } else { /* cases 2, 5, 7 */
65263 err = vma_adjust(prev, prev->vm_start,
65264 end, prev->vm_pgoff, NULL);
65265 +
65266 +#ifdef CONFIG_PAX_SEGMEXEC
65267 + if (!err && prev_m)
65268 + err = vma_adjust(prev_m, prev_m->vm_start,
65269 + end_m, prev_m->vm_pgoff, NULL);
65270 +#endif
65271 +
65272 + }
65273 if (err)
65274 return NULL;
65275 khugepaged_enter_vma_merge(prev);
65276 @@ -815,12 +880,27 @@ struct vm_area_struct *vma_merge(struct
65277 mpol_equal(policy, vma_policy(next)) &&
65278 can_vma_merge_before(next, vm_flags,
65279 anon_vma, file, pgoff+pglen)) {
65280 - if (prev && addr < prev->vm_end) /* case 4 */
65281 + if (prev && addr < prev->vm_end) { /* case 4 */
65282 err = vma_adjust(prev, prev->vm_start,
65283 addr, prev->vm_pgoff, NULL);
65284 - else /* cases 3, 8 */
65285 +
65286 +#ifdef CONFIG_PAX_SEGMEXEC
65287 + if (!err && prev_m)
65288 + err = vma_adjust(prev_m, prev_m->vm_start,
65289 + addr_m, prev_m->vm_pgoff, NULL);
65290 +#endif
65291 +
65292 + } else { /* cases 3, 8 */
65293 err = vma_adjust(area, addr, next->vm_end,
65294 next->vm_pgoff - pglen, NULL);
65295 +
65296 +#ifdef CONFIG_PAX_SEGMEXEC
65297 + if (!err && area_m)
65298 + err = vma_adjust(area_m, addr_m, next_m->vm_end,
65299 + next_m->vm_pgoff - pglen, NULL);
65300 +#endif
65301 +
65302 + }
65303 if (err)
65304 return NULL;
65305 khugepaged_enter_vma_merge(area);
65306 @@ -929,14 +1009,11 @@ none:
65307 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
65308 struct file *file, long pages)
65309 {
65310 - const unsigned long stack_flags
65311 - = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
65312 -
65313 if (file) {
65314 mm->shared_vm += pages;
65315 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
65316 mm->exec_vm += pages;
65317 - } else if (flags & stack_flags)
65318 + } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
65319 mm->stack_vm += pages;
65320 if (flags & (VM_RESERVED|VM_IO))
65321 mm->reserved_vm += pages;
65322 @@ -963,7 +1040,7 @@ unsigned long do_mmap_pgoff(struct file
65323 * (the exception is when the underlying filesystem is noexec
65324 * mounted, in which case we dont add PROT_EXEC.)
65325 */
65326 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
65327 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
65328 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
65329 prot |= PROT_EXEC;
65330
65331 @@ -989,7 +1066,7 @@ unsigned long do_mmap_pgoff(struct file
65332 /* Obtain the address to map to. we verify (or select) it and ensure
65333 * that it represents a valid section of the address space.
65334 */
65335 - addr = get_unmapped_area(file, addr, len, pgoff, flags);
65336 + addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
65337 if (addr & ~PAGE_MASK)
65338 return addr;
65339
65340 @@ -1000,6 +1077,36 @@ unsigned long do_mmap_pgoff(struct file
65341 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
65342 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
65343
65344 +#ifdef CONFIG_PAX_MPROTECT
65345 + if (mm->pax_flags & MF_PAX_MPROTECT) {
65346 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
65347 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
65348 + gr_log_rwxmmap(file);
65349 +
65350 +#ifdef CONFIG_PAX_EMUPLT
65351 + vm_flags &= ~VM_EXEC;
65352 +#else
65353 + return -EPERM;
65354 +#endif
65355 +
65356 + }
65357 +
65358 + if (!(vm_flags & VM_EXEC))
65359 + vm_flags &= ~VM_MAYEXEC;
65360 +#else
65361 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
65362 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
65363 +#endif
65364 + else
65365 + vm_flags &= ~VM_MAYWRITE;
65366 + }
65367 +#endif
65368 +
65369 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
65370 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
65371 + vm_flags &= ~VM_PAGEEXEC;
65372 +#endif
65373 +
65374 if (flags & MAP_LOCKED)
65375 if (!can_do_mlock())
65376 return -EPERM;
65377 @@ -1011,6 +1118,7 @@ unsigned long do_mmap_pgoff(struct file
65378 locked += mm->locked_vm;
65379 lock_limit = rlimit(RLIMIT_MEMLOCK);
65380 lock_limit >>= PAGE_SHIFT;
65381 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
65382 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
65383 return -EAGAIN;
65384 }
65385 @@ -1081,6 +1189,9 @@ unsigned long do_mmap_pgoff(struct file
65386 if (error)
65387 return error;
65388
65389 + if (!gr_acl_handle_mmap(file, prot))
65390 + return -EACCES;
65391 +
65392 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
65393 }
65394 EXPORT_SYMBOL(do_mmap_pgoff);
65395 @@ -1161,7 +1272,7 @@ int vma_wants_writenotify(struct vm_area
65396 vm_flags_t vm_flags = vma->vm_flags;
65397
65398 /* If it was private or non-writable, the write bit is already clear */
65399 - if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
65400 + if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
65401 return 0;
65402
65403 /* The backer wishes to know when pages are first written to? */
65404 @@ -1210,14 +1321,24 @@ unsigned long mmap_region(struct file *f
65405 unsigned long charged = 0;
65406 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
65407
65408 +#ifdef CONFIG_PAX_SEGMEXEC
65409 + struct vm_area_struct *vma_m = NULL;
65410 +#endif
65411 +
65412 + /*
65413 + * mm->mmap_sem is required to protect against another thread
65414 + * changing the mappings in case we sleep.
65415 + */
65416 + verify_mm_writelocked(mm);
65417 +
65418 /* Clear old maps */
65419 error = -ENOMEM;
65420 -munmap_back:
65421 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
65422 if (vma && vma->vm_start < addr + len) {
65423 if (do_munmap(mm, addr, len))
65424 return -ENOMEM;
65425 - goto munmap_back;
65426 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
65427 + BUG_ON(vma && vma->vm_start < addr + len);
65428 }
65429
65430 /* Check against address space limit. */
65431 @@ -1266,6 +1387,16 @@ munmap_back:
65432 goto unacct_error;
65433 }
65434
65435 +#ifdef CONFIG_PAX_SEGMEXEC
65436 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
65437 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
65438 + if (!vma_m) {
65439 + error = -ENOMEM;
65440 + goto free_vma;
65441 + }
65442 + }
65443 +#endif
65444 +
65445 vma->vm_mm = mm;
65446 vma->vm_start = addr;
65447 vma->vm_end = addr + len;
65448 @@ -1289,6 +1420,19 @@ munmap_back:
65449 error = file->f_op->mmap(file, vma);
65450 if (error)
65451 goto unmap_and_free_vma;
65452 +
65453 +#ifdef CONFIG_PAX_SEGMEXEC
65454 + if (vma_m && (vm_flags & VM_EXECUTABLE))
65455 + added_exe_file_vma(mm);
65456 +#endif
65457 +
65458 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
65459 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
65460 + vma->vm_flags |= VM_PAGEEXEC;
65461 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
65462 + }
65463 +#endif
65464 +
65465 if (vm_flags & VM_EXECUTABLE)
65466 added_exe_file_vma(mm);
65467
65468 @@ -1324,6 +1468,11 @@ munmap_back:
65469 vma_link(mm, vma, prev, rb_link, rb_parent);
65470 file = vma->vm_file;
65471
65472 +#ifdef CONFIG_PAX_SEGMEXEC
65473 + if (vma_m)
65474 + BUG_ON(pax_mirror_vma(vma_m, vma));
65475 +#endif
65476 +
65477 /* Once vma denies write, undo our temporary denial count */
65478 if (correct_wcount)
65479 atomic_inc(&inode->i_writecount);
65480 @@ -1332,6 +1481,7 @@ out:
65481
65482 mm->total_vm += len >> PAGE_SHIFT;
65483 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
65484 + track_exec_limit(mm, addr, addr + len, vm_flags);
65485 if (vm_flags & VM_LOCKED) {
65486 if (!mlock_vma_pages_range(vma, addr, addr + len))
65487 mm->locked_vm += (len >> PAGE_SHIFT);
65488 @@ -1349,6 +1499,12 @@ unmap_and_free_vma:
65489 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
65490 charged = 0;
65491 free_vma:
65492 +
65493 +#ifdef CONFIG_PAX_SEGMEXEC
65494 + if (vma_m)
65495 + kmem_cache_free(vm_area_cachep, vma_m);
65496 +#endif
65497 +
65498 kmem_cache_free(vm_area_cachep, vma);
65499 unacct_error:
65500 if (charged)
65501 @@ -1356,6 +1512,44 @@ unacct_error:
65502 return error;
65503 }
65504
65505 +bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
65506 +{
65507 + if (!vma) {
65508 +#ifdef CONFIG_STACK_GROWSUP
65509 + if (addr > sysctl_heap_stack_gap)
65510 + vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
65511 + else
65512 + vma = find_vma(current->mm, 0);
65513 + if (vma && (vma->vm_flags & VM_GROWSUP))
65514 + return false;
65515 +#endif
65516 + return true;
65517 + }
65518 +
65519 + if (addr + len > vma->vm_start)
65520 + return false;
65521 +
65522 + if (vma->vm_flags & VM_GROWSDOWN)
65523 + return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
65524 +#ifdef CONFIG_STACK_GROWSUP
65525 + else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
65526 + return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
65527 +#endif
65528 +
65529 + return true;
65530 +}
65531 +
65532 +unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
65533 +{
65534 + if (vma->vm_start < len)
65535 + return -ENOMEM;
65536 + if (!(vma->vm_flags & VM_GROWSDOWN))
65537 + return vma->vm_start - len;
65538 + if (sysctl_heap_stack_gap <= vma->vm_start - len)
65539 + return vma->vm_start - len - sysctl_heap_stack_gap;
65540 + return -ENOMEM;
65541 +}
65542 +
65543 /* Get an address range which is currently unmapped.
65544 * For shmat() with addr=0.
65545 *
65546 @@ -1382,18 +1576,23 @@ arch_get_unmapped_area(struct file *filp
65547 if (flags & MAP_FIXED)
65548 return addr;
65549
65550 +#ifdef CONFIG_PAX_RANDMMAP
65551 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
65552 +#endif
65553 +
65554 if (addr) {
65555 addr = PAGE_ALIGN(addr);
65556 - vma = find_vma(mm, addr);
65557 - if (TASK_SIZE - len >= addr &&
65558 - (!vma || addr + len <= vma->vm_start))
65559 - return addr;
65560 + if (TASK_SIZE - len >= addr) {
65561 + vma = find_vma(mm, addr);
65562 + if (check_heap_stack_gap(vma, addr, len))
65563 + return addr;
65564 + }
65565 }
65566 if (len > mm->cached_hole_size) {
65567 - start_addr = addr = mm->free_area_cache;
65568 + start_addr = addr = mm->free_area_cache;
65569 } else {
65570 - start_addr = addr = TASK_UNMAPPED_BASE;
65571 - mm->cached_hole_size = 0;
65572 + start_addr = addr = mm->mmap_base;
65573 + mm->cached_hole_size = 0;
65574 }
65575
65576 full_search:
65577 @@ -1404,34 +1603,40 @@ full_search:
65578 * Start a new search - just in case we missed
65579 * some holes.
65580 */
65581 - if (start_addr != TASK_UNMAPPED_BASE) {
65582 - addr = TASK_UNMAPPED_BASE;
65583 - start_addr = addr;
65584 + if (start_addr != mm->mmap_base) {
65585 + start_addr = addr = mm->mmap_base;
65586 mm->cached_hole_size = 0;
65587 goto full_search;
65588 }
65589 return -ENOMEM;
65590 }
65591 - if (!vma || addr + len <= vma->vm_start) {
65592 - /*
65593 - * Remember the place where we stopped the search:
65594 - */
65595 - mm->free_area_cache = addr + len;
65596 - return addr;
65597 - }
65598 + if (check_heap_stack_gap(vma, addr, len))
65599 + break;
65600 if (addr + mm->cached_hole_size < vma->vm_start)
65601 mm->cached_hole_size = vma->vm_start - addr;
65602 addr = vma->vm_end;
65603 }
65604 +
65605 + /*
65606 + * Remember the place where we stopped the search:
65607 + */
65608 + mm->free_area_cache = addr + len;
65609 + return addr;
65610 }
65611 #endif
65612
65613 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
65614 {
65615 +
65616 +#ifdef CONFIG_PAX_SEGMEXEC
65617 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
65618 + return;
65619 +#endif
65620 +
65621 /*
65622 * Is this a new hole at the lowest possible address?
65623 */
65624 - if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
65625 + if (addr >= mm->mmap_base && addr < mm->free_area_cache) {
65626 mm->free_area_cache = addr;
65627 mm->cached_hole_size = ~0UL;
65628 }
65629 @@ -1449,7 +1654,7 @@ arch_get_unmapped_area_topdown(struct fi
65630 {
65631 struct vm_area_struct *vma;
65632 struct mm_struct *mm = current->mm;
65633 - unsigned long addr = addr0;
65634 + unsigned long base = mm->mmap_base, addr = addr0;
65635
65636 /* requested length too big for entire address space */
65637 if (len > TASK_SIZE)
65638 @@ -1458,13 +1663,18 @@ arch_get_unmapped_area_topdown(struct fi
65639 if (flags & MAP_FIXED)
65640 return addr;
65641
65642 +#ifdef CONFIG_PAX_RANDMMAP
65643 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
65644 +#endif
65645 +
65646 /* requesting a specific address */
65647 if (addr) {
65648 addr = PAGE_ALIGN(addr);
65649 - vma = find_vma(mm, addr);
65650 - if (TASK_SIZE - len >= addr &&
65651 - (!vma || addr + len <= vma->vm_start))
65652 - return addr;
65653 + if (TASK_SIZE - len >= addr) {
65654 + vma = find_vma(mm, addr);
65655 + if (check_heap_stack_gap(vma, addr, len))
65656 + return addr;
65657 + }
65658 }
65659
65660 /* check if free_area_cache is useful for us */
65661 @@ -1479,7 +1689,7 @@ arch_get_unmapped_area_topdown(struct fi
65662 /* make sure it can fit in the remaining address space */
65663 if (addr > len) {
65664 vma = find_vma(mm, addr-len);
65665 - if (!vma || addr <= vma->vm_start)
65666 + if (check_heap_stack_gap(vma, addr - len, len))
65667 /* remember the address as a hint for next time */
65668 return (mm->free_area_cache = addr-len);
65669 }
65670 @@ -1496,7 +1706,7 @@ arch_get_unmapped_area_topdown(struct fi
65671 * return with success:
65672 */
65673 vma = find_vma(mm, addr);
65674 - if (!vma || addr+len <= vma->vm_start)
65675 + if (check_heap_stack_gap(vma, addr, len))
65676 /* remember the address as a hint for next time */
65677 return (mm->free_area_cache = addr);
65678
65679 @@ -1505,8 +1715,8 @@ arch_get_unmapped_area_topdown(struct fi
65680 mm->cached_hole_size = vma->vm_start - addr;
65681
65682 /* try just below the current vma->vm_start */
65683 - addr = vma->vm_start-len;
65684 - } while (len < vma->vm_start);
65685 + addr = skip_heap_stack_gap(vma, len);
65686 + } while (!IS_ERR_VALUE(addr));
65687
65688 bottomup:
65689 /*
65690 @@ -1515,13 +1725,21 @@ bottomup:
65691 * can happen with large stack limits and large mmap()
65692 * allocations.
65693 */
65694 + mm->mmap_base = TASK_UNMAPPED_BASE;
65695 +
65696 +#ifdef CONFIG_PAX_RANDMMAP
65697 + if (mm->pax_flags & MF_PAX_RANDMMAP)
65698 + mm->mmap_base += mm->delta_mmap;
65699 +#endif
65700 +
65701 + mm->free_area_cache = mm->mmap_base;
65702 mm->cached_hole_size = ~0UL;
65703 - mm->free_area_cache = TASK_UNMAPPED_BASE;
65704 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
65705 /*
65706 * Restore the topdown base:
65707 */
65708 - mm->free_area_cache = mm->mmap_base;
65709 + mm->mmap_base = base;
65710 + mm->free_area_cache = base;
65711 mm->cached_hole_size = ~0UL;
65712
65713 return addr;
65714 @@ -1530,6 +1748,12 @@ bottomup:
65715
65716 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
65717 {
65718 +
65719 +#ifdef CONFIG_PAX_SEGMEXEC
65720 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
65721 + return;
65722 +#endif
65723 +
65724 /*
65725 * Is this a new hole at the highest possible address?
65726 */
65727 @@ -1537,8 +1761,10 @@ void arch_unmap_area_topdown(struct mm_s
65728 mm->free_area_cache = addr;
65729
65730 /* dont allow allocations above current base */
65731 - if (mm->free_area_cache > mm->mmap_base)
65732 + if (mm->free_area_cache > mm->mmap_base) {
65733 mm->free_area_cache = mm->mmap_base;
65734 + mm->cached_hole_size = ~0UL;
65735 + }
65736 }
65737
65738 unsigned long
65739 @@ -1646,6 +1872,28 @@ out:
65740 return prev ? prev->vm_next : vma;
65741 }
65742
65743 +#ifdef CONFIG_PAX_SEGMEXEC
65744 +struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
65745 +{
65746 + struct vm_area_struct *vma_m;
65747 +
65748 + BUG_ON(!vma || vma->vm_start >= vma->vm_end);
65749 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
65750 + BUG_ON(vma->vm_mirror);
65751 + return NULL;
65752 + }
65753 + BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
65754 + vma_m = vma->vm_mirror;
65755 + BUG_ON(!vma_m || vma_m->vm_mirror != vma);
65756 + BUG_ON(vma->vm_file != vma_m->vm_file);
65757 + BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
65758 + BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
65759 + BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
65760 + BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
65761 + return vma_m;
65762 +}
65763 +#endif
65764 +
65765 /*
65766 * Verify that the stack growth is acceptable and
65767 * update accounting. This is shared with both the
65768 @@ -1662,6 +1910,7 @@ static int acct_stack_growth(struct vm_a
65769 return -ENOMEM;
65770
65771 /* Stack limit test */
65772 + gr_learn_resource(current, RLIMIT_STACK, size, 1);
65773 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
65774 return -ENOMEM;
65775
65776 @@ -1672,6 +1921,7 @@ static int acct_stack_growth(struct vm_a
65777 locked = mm->locked_vm + grow;
65778 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
65779 limit >>= PAGE_SHIFT;
65780 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
65781 if (locked > limit && !capable(CAP_IPC_LOCK))
65782 return -ENOMEM;
65783 }
65784 @@ -1702,37 +1952,48 @@ static int acct_stack_growth(struct vm_a
65785 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
65786 * vma is the last one with address > vma->vm_end. Have to extend vma.
65787 */
65788 +#ifndef CONFIG_IA64
65789 +static
65790 +#endif
65791 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
65792 {
65793 int error;
65794 + bool locknext;
65795
65796 if (!(vma->vm_flags & VM_GROWSUP))
65797 return -EFAULT;
65798
65799 + /* Also guard against wrapping around to address 0. */
65800 + if (address < PAGE_ALIGN(address+1))
65801 + address = PAGE_ALIGN(address+1);
65802 + else
65803 + return -ENOMEM;
65804 +
65805 /*
65806 * We must make sure the anon_vma is allocated
65807 * so that the anon_vma locking is not a noop.
65808 */
65809 if (unlikely(anon_vma_prepare(vma)))
65810 return -ENOMEM;
65811 + locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
65812 + if (locknext && anon_vma_prepare(vma->vm_next))
65813 + return -ENOMEM;
65814 vma_lock_anon_vma(vma);
65815 + if (locknext)
65816 + vma_lock_anon_vma(vma->vm_next);
65817
65818 /*
65819 * vma->vm_start/vm_end cannot change under us because the caller
65820 * is required to hold the mmap_sem in read mode. We need the
65821 - * anon_vma lock to serialize against concurrent expand_stacks.
65822 - * Also guard against wrapping around to address 0.
65823 + * anon_vma locks to serialize against concurrent expand_stacks
65824 + * and expand_upwards.
65825 */
65826 - if (address < PAGE_ALIGN(address+4))
65827 - address = PAGE_ALIGN(address+4);
65828 - else {
65829 - vma_unlock_anon_vma(vma);
65830 - return -ENOMEM;
65831 - }
65832 error = 0;
65833
65834 /* Somebody else might have raced and expanded it already */
65835 - if (address > vma->vm_end) {
65836 + if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
65837 + error = -ENOMEM;
65838 + else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
65839 unsigned long size, grow;
65840
65841 size = address - vma->vm_start;
65842 @@ -1747,6 +2008,8 @@ int expand_upwards(struct vm_area_struct
65843 }
65844 }
65845 }
65846 + if (locknext)
65847 + vma_unlock_anon_vma(vma->vm_next);
65848 vma_unlock_anon_vma(vma);
65849 khugepaged_enter_vma_merge(vma);
65850 return error;
65851 @@ -1760,6 +2023,8 @@ int expand_downwards(struct vm_area_stru
65852 unsigned long address)
65853 {
65854 int error;
65855 + bool lockprev = false;
65856 + struct vm_area_struct *prev;
65857
65858 /*
65859 * We must make sure the anon_vma is allocated
65860 @@ -1773,6 +2038,15 @@ int expand_downwards(struct vm_area_stru
65861 if (error)
65862 return error;
65863
65864 + prev = vma->vm_prev;
65865 +#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
65866 + lockprev = prev && (prev->vm_flags & VM_GROWSUP);
65867 +#endif
65868 + if (lockprev && anon_vma_prepare(prev))
65869 + return -ENOMEM;
65870 + if (lockprev)
65871 + vma_lock_anon_vma(prev);
65872 +
65873 vma_lock_anon_vma(vma);
65874
65875 /*
65876 @@ -1782,9 +2056,17 @@ int expand_downwards(struct vm_area_stru
65877 */
65878
65879 /* Somebody else might have raced and expanded it already */
65880 - if (address < vma->vm_start) {
65881 + if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
65882 + error = -ENOMEM;
65883 + else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
65884 unsigned long size, grow;
65885
65886 +#ifdef CONFIG_PAX_SEGMEXEC
65887 + struct vm_area_struct *vma_m;
65888 +
65889 + vma_m = pax_find_mirror_vma(vma);
65890 +#endif
65891 +
65892 size = vma->vm_end - address;
65893 grow = (vma->vm_start - address) >> PAGE_SHIFT;
65894
65895 @@ -1794,11 +2076,22 @@ int expand_downwards(struct vm_area_stru
65896 if (!error) {
65897 vma->vm_start = address;
65898 vma->vm_pgoff -= grow;
65899 + track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
65900 +
65901 +#ifdef CONFIG_PAX_SEGMEXEC
65902 + if (vma_m) {
65903 + vma_m->vm_start -= grow << PAGE_SHIFT;
65904 + vma_m->vm_pgoff -= grow;
65905 + }
65906 +#endif
65907 +
65908 perf_event_mmap(vma);
65909 }
65910 }
65911 }
65912 vma_unlock_anon_vma(vma);
65913 + if (lockprev)
65914 + vma_unlock_anon_vma(prev);
65915 khugepaged_enter_vma_merge(vma);
65916 return error;
65917 }
65918 @@ -1868,6 +2161,13 @@ static void remove_vma_list(struct mm_st
65919 do {
65920 long nrpages = vma_pages(vma);
65921
65922 +#ifdef CONFIG_PAX_SEGMEXEC
65923 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
65924 + vma = remove_vma(vma);
65925 + continue;
65926 + }
65927 +#endif
65928 +
65929 mm->total_vm -= nrpages;
65930 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
65931 vma = remove_vma(vma);
65932 @@ -1913,6 +2213,16 @@ detach_vmas_to_be_unmapped(struct mm_str
65933 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
65934 vma->vm_prev = NULL;
65935 do {
65936 +
65937 +#ifdef CONFIG_PAX_SEGMEXEC
65938 + if (vma->vm_mirror) {
65939 + BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
65940 + vma->vm_mirror->vm_mirror = NULL;
65941 + vma->vm_mirror->vm_flags &= ~VM_EXEC;
65942 + vma->vm_mirror = NULL;
65943 + }
65944 +#endif
65945 +
65946 rb_erase(&vma->vm_rb, &mm->mm_rb);
65947 mm->map_count--;
65948 tail_vma = vma;
65949 @@ -1941,14 +2251,33 @@ static int __split_vma(struct mm_struct
65950 struct vm_area_struct *new;
65951 int err = -ENOMEM;
65952
65953 +#ifdef CONFIG_PAX_SEGMEXEC
65954 + struct vm_area_struct *vma_m, *new_m = NULL;
65955 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
65956 +#endif
65957 +
65958 if (is_vm_hugetlb_page(vma) && (addr &
65959 ~(huge_page_mask(hstate_vma(vma)))))
65960 return -EINVAL;
65961
65962 +#ifdef CONFIG_PAX_SEGMEXEC
65963 + vma_m = pax_find_mirror_vma(vma);
65964 +#endif
65965 +
65966 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
65967 if (!new)
65968 goto out_err;
65969
65970 +#ifdef CONFIG_PAX_SEGMEXEC
65971 + if (vma_m) {
65972 + new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
65973 + if (!new_m) {
65974 + kmem_cache_free(vm_area_cachep, new);
65975 + goto out_err;
65976 + }
65977 + }
65978 +#endif
65979 +
65980 /* most fields are the same, copy all, and then fixup */
65981 *new = *vma;
65982
65983 @@ -1961,6 +2290,22 @@ static int __split_vma(struct mm_struct
65984 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
65985 }
65986
65987 +#ifdef CONFIG_PAX_SEGMEXEC
65988 + if (vma_m) {
65989 + *new_m = *vma_m;
65990 + INIT_LIST_HEAD(&new_m->anon_vma_chain);
65991 + new_m->vm_mirror = new;
65992 + new->vm_mirror = new_m;
65993 +
65994 + if (new_below)
65995 + new_m->vm_end = addr_m;
65996 + else {
65997 + new_m->vm_start = addr_m;
65998 + new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
65999 + }
66000 + }
66001 +#endif
66002 +
66003 pol = mpol_dup(vma_policy(vma));
66004 if (IS_ERR(pol)) {
66005 err = PTR_ERR(pol);
66006 @@ -1986,6 +2331,42 @@ static int __split_vma(struct mm_struct
66007 else
66008 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
66009
66010 +#ifdef CONFIG_PAX_SEGMEXEC
66011 + if (!err && vma_m) {
66012 + if (anon_vma_clone(new_m, vma_m))
66013 + goto out_free_mpol;
66014 +
66015 + mpol_get(pol);
66016 + vma_set_policy(new_m, pol);
66017 +
66018 + if (new_m->vm_file) {
66019 + get_file(new_m->vm_file);
66020 + if (vma_m->vm_flags & VM_EXECUTABLE)
66021 + added_exe_file_vma(mm);
66022 + }
66023 +
66024 + if (new_m->vm_ops && new_m->vm_ops->open)
66025 + new_m->vm_ops->open(new_m);
66026 +
66027 + if (new_below)
66028 + err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
66029 + ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
66030 + else
66031 + err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
66032 +
66033 + if (err) {
66034 + if (new_m->vm_ops && new_m->vm_ops->close)
66035 + new_m->vm_ops->close(new_m);
66036 + if (new_m->vm_file) {
66037 + if (vma_m->vm_flags & VM_EXECUTABLE)
66038 + removed_exe_file_vma(mm);
66039 + fput(new_m->vm_file);
66040 + }
66041 + mpol_put(pol);
66042 + }
66043 + }
66044 +#endif
66045 +
66046 /* Success. */
66047 if (!err)
66048 return 0;
66049 @@ -1998,10 +2379,18 @@ static int __split_vma(struct mm_struct
66050 removed_exe_file_vma(mm);
66051 fput(new->vm_file);
66052 }
66053 - unlink_anon_vmas(new);
66054 out_free_mpol:
66055 mpol_put(pol);
66056 out_free_vma:
66057 +
66058 +#ifdef CONFIG_PAX_SEGMEXEC
66059 + if (new_m) {
66060 + unlink_anon_vmas(new_m);
66061 + kmem_cache_free(vm_area_cachep, new_m);
66062 + }
66063 +#endif
66064 +
66065 + unlink_anon_vmas(new);
66066 kmem_cache_free(vm_area_cachep, new);
66067 out_err:
66068 return err;
66069 @@ -2014,6 +2403,15 @@ static int __split_vma(struct mm_struct
66070 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
66071 unsigned long addr, int new_below)
66072 {
66073 +
66074 +#ifdef CONFIG_PAX_SEGMEXEC
66075 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
66076 + BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
66077 + if (mm->map_count >= sysctl_max_map_count-1)
66078 + return -ENOMEM;
66079 + } else
66080 +#endif
66081 +
66082 if (mm->map_count >= sysctl_max_map_count)
66083 return -ENOMEM;
66084
66085 @@ -2025,11 +2423,30 @@ int split_vma(struct mm_struct *mm, stru
66086 * work. This now handles partial unmappings.
66087 * Jeremy Fitzhardinge <jeremy@goop.org>
66088 */
66089 +#ifdef CONFIG_PAX_SEGMEXEC
66090 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
66091 {
66092 + int ret = __do_munmap(mm, start, len);
66093 + if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
66094 + return ret;
66095 +
66096 + return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
66097 +}
66098 +
66099 +int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
66100 +#else
66101 +int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
66102 +#endif
66103 +{
66104 unsigned long end;
66105 struct vm_area_struct *vma, *prev, *last;
66106
66107 + /*
66108 + * mm->mmap_sem is required to protect against another thread
66109 + * changing the mappings in case we sleep.
66110 + */
66111 + verify_mm_writelocked(mm);
66112 +
66113 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
66114 return -EINVAL;
66115
66116 @@ -2104,6 +2521,8 @@ int do_munmap(struct mm_struct *mm, unsi
66117 /* Fix up all other VM information */
66118 remove_vma_list(mm, vma);
66119
66120 + track_exec_limit(mm, start, end, 0UL);
66121 +
66122 return 0;
66123 }
66124
66125 @@ -2116,22 +2535,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, a
66126
66127 profile_munmap(addr);
66128
66129 +#ifdef CONFIG_PAX_SEGMEXEC
66130 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
66131 + (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
66132 + return -EINVAL;
66133 +#endif
66134 +
66135 down_write(&mm->mmap_sem);
66136 ret = do_munmap(mm, addr, len);
66137 up_write(&mm->mmap_sem);
66138 return ret;
66139 }
66140
66141 -static inline void verify_mm_writelocked(struct mm_struct *mm)
66142 -{
66143 -#ifdef CONFIG_DEBUG_VM
66144 - if (unlikely(down_read_trylock(&mm->mmap_sem))) {
66145 - WARN_ON(1);
66146 - up_read(&mm->mmap_sem);
66147 - }
66148 -#endif
66149 -}
66150 -
66151 /*
66152 * this is really a simplified "do_mmap". it only handles
66153 * anonymous maps. eventually we may be able to do some
66154 @@ -2145,6 +2560,7 @@ unsigned long do_brk(unsigned long addr,
66155 struct rb_node ** rb_link, * rb_parent;
66156 pgoff_t pgoff = addr >> PAGE_SHIFT;
66157 int error;
66158 + unsigned long charged;
66159
66160 len = PAGE_ALIGN(len);
66161 if (!len)
66162 @@ -2156,16 +2572,30 @@ unsigned long do_brk(unsigned long addr,
66163
66164 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
66165
66166 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
66167 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
66168 + flags &= ~VM_EXEC;
66169 +
66170 +#ifdef CONFIG_PAX_MPROTECT
66171 + if (mm->pax_flags & MF_PAX_MPROTECT)
66172 + flags &= ~VM_MAYEXEC;
66173 +#endif
66174 +
66175 + }
66176 +#endif
66177 +
66178 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
66179 if (error & ~PAGE_MASK)
66180 return error;
66181
66182 + charged = len >> PAGE_SHIFT;
66183 +
66184 /*
66185 * mlock MCL_FUTURE?
66186 */
66187 if (mm->def_flags & VM_LOCKED) {
66188 unsigned long locked, lock_limit;
66189 - locked = len >> PAGE_SHIFT;
66190 + locked = charged;
66191 locked += mm->locked_vm;
66192 lock_limit = rlimit(RLIMIT_MEMLOCK);
66193 lock_limit >>= PAGE_SHIFT;
66194 @@ -2182,22 +2612,22 @@ unsigned long do_brk(unsigned long addr,
66195 /*
66196 * Clear old maps. this also does some error checking for us
66197 */
66198 - munmap_back:
66199 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
66200 if (vma && vma->vm_start < addr + len) {
66201 if (do_munmap(mm, addr, len))
66202 return -ENOMEM;
66203 - goto munmap_back;
66204 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
66205 + BUG_ON(vma && vma->vm_start < addr + len);
66206 }
66207
66208 /* Check against address space limits *after* clearing old maps... */
66209 - if (!may_expand_vm(mm, len >> PAGE_SHIFT))
66210 + if (!may_expand_vm(mm, charged))
66211 return -ENOMEM;
66212
66213 if (mm->map_count > sysctl_max_map_count)
66214 return -ENOMEM;
66215
66216 - if (security_vm_enough_memory(len >> PAGE_SHIFT))
66217 + if (security_vm_enough_memory(charged))
66218 return -ENOMEM;
66219
66220 /* Can we just expand an old private anonymous mapping? */
66221 @@ -2211,7 +2641,7 @@ unsigned long do_brk(unsigned long addr,
66222 */
66223 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
66224 if (!vma) {
66225 - vm_unacct_memory(len >> PAGE_SHIFT);
66226 + vm_unacct_memory(charged);
66227 return -ENOMEM;
66228 }
66229
66230 @@ -2225,11 +2655,12 @@ unsigned long do_brk(unsigned long addr,
66231 vma_link(mm, vma, prev, rb_link, rb_parent);
66232 out:
66233 perf_event_mmap(vma);
66234 - mm->total_vm += len >> PAGE_SHIFT;
66235 + mm->total_vm += charged;
66236 if (flags & VM_LOCKED) {
66237 if (!mlock_vma_pages_range(vma, addr, addr + len))
66238 - mm->locked_vm += (len >> PAGE_SHIFT);
66239 + mm->locked_vm += charged;
66240 }
66241 + track_exec_limit(mm, addr, addr + len, flags);
66242 return addr;
66243 }
66244
66245 @@ -2276,8 +2707,10 @@ void exit_mmap(struct mm_struct *mm)
66246 * Walk the list again, actually closing and freeing it,
66247 * with preemption enabled, without holding any MM locks.
66248 */
66249 - while (vma)
66250 + while (vma) {
66251 + vma->vm_mirror = NULL;
66252 vma = remove_vma(vma);
66253 + }
66254
66255 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
66256 }
66257 @@ -2291,6 +2724,13 @@ int insert_vm_struct(struct mm_struct *
66258 struct vm_area_struct * __vma, * prev;
66259 struct rb_node ** rb_link, * rb_parent;
66260
66261 +#ifdef CONFIG_PAX_SEGMEXEC
66262 + struct vm_area_struct *vma_m = NULL;
66263 +#endif
66264 +
66265 + if (security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1))
66266 + return -EPERM;
66267 +
66268 /*
66269 * The vm_pgoff of a purely anonymous vma should be irrelevant
66270 * until its first write fault, when page's anon_vma and index
66271 @@ -2313,7 +2753,22 @@ int insert_vm_struct(struct mm_struct *
66272 if ((vma->vm_flags & VM_ACCOUNT) &&
66273 security_vm_enough_memory_mm(mm, vma_pages(vma)))
66274 return -ENOMEM;
66275 +
66276 +#ifdef CONFIG_PAX_SEGMEXEC
66277 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
66278 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
66279 + if (!vma_m)
66280 + return -ENOMEM;
66281 + }
66282 +#endif
66283 +
66284 vma_link(mm, vma, prev, rb_link, rb_parent);
66285 +
66286 +#ifdef CONFIG_PAX_SEGMEXEC
66287 + if (vma_m)
66288 + BUG_ON(pax_mirror_vma(vma_m, vma));
66289 +#endif
66290 +
66291 return 0;
66292 }
66293
66294 @@ -2331,6 +2786,8 @@ struct vm_area_struct *copy_vma(struct v
66295 struct rb_node **rb_link, *rb_parent;
66296 struct mempolicy *pol;
66297
66298 + BUG_ON(vma->vm_mirror);
66299 +
66300 /*
66301 * If anonymous vma has not yet been faulted, update new pgoff
66302 * to match new location, to increase its chance of merging.
66303 @@ -2381,6 +2838,39 @@ struct vm_area_struct *copy_vma(struct v
66304 return NULL;
66305 }
66306
66307 +#ifdef CONFIG_PAX_SEGMEXEC
66308 +long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
66309 +{
66310 + struct vm_area_struct *prev_m;
66311 + struct rb_node **rb_link_m, *rb_parent_m;
66312 + struct mempolicy *pol_m;
66313 +
66314 + BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
66315 + BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
66316 + BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
66317 + *vma_m = *vma;
66318 + INIT_LIST_HEAD(&vma_m->anon_vma_chain);
66319 + if (anon_vma_clone(vma_m, vma))
66320 + return -ENOMEM;
66321 + pol_m = vma_policy(vma_m);
66322 + mpol_get(pol_m);
66323 + vma_set_policy(vma_m, pol_m);
66324 + vma_m->vm_start += SEGMEXEC_TASK_SIZE;
66325 + vma_m->vm_end += SEGMEXEC_TASK_SIZE;
66326 + vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
66327 + vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
66328 + if (vma_m->vm_file)
66329 + get_file(vma_m->vm_file);
66330 + if (vma_m->vm_ops && vma_m->vm_ops->open)
66331 + vma_m->vm_ops->open(vma_m);
66332 + find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
66333 + vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
66334 + vma_m->vm_mirror = vma;
66335 + vma->vm_mirror = vma_m;
66336 + return 0;
66337 +}
66338 +#endif
66339 +
66340 /*
66341 * Return true if the calling process may expand its vm space by the passed
66342 * number of pages
66343 @@ -2391,7 +2881,7 @@ int may_expand_vm(struct mm_struct *mm,
66344 unsigned long lim;
66345
66346 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
66347 -
66348 + gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
66349 if (cur + npages > lim)
66350 return 0;
66351 return 1;
66352 @@ -2462,6 +2952,22 @@ int install_special_mapping(struct mm_st
66353 vma->vm_start = addr;
66354 vma->vm_end = addr + len;
66355
66356 +#ifdef CONFIG_PAX_MPROTECT
66357 + if (mm->pax_flags & MF_PAX_MPROTECT) {
66358 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
66359 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
66360 + return -EPERM;
66361 + if (!(vm_flags & VM_EXEC))
66362 + vm_flags &= ~VM_MAYEXEC;
66363 +#else
66364 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
66365 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
66366 +#endif
66367 + else
66368 + vm_flags &= ~VM_MAYWRITE;
66369 + }
66370 +#endif
66371 +
66372 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
66373 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
66374
66375 diff -urNp linux-3.0.4/mm/mprotect.c linux-3.0.4/mm/mprotect.c
66376 --- linux-3.0.4/mm/mprotect.c 2011-07-21 22:17:23.000000000 -0400
66377 +++ linux-3.0.4/mm/mprotect.c 2011-08-23 21:48:14.000000000 -0400
66378 @@ -23,10 +23,16 @@
66379 #include <linux/mmu_notifier.h>
66380 #include <linux/migrate.h>
66381 #include <linux/perf_event.h>
66382 +
66383 +#ifdef CONFIG_PAX_MPROTECT
66384 +#include <linux/elf.h>
66385 +#endif
66386 +
66387 #include <asm/uaccess.h>
66388 #include <asm/pgtable.h>
66389 #include <asm/cacheflush.h>
66390 #include <asm/tlbflush.h>
66391 +#include <asm/mmu_context.h>
66392
66393 #ifndef pgprot_modify
66394 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
66395 @@ -141,6 +147,48 @@ static void change_protection(struct vm_
66396 flush_tlb_range(vma, start, end);
66397 }
66398
66399 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
66400 +/* called while holding the mmap semaphor for writing except stack expansion */
66401 +void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
66402 +{
66403 + unsigned long oldlimit, newlimit = 0UL;
66404 +
66405 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
66406 + return;
66407 +
66408 + spin_lock(&mm->page_table_lock);
66409 + oldlimit = mm->context.user_cs_limit;
66410 + if ((prot & VM_EXEC) && oldlimit < end)
66411 + /* USER_CS limit moved up */
66412 + newlimit = end;
66413 + else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
66414 + /* USER_CS limit moved down */
66415 + newlimit = start;
66416 +
66417 + if (newlimit) {
66418 + mm->context.user_cs_limit = newlimit;
66419 +
66420 +#ifdef CONFIG_SMP
66421 + wmb();
66422 + cpus_clear(mm->context.cpu_user_cs_mask);
66423 + cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
66424 +#endif
66425 +
66426 + set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
66427 + }
66428 + spin_unlock(&mm->page_table_lock);
66429 + if (newlimit == end) {
66430 + struct vm_area_struct *vma = find_vma(mm, oldlimit);
66431 +
66432 + for (; vma && vma->vm_start < end; vma = vma->vm_next)
66433 + if (is_vm_hugetlb_page(vma))
66434 + hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
66435 + else
66436 + change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
66437 + }
66438 +}
66439 +#endif
66440 +
66441 int
66442 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
66443 unsigned long start, unsigned long end, unsigned long newflags)
66444 @@ -153,11 +201,29 @@ mprotect_fixup(struct vm_area_struct *vm
66445 int error;
66446 int dirty_accountable = 0;
66447
66448 +#ifdef CONFIG_PAX_SEGMEXEC
66449 + struct vm_area_struct *vma_m = NULL;
66450 + unsigned long start_m, end_m;
66451 +
66452 + start_m = start + SEGMEXEC_TASK_SIZE;
66453 + end_m = end + SEGMEXEC_TASK_SIZE;
66454 +#endif
66455 +
66456 if (newflags == oldflags) {
66457 *pprev = vma;
66458 return 0;
66459 }
66460
66461 + if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
66462 + struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
66463 +
66464 + if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
66465 + return -ENOMEM;
66466 +
66467 + if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
66468 + return -ENOMEM;
66469 + }
66470 +
66471 /*
66472 * If we make a private mapping writable we increase our commit;
66473 * but (without finer accounting) cannot reduce our commit if we
66474 @@ -174,6 +240,42 @@ mprotect_fixup(struct vm_area_struct *vm
66475 }
66476 }
66477
66478 +#ifdef CONFIG_PAX_SEGMEXEC
66479 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
66480 + if (start != vma->vm_start) {
66481 + error = split_vma(mm, vma, start, 1);
66482 + if (error)
66483 + goto fail;
66484 + BUG_ON(!*pprev || (*pprev)->vm_next == vma);
66485 + *pprev = (*pprev)->vm_next;
66486 + }
66487 +
66488 + if (end != vma->vm_end) {
66489 + error = split_vma(mm, vma, end, 0);
66490 + if (error)
66491 + goto fail;
66492 + }
66493 +
66494 + if (pax_find_mirror_vma(vma)) {
66495 + error = __do_munmap(mm, start_m, end_m - start_m);
66496 + if (error)
66497 + goto fail;
66498 + } else {
66499 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
66500 + if (!vma_m) {
66501 + error = -ENOMEM;
66502 + goto fail;
66503 + }
66504 + vma->vm_flags = newflags;
66505 + error = pax_mirror_vma(vma_m, vma);
66506 + if (error) {
66507 + vma->vm_flags = oldflags;
66508 + goto fail;
66509 + }
66510 + }
66511 + }
66512 +#endif
66513 +
66514 /*
66515 * First try to merge with previous and/or next vma.
66516 */
66517 @@ -204,9 +306,21 @@ success:
66518 * vm_flags and vm_page_prot are protected by the mmap_sem
66519 * held in write mode.
66520 */
66521 +
66522 +#ifdef CONFIG_PAX_SEGMEXEC
66523 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
66524 + pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
66525 +#endif
66526 +
66527 vma->vm_flags = newflags;
66528 +
66529 +#ifdef CONFIG_PAX_MPROTECT
66530 + if (mm->binfmt && mm->binfmt->handle_mprotect)
66531 + mm->binfmt->handle_mprotect(vma, newflags);
66532 +#endif
66533 +
66534 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
66535 - vm_get_page_prot(newflags));
66536 + vm_get_page_prot(vma->vm_flags));
66537
66538 if (vma_wants_writenotify(vma)) {
66539 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
66540 @@ -248,6 +362,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
66541 end = start + len;
66542 if (end <= start)
66543 return -ENOMEM;
66544 +
66545 +#ifdef CONFIG_PAX_SEGMEXEC
66546 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
66547 + if (end > SEGMEXEC_TASK_SIZE)
66548 + return -EINVAL;
66549 + } else
66550 +#endif
66551 +
66552 + if (end > TASK_SIZE)
66553 + return -EINVAL;
66554 +
66555 if (!arch_validate_prot(prot))
66556 return -EINVAL;
66557
66558 @@ -255,7 +380,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
66559 /*
66560 * Does the application expect PROT_READ to imply PROT_EXEC:
66561 */
66562 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
66563 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
66564 prot |= PROT_EXEC;
66565
66566 vm_flags = calc_vm_prot_bits(prot);
66567 @@ -287,6 +412,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
66568 if (start > vma->vm_start)
66569 prev = vma;
66570
66571 +#ifdef CONFIG_PAX_MPROTECT
66572 + if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
66573 + current->mm->binfmt->handle_mprotect(vma, vm_flags);
66574 +#endif
66575 +
66576 for (nstart = start ; ; ) {
66577 unsigned long newflags;
66578
66579 @@ -296,6 +426,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
66580
66581 /* newflags >> 4 shift VM_MAY% in place of VM_% */
66582 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
66583 + if (prot & (PROT_WRITE | PROT_EXEC))
66584 + gr_log_rwxmprotect(vma->vm_file);
66585 +
66586 + error = -EACCES;
66587 + goto out;
66588 + }
66589 +
66590 + if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
66591 error = -EACCES;
66592 goto out;
66593 }
66594 @@ -310,6 +448,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
66595 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
66596 if (error)
66597 goto out;
66598 +
66599 + track_exec_limit(current->mm, nstart, tmp, vm_flags);
66600 +
66601 nstart = tmp;
66602
66603 if (nstart < prev->vm_end)
66604 diff -urNp linux-3.0.4/mm/mremap.c linux-3.0.4/mm/mremap.c
66605 --- linux-3.0.4/mm/mremap.c 2011-07-21 22:17:23.000000000 -0400
66606 +++ linux-3.0.4/mm/mremap.c 2011-08-23 21:47:56.000000000 -0400
66607 @@ -113,6 +113,12 @@ static void move_ptes(struct vm_area_str
66608 continue;
66609 pte = ptep_clear_flush(vma, old_addr, old_pte);
66610 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
66611 +
66612 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
66613 + if (!(__supported_pte_mask & _PAGE_NX) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
66614 + pte = pte_exprotect(pte);
66615 +#endif
66616 +
66617 set_pte_at(mm, new_addr, new_pte, pte);
66618 }
66619
66620 @@ -272,6 +278,11 @@ static struct vm_area_struct *vma_to_res
66621 if (is_vm_hugetlb_page(vma))
66622 goto Einval;
66623
66624 +#ifdef CONFIG_PAX_SEGMEXEC
66625 + if (pax_find_mirror_vma(vma))
66626 + goto Einval;
66627 +#endif
66628 +
66629 /* We can't remap across vm area boundaries */
66630 if (old_len > vma->vm_end - addr)
66631 goto Efault;
66632 @@ -328,20 +339,25 @@ static unsigned long mremap_to(unsigned
66633 unsigned long ret = -EINVAL;
66634 unsigned long charged = 0;
66635 unsigned long map_flags;
66636 + unsigned long pax_task_size = TASK_SIZE;
66637
66638 if (new_addr & ~PAGE_MASK)
66639 goto out;
66640
66641 - if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
66642 +#ifdef CONFIG_PAX_SEGMEXEC
66643 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
66644 + pax_task_size = SEGMEXEC_TASK_SIZE;
66645 +#endif
66646 +
66647 + pax_task_size -= PAGE_SIZE;
66648 +
66649 + if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
66650 goto out;
66651
66652 /* Check if the location we're moving into overlaps the
66653 * old location at all, and fail if it does.
66654 */
66655 - if ((new_addr <= addr) && (new_addr+new_len) > addr)
66656 - goto out;
66657 -
66658 - if ((addr <= new_addr) && (addr+old_len) > new_addr)
66659 + if (addr + old_len > new_addr && new_addr + new_len > addr)
66660 goto out;
66661
66662 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
66663 @@ -413,6 +429,7 @@ unsigned long do_mremap(unsigned long ad
66664 struct vm_area_struct *vma;
66665 unsigned long ret = -EINVAL;
66666 unsigned long charged = 0;
66667 + unsigned long pax_task_size = TASK_SIZE;
66668
66669 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
66670 goto out;
66671 @@ -431,6 +448,17 @@ unsigned long do_mremap(unsigned long ad
66672 if (!new_len)
66673 goto out;
66674
66675 +#ifdef CONFIG_PAX_SEGMEXEC
66676 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
66677 + pax_task_size = SEGMEXEC_TASK_SIZE;
66678 +#endif
66679 +
66680 + pax_task_size -= PAGE_SIZE;
66681 +
66682 + if (new_len > pax_task_size || addr > pax_task_size-new_len ||
66683 + old_len > pax_task_size || addr > pax_task_size-old_len)
66684 + goto out;
66685 +
66686 if (flags & MREMAP_FIXED) {
66687 if (flags & MREMAP_MAYMOVE)
66688 ret = mremap_to(addr, old_len, new_addr, new_len);
66689 @@ -480,6 +508,7 @@ unsigned long do_mremap(unsigned long ad
66690 addr + new_len);
66691 }
66692 ret = addr;
66693 + track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
66694 goto out;
66695 }
66696 }
66697 @@ -506,7 +535,13 @@ unsigned long do_mremap(unsigned long ad
66698 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
66699 if (ret)
66700 goto out;
66701 +
66702 + map_flags = vma->vm_flags;
66703 ret = move_vma(vma, addr, old_len, new_len, new_addr);
66704 + if (!(ret & ~PAGE_MASK)) {
66705 + track_exec_limit(current->mm, addr, addr + old_len, 0UL);
66706 + track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
66707 + }
66708 }
66709 out:
66710 if (ret & ~PAGE_MASK)
66711 diff -urNp linux-3.0.4/mm/nobootmem.c linux-3.0.4/mm/nobootmem.c
66712 --- linux-3.0.4/mm/nobootmem.c 2011-07-21 22:17:23.000000000 -0400
66713 +++ linux-3.0.4/mm/nobootmem.c 2011-08-23 21:47:56.000000000 -0400
66714 @@ -110,19 +110,30 @@ static void __init __free_pages_memory(u
66715 unsigned long __init free_all_memory_core_early(int nodeid)
66716 {
66717 int i;
66718 - u64 start, end;
66719 + u64 start, end, startrange, endrange;
66720 unsigned long count = 0;
66721 - struct range *range = NULL;
66722 + struct range *range = NULL, rangerange = { 0, 0 };
66723 int nr_range;
66724
66725 nr_range = get_free_all_memory_range(&range, nodeid);
66726 + startrange = __pa(range) >> PAGE_SHIFT;
66727 + endrange = (__pa(range + nr_range) - 1) >> PAGE_SHIFT;
66728
66729 for (i = 0; i < nr_range; i++) {
66730 start = range[i].start;
66731 end = range[i].end;
66732 + if (start <= endrange && startrange < end) {
66733 + BUG_ON(rangerange.start | rangerange.end);
66734 + rangerange = range[i];
66735 + continue;
66736 + }
66737 count += end - start;
66738 __free_pages_memory(start, end);
66739 }
66740 + start = rangerange.start;
66741 + end = rangerange.end;
66742 + count += end - start;
66743 + __free_pages_memory(start, end);
66744
66745 return count;
66746 }
66747 diff -urNp linux-3.0.4/mm/nommu.c linux-3.0.4/mm/nommu.c
66748 --- linux-3.0.4/mm/nommu.c 2011-07-21 22:17:23.000000000 -0400
66749 +++ linux-3.0.4/mm/nommu.c 2011-08-23 21:47:56.000000000 -0400
66750 @@ -63,7 +63,6 @@ int sysctl_overcommit_memory = OVERCOMMI
66751 int sysctl_overcommit_ratio = 50; /* default is 50% */
66752 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
66753 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
66754 -int heap_stack_gap = 0;
66755
66756 atomic_long_t mmap_pages_allocated;
66757
66758 @@ -826,15 +825,6 @@ struct vm_area_struct *find_vma(struct m
66759 EXPORT_SYMBOL(find_vma);
66760
66761 /*
66762 - * find a VMA
66763 - * - we don't extend stack VMAs under NOMMU conditions
66764 - */
66765 -struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
66766 -{
66767 - return find_vma(mm, addr);
66768 -}
66769 -
66770 -/*
66771 * expand a stack to a given address
66772 * - not supported under NOMMU conditions
66773 */
66774 @@ -1554,6 +1544,7 @@ int split_vma(struct mm_struct *mm, stru
66775
66776 /* most fields are the same, copy all, and then fixup */
66777 *new = *vma;
66778 + INIT_LIST_HEAD(&new->anon_vma_chain);
66779 *region = *vma->vm_region;
66780 new->vm_region = region;
66781
66782 diff -urNp linux-3.0.4/mm/page_alloc.c linux-3.0.4/mm/page_alloc.c
66783 --- linux-3.0.4/mm/page_alloc.c 2011-07-21 22:17:23.000000000 -0400
66784 +++ linux-3.0.4/mm/page_alloc.c 2011-08-23 21:48:14.000000000 -0400
66785 @@ -340,7 +340,7 @@ out:
66786 * This usage means that zero-order pages may not be compound.
66787 */
66788
66789 -static void free_compound_page(struct page *page)
66790 +void free_compound_page(struct page *page)
66791 {
66792 __free_pages_ok(page, compound_order(page));
66793 }
66794 @@ -653,6 +653,10 @@ static bool free_pages_prepare(struct pa
66795 int i;
66796 int bad = 0;
66797
66798 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
66799 + unsigned long index = 1UL << order;
66800 +#endif
66801 +
66802 trace_mm_page_free_direct(page, order);
66803 kmemcheck_free_shadow(page, order);
66804
66805 @@ -668,6 +672,12 @@ static bool free_pages_prepare(struct pa
66806 debug_check_no_obj_freed(page_address(page),
66807 PAGE_SIZE << order);
66808 }
66809 +
66810 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
66811 + for (; index; --index)
66812 + sanitize_highpage(page + index - 1);
66813 +#endif
66814 +
66815 arch_free_page(page, order);
66816 kernel_map_pages(page, 1 << order, 0);
66817
66818 @@ -783,8 +793,10 @@ static int prep_new_page(struct page *pa
66819 arch_alloc_page(page, order);
66820 kernel_map_pages(page, 1 << order, 1);
66821
66822 +#ifndef CONFIG_PAX_MEMORY_SANITIZE
66823 if (gfp_flags & __GFP_ZERO)
66824 prep_zero_page(page, order, gfp_flags);
66825 +#endif
66826
66827 if (order && (gfp_flags & __GFP_COMP))
66828 prep_compound_page(page, order);
66829 @@ -2525,6 +2537,8 @@ void show_free_areas(unsigned int filter
66830 int cpu;
66831 struct zone *zone;
66832
66833 + pax_track_stack();
66834 +
66835 for_each_populated_zone(zone) {
66836 if (skip_free_areas_node(filter, zone_to_nid(zone)))
66837 continue;
66838 diff -urNp linux-3.0.4/mm/percpu.c linux-3.0.4/mm/percpu.c
66839 --- linux-3.0.4/mm/percpu.c 2011-07-21 22:17:23.000000000 -0400
66840 +++ linux-3.0.4/mm/percpu.c 2011-08-23 21:47:56.000000000 -0400
66841 @@ -121,7 +121,7 @@ static unsigned int pcpu_first_unit_cpu
66842 static unsigned int pcpu_last_unit_cpu __read_mostly;
66843
66844 /* the address of the first chunk which starts with the kernel static area */
66845 -void *pcpu_base_addr __read_mostly;
66846 +void *pcpu_base_addr __read_only;
66847 EXPORT_SYMBOL_GPL(pcpu_base_addr);
66848
66849 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
66850 diff -urNp linux-3.0.4/mm/rmap.c linux-3.0.4/mm/rmap.c
66851 --- linux-3.0.4/mm/rmap.c 2011-07-21 22:17:23.000000000 -0400
66852 +++ linux-3.0.4/mm/rmap.c 2011-08-23 21:47:56.000000000 -0400
66853 @@ -153,6 +153,10 @@ int anon_vma_prepare(struct vm_area_stru
66854 struct anon_vma *anon_vma = vma->anon_vma;
66855 struct anon_vma_chain *avc;
66856
66857 +#ifdef CONFIG_PAX_SEGMEXEC
66858 + struct anon_vma_chain *avc_m = NULL;
66859 +#endif
66860 +
66861 might_sleep();
66862 if (unlikely(!anon_vma)) {
66863 struct mm_struct *mm = vma->vm_mm;
66864 @@ -162,6 +166,12 @@ int anon_vma_prepare(struct vm_area_stru
66865 if (!avc)
66866 goto out_enomem;
66867
66868 +#ifdef CONFIG_PAX_SEGMEXEC
66869 + avc_m = anon_vma_chain_alloc(GFP_KERNEL);
66870 + if (!avc_m)
66871 + goto out_enomem_free_avc;
66872 +#endif
66873 +
66874 anon_vma = find_mergeable_anon_vma(vma);
66875 allocated = NULL;
66876 if (!anon_vma) {
66877 @@ -175,6 +185,21 @@ int anon_vma_prepare(struct vm_area_stru
66878 /* page_table_lock to protect against threads */
66879 spin_lock(&mm->page_table_lock);
66880 if (likely(!vma->anon_vma)) {
66881 +
66882 +#ifdef CONFIG_PAX_SEGMEXEC
66883 + struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
66884 +
66885 + if (vma_m) {
66886 + BUG_ON(vma_m->anon_vma);
66887 + vma_m->anon_vma = anon_vma;
66888 + avc_m->anon_vma = anon_vma;
66889 + avc_m->vma = vma;
66890 + list_add(&avc_m->same_vma, &vma_m->anon_vma_chain);
66891 + list_add(&avc_m->same_anon_vma, &anon_vma->head);
66892 + avc_m = NULL;
66893 + }
66894 +#endif
66895 +
66896 vma->anon_vma = anon_vma;
66897 avc->anon_vma = anon_vma;
66898 avc->vma = vma;
66899 @@ -188,12 +213,24 @@ int anon_vma_prepare(struct vm_area_stru
66900
66901 if (unlikely(allocated))
66902 put_anon_vma(allocated);
66903 +
66904 +#ifdef CONFIG_PAX_SEGMEXEC
66905 + if (unlikely(avc_m))
66906 + anon_vma_chain_free(avc_m);
66907 +#endif
66908 +
66909 if (unlikely(avc))
66910 anon_vma_chain_free(avc);
66911 }
66912 return 0;
66913
66914 out_enomem_free_avc:
66915 +
66916 +#ifdef CONFIG_PAX_SEGMEXEC
66917 + if (avc_m)
66918 + anon_vma_chain_free(avc_m);
66919 +#endif
66920 +
66921 anon_vma_chain_free(avc);
66922 out_enomem:
66923 return -ENOMEM;
66924 @@ -244,7 +281,7 @@ static void anon_vma_chain_link(struct v
66925 * Attach the anon_vmas from src to dst.
66926 * Returns 0 on success, -ENOMEM on failure.
66927 */
66928 -int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
66929 +int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
66930 {
66931 struct anon_vma_chain *avc, *pavc;
66932 struct anon_vma *root = NULL;
66933 @@ -277,7 +314,7 @@ int anon_vma_clone(struct vm_area_struct
66934 * the corresponding VMA in the parent process is attached to.
66935 * Returns 0 on success, non-zero on failure.
66936 */
66937 -int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
66938 +int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
66939 {
66940 struct anon_vma_chain *avc;
66941 struct anon_vma *anon_vma;
66942 diff -urNp linux-3.0.4/mm/shmem.c linux-3.0.4/mm/shmem.c
66943 --- linux-3.0.4/mm/shmem.c 2011-07-21 22:17:23.000000000 -0400
66944 +++ linux-3.0.4/mm/shmem.c 2011-08-23 21:48:14.000000000 -0400
66945 @@ -31,7 +31,7 @@
66946 #include <linux/percpu_counter.h>
66947 #include <linux/swap.h>
66948
66949 -static struct vfsmount *shm_mnt;
66950 +struct vfsmount *shm_mnt;
66951
66952 #ifdef CONFIG_SHMEM
66953 /*
66954 @@ -1101,6 +1101,8 @@ static int shmem_writepage(struct page *
66955 goto unlock;
66956 }
66957 entry = shmem_swp_entry(info, index, NULL);
66958 + if (!entry)
66959 + goto unlock;
66960 if (entry->val) {
66961 /*
66962 * The more uptodate page coming down from a stacked
66963 @@ -1172,6 +1174,8 @@ static struct page *shmem_swapin(swp_ent
66964 struct vm_area_struct pvma;
66965 struct page *page;
66966
66967 + pax_track_stack();
66968 +
66969 spol = mpol_cond_copy(&mpol,
66970 mpol_shared_policy_lookup(&info->policy, idx));
66971
66972 @@ -2568,8 +2572,7 @@ int shmem_fill_super(struct super_block
66973 int err = -ENOMEM;
66974
66975 /* Round up to L1_CACHE_BYTES to resist false sharing */
66976 - sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
66977 - L1_CACHE_BYTES), GFP_KERNEL);
66978 + sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
66979 if (!sbinfo)
66980 return -ENOMEM;
66981
66982 diff -urNp linux-3.0.4/mm/slab.c linux-3.0.4/mm/slab.c
66983 --- linux-3.0.4/mm/slab.c 2011-07-21 22:17:23.000000000 -0400
66984 +++ linux-3.0.4/mm/slab.c 2011-08-23 21:48:14.000000000 -0400
66985 @@ -151,7 +151,7 @@
66986
66987 /* Legal flag mask for kmem_cache_create(). */
66988 #if DEBUG
66989 -# define CREATE_MASK (SLAB_RED_ZONE | \
66990 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
66991 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
66992 SLAB_CACHE_DMA | \
66993 SLAB_STORE_USER | \
66994 @@ -159,7 +159,7 @@
66995 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
66996 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
66997 #else
66998 -# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
66999 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
67000 SLAB_CACHE_DMA | \
67001 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
67002 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
67003 @@ -288,7 +288,7 @@ struct kmem_list3 {
67004 * Need this for bootstrapping a per node allocator.
67005 */
67006 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
67007 -static struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
67008 +static struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
67009 #define CACHE_CACHE 0
67010 #define SIZE_AC MAX_NUMNODES
67011 #define SIZE_L3 (2 * MAX_NUMNODES)
67012 @@ -389,10 +389,10 @@ static void kmem_list3_init(struct kmem_
67013 if ((x)->max_freeable < i) \
67014 (x)->max_freeable = i; \
67015 } while (0)
67016 -#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
67017 -#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
67018 -#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
67019 -#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
67020 +#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
67021 +#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
67022 +#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
67023 +#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
67024 #else
67025 #define STATS_INC_ACTIVE(x) do { } while (0)
67026 #define STATS_DEC_ACTIVE(x) do { } while (0)
67027 @@ -538,7 +538,7 @@ static inline void *index_to_obj(struct
67028 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
67029 */
67030 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
67031 - const struct slab *slab, void *obj)
67032 + const struct slab *slab, const void *obj)
67033 {
67034 u32 offset = (obj - slab->s_mem);
67035 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
67036 @@ -564,7 +564,7 @@ struct cache_names {
67037 static struct cache_names __initdata cache_names[] = {
67038 #define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
67039 #include <linux/kmalloc_sizes.h>
67040 - {NULL,}
67041 + {NULL}
67042 #undef CACHE
67043 };
67044
67045 @@ -1530,7 +1530,7 @@ void __init kmem_cache_init(void)
67046 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
67047 sizes[INDEX_AC].cs_size,
67048 ARCH_KMALLOC_MINALIGN,
67049 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
67050 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
67051 NULL);
67052
67053 if (INDEX_AC != INDEX_L3) {
67054 @@ -1538,7 +1538,7 @@ void __init kmem_cache_init(void)
67055 kmem_cache_create(names[INDEX_L3].name,
67056 sizes[INDEX_L3].cs_size,
67057 ARCH_KMALLOC_MINALIGN,
67058 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
67059 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
67060 NULL);
67061 }
67062
67063 @@ -1556,7 +1556,7 @@ void __init kmem_cache_init(void)
67064 sizes->cs_cachep = kmem_cache_create(names->name,
67065 sizes->cs_size,
67066 ARCH_KMALLOC_MINALIGN,
67067 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
67068 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
67069 NULL);
67070 }
67071 #ifdef CONFIG_ZONE_DMA
67072 @@ -4272,10 +4272,10 @@ static int s_show(struct seq_file *m, vo
67073 }
67074 /* cpu stats */
67075 {
67076 - unsigned long allochit = atomic_read(&cachep->allochit);
67077 - unsigned long allocmiss = atomic_read(&cachep->allocmiss);
67078 - unsigned long freehit = atomic_read(&cachep->freehit);
67079 - unsigned long freemiss = atomic_read(&cachep->freemiss);
67080 + unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
67081 + unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
67082 + unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
67083 + unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
67084
67085 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
67086 allochit, allocmiss, freehit, freemiss);
67087 @@ -4532,15 +4532,66 @@ static const struct file_operations proc
67088
67089 static int __init slab_proc_init(void)
67090 {
67091 - proc_create("slabinfo",S_IWUSR|S_IRUGO,NULL,&proc_slabinfo_operations);
67092 + mode_t gr_mode = S_IRUGO;
67093 +
67094 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
67095 + gr_mode = S_IRUSR;
67096 +#endif
67097 +
67098 + proc_create("slabinfo",S_IWUSR|gr_mode,NULL,&proc_slabinfo_operations);
67099 #ifdef CONFIG_DEBUG_SLAB_LEAK
67100 - proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
67101 + proc_create("slab_allocators", gr_mode, NULL, &proc_slabstats_operations);
67102 #endif
67103 return 0;
67104 }
67105 module_init(slab_proc_init);
67106 #endif
67107
67108 +void check_object_size(const void *ptr, unsigned long n, bool to)
67109 +{
67110 +
67111 +#ifdef CONFIG_PAX_USERCOPY
67112 + struct page *page;
67113 + struct kmem_cache *cachep = NULL;
67114 + struct slab *slabp;
67115 + unsigned int objnr;
67116 + unsigned long offset;
67117 +
67118 + if (!n)
67119 + return;
67120 +
67121 + if (ZERO_OR_NULL_PTR(ptr))
67122 + goto report;
67123 +
67124 + if (!virt_addr_valid(ptr))
67125 + return;
67126 +
67127 + page = virt_to_head_page(ptr);
67128 +
67129 + if (!PageSlab(page)) {
67130 + if (object_is_on_stack(ptr, n) == -1)
67131 + goto report;
67132 + return;
67133 + }
67134 +
67135 + cachep = page_get_cache(page);
67136 + if (!(cachep->flags & SLAB_USERCOPY))
67137 + goto report;
67138 +
67139 + slabp = page_get_slab(page);
67140 + objnr = obj_to_index(cachep, slabp, ptr);
67141 + BUG_ON(objnr >= cachep->num);
67142 + offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
67143 + if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
67144 + return;
67145 +
67146 +report:
67147 + pax_report_usercopy(ptr, n, to, cachep ? cachep->name : NULL);
67148 +#endif
67149 +
67150 +}
67151 +EXPORT_SYMBOL(check_object_size);
67152 +
67153 /**
67154 * ksize - get the actual amount of memory allocated for a given object
67155 * @objp: Pointer to the object
67156 diff -urNp linux-3.0.4/mm/slob.c linux-3.0.4/mm/slob.c
67157 --- linux-3.0.4/mm/slob.c 2011-07-21 22:17:23.000000000 -0400
67158 +++ linux-3.0.4/mm/slob.c 2011-08-23 21:47:56.000000000 -0400
67159 @@ -29,7 +29,7 @@
67160 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
67161 * alloc_pages() directly, allocating compound pages so the page order
67162 * does not have to be separately tracked, and also stores the exact
67163 - * allocation size in page->private so that it can be used to accurately
67164 + * allocation size in slob_page->size so that it can be used to accurately
67165 * provide ksize(). These objects are detected in kfree() because slob_page()
67166 * is false for them.
67167 *
67168 @@ -58,6 +58,7 @@
67169 */
67170
67171 #include <linux/kernel.h>
67172 +#include <linux/sched.h>
67173 #include <linux/slab.h>
67174 #include <linux/mm.h>
67175 #include <linux/swap.h> /* struct reclaim_state */
67176 @@ -102,7 +103,8 @@ struct slob_page {
67177 unsigned long flags; /* mandatory */
67178 atomic_t _count; /* mandatory */
67179 slobidx_t units; /* free units left in page */
67180 - unsigned long pad[2];
67181 + unsigned long pad[1];
67182 + unsigned long size; /* size when >=PAGE_SIZE */
67183 slob_t *free; /* first free slob_t in page */
67184 struct list_head list; /* linked list of free pages */
67185 };
67186 @@ -135,7 +137,7 @@ static LIST_HEAD(free_slob_large);
67187 */
67188 static inline int is_slob_page(struct slob_page *sp)
67189 {
67190 - return PageSlab((struct page *)sp);
67191 + return PageSlab((struct page *)sp) && !sp->size;
67192 }
67193
67194 static inline void set_slob_page(struct slob_page *sp)
67195 @@ -150,7 +152,7 @@ static inline void clear_slob_page(struc
67196
67197 static inline struct slob_page *slob_page(const void *addr)
67198 {
67199 - return (struct slob_page *)virt_to_page(addr);
67200 + return (struct slob_page *)virt_to_head_page(addr);
67201 }
67202
67203 /*
67204 @@ -210,7 +212,7 @@ static void set_slob(slob_t *s, slobidx_
67205 /*
67206 * Return the size of a slob block.
67207 */
67208 -static slobidx_t slob_units(slob_t *s)
67209 +static slobidx_t slob_units(const slob_t *s)
67210 {
67211 if (s->units > 0)
67212 return s->units;
67213 @@ -220,7 +222,7 @@ static slobidx_t slob_units(slob_t *s)
67214 /*
67215 * Return the next free slob block pointer after this one.
67216 */
67217 -static slob_t *slob_next(slob_t *s)
67218 +static slob_t *slob_next(const slob_t *s)
67219 {
67220 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
67221 slobidx_t next;
67222 @@ -235,7 +237,7 @@ static slob_t *slob_next(slob_t *s)
67223 /*
67224 * Returns true if s is the last free block in its page.
67225 */
67226 -static int slob_last(slob_t *s)
67227 +static int slob_last(const slob_t *s)
67228 {
67229 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
67230 }
67231 @@ -254,6 +256,7 @@ static void *slob_new_pages(gfp_t gfp, i
67232 if (!page)
67233 return NULL;
67234
67235 + set_slob_page(page);
67236 return page_address(page);
67237 }
67238
67239 @@ -370,11 +373,11 @@ static void *slob_alloc(size_t size, gfp
67240 if (!b)
67241 return NULL;
67242 sp = slob_page(b);
67243 - set_slob_page(sp);
67244
67245 spin_lock_irqsave(&slob_lock, flags);
67246 sp->units = SLOB_UNITS(PAGE_SIZE);
67247 sp->free = b;
67248 + sp->size = 0;
67249 INIT_LIST_HEAD(&sp->list);
67250 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
67251 set_slob_page_free(sp, slob_list);
67252 @@ -476,10 +479,9 @@ out:
67253 * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
67254 */
67255
67256 -void *__kmalloc_node(size_t size, gfp_t gfp, int node)
67257 +static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
67258 {
67259 - unsigned int *m;
67260 - int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
67261 + slob_t *m;
67262 void *ret;
67263
67264 lockdep_trace_alloc(gfp);
67265 @@ -492,7 +494,10 @@ void *__kmalloc_node(size_t size, gfp_t
67266
67267 if (!m)
67268 return NULL;
67269 - *m = size;
67270 + BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
67271 + BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
67272 + m[0].units = size;
67273 + m[1].units = align;
67274 ret = (void *)m + align;
67275
67276 trace_kmalloc_node(_RET_IP_, ret,
67277 @@ -504,16 +509,25 @@ void *__kmalloc_node(size_t size, gfp_t
67278 gfp |= __GFP_COMP;
67279 ret = slob_new_pages(gfp, order, node);
67280 if (ret) {
67281 - struct page *page;
67282 - page = virt_to_page(ret);
67283 - page->private = size;
67284 + struct slob_page *sp;
67285 + sp = slob_page(ret);
67286 + sp->size = size;
67287 }
67288
67289 trace_kmalloc_node(_RET_IP_, ret,
67290 size, PAGE_SIZE << order, gfp, node);
67291 }
67292
67293 - kmemleak_alloc(ret, size, 1, gfp);
67294 + return ret;
67295 +}
67296 +
67297 +void *__kmalloc_node(size_t size, gfp_t gfp, int node)
67298 +{
67299 + int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
67300 + void *ret = __kmalloc_node_align(size, gfp, node, align);
67301 +
67302 + if (!ZERO_OR_NULL_PTR(ret))
67303 + kmemleak_alloc(ret, size, 1, gfp);
67304 return ret;
67305 }
67306 EXPORT_SYMBOL(__kmalloc_node);
67307 @@ -531,13 +545,88 @@ void kfree(const void *block)
67308 sp = slob_page(block);
67309 if (is_slob_page(sp)) {
67310 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
67311 - unsigned int *m = (unsigned int *)(block - align);
67312 - slob_free(m, *m + align);
67313 - } else
67314 + slob_t *m = (slob_t *)(block - align);
67315 + slob_free(m, m[0].units + align);
67316 + } else {
67317 + clear_slob_page(sp);
67318 + free_slob_page(sp);
67319 + sp->size = 0;
67320 put_page(&sp->page);
67321 + }
67322 }
67323 EXPORT_SYMBOL(kfree);
67324
67325 +void check_object_size(const void *ptr, unsigned long n, bool to)
67326 +{
67327 +
67328 +#ifdef CONFIG_PAX_USERCOPY
67329 + struct slob_page *sp;
67330 + const slob_t *free;
67331 + const void *base;
67332 + unsigned long flags;
67333 +
67334 + if (!n)
67335 + return;
67336 +
67337 + if (ZERO_OR_NULL_PTR(ptr))
67338 + goto report;
67339 +
67340 + if (!virt_addr_valid(ptr))
67341 + return;
67342 +
67343 + sp = slob_page(ptr);
67344 + if (!PageSlab((struct page*)sp)) {
67345 + if (object_is_on_stack(ptr, n) == -1)
67346 + goto report;
67347 + return;
67348 + }
67349 +
67350 + if (sp->size) {
67351 + base = page_address(&sp->page);
67352 + if (base <= ptr && n <= sp->size - (ptr - base))
67353 + return;
67354 + goto report;
67355 + }
67356 +
67357 + /* some tricky double walking to find the chunk */
67358 + spin_lock_irqsave(&slob_lock, flags);
67359 + base = (void *)((unsigned long)ptr & PAGE_MASK);
67360 + free = sp->free;
67361 +
67362 + while (!slob_last(free) && (void *)free <= ptr) {
67363 + base = free + slob_units(free);
67364 + free = slob_next(free);
67365 + }
67366 +
67367 + while (base < (void *)free) {
67368 + slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
67369 + int size = SLOB_UNIT * SLOB_UNITS(m + align);
67370 + int offset;
67371 +
67372 + if (ptr < base + align)
67373 + break;
67374 +
67375 + offset = ptr - base - align;
67376 + if (offset >= m) {
67377 + base += size;
67378 + continue;
67379 + }
67380 +
67381 + if (n > m - offset)
67382 + break;
67383 +
67384 + spin_unlock_irqrestore(&slob_lock, flags);
67385 + return;
67386 + }
67387 +
67388 + spin_unlock_irqrestore(&slob_lock, flags);
67389 +report:
67390 + pax_report_usercopy(ptr, n, to, NULL);
67391 +#endif
67392 +
67393 +}
67394 +EXPORT_SYMBOL(check_object_size);
67395 +
67396 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
67397 size_t ksize(const void *block)
67398 {
67399 @@ -550,10 +639,10 @@ size_t ksize(const void *block)
67400 sp = slob_page(block);
67401 if (is_slob_page(sp)) {
67402 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
67403 - unsigned int *m = (unsigned int *)(block - align);
67404 - return SLOB_UNITS(*m) * SLOB_UNIT;
67405 + slob_t *m = (slob_t *)(block - align);
67406 + return SLOB_UNITS(m[0].units) * SLOB_UNIT;
67407 } else
67408 - return sp->page.private;
67409 + return sp->size;
67410 }
67411 EXPORT_SYMBOL(ksize);
67412
67413 @@ -569,8 +658,13 @@ struct kmem_cache *kmem_cache_create(con
67414 {
67415 struct kmem_cache *c;
67416
67417 +#ifdef CONFIG_PAX_USERCOPY
67418 + c = __kmalloc_node_align(sizeof(struct kmem_cache),
67419 + GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
67420 +#else
67421 c = slob_alloc(sizeof(struct kmem_cache),
67422 GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
67423 +#endif
67424
67425 if (c) {
67426 c->name = name;
67427 @@ -608,17 +702,25 @@ void *kmem_cache_alloc_node(struct kmem_
67428 {
67429 void *b;
67430
67431 +#ifdef CONFIG_PAX_USERCOPY
67432 + b = __kmalloc_node_align(c->size, flags, node, c->align);
67433 +#else
67434 if (c->size < PAGE_SIZE) {
67435 b = slob_alloc(c->size, flags, c->align, node);
67436 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
67437 SLOB_UNITS(c->size) * SLOB_UNIT,
67438 flags, node);
67439 } else {
67440 + struct slob_page *sp;
67441 +
67442 b = slob_new_pages(flags, get_order(c->size), node);
67443 + sp = slob_page(b);
67444 + sp->size = c->size;
67445 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
67446 PAGE_SIZE << get_order(c->size),
67447 flags, node);
67448 }
67449 +#endif
67450
67451 if (c->ctor)
67452 c->ctor(b);
67453 @@ -630,10 +732,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
67454
67455 static void __kmem_cache_free(void *b, int size)
67456 {
67457 - if (size < PAGE_SIZE)
67458 + struct slob_page *sp = slob_page(b);
67459 +
67460 + if (is_slob_page(sp))
67461 slob_free(b, size);
67462 - else
67463 + else {
67464 + clear_slob_page(sp);
67465 + free_slob_page(sp);
67466 + sp->size = 0;
67467 slob_free_pages(b, get_order(size));
67468 + }
67469 }
67470
67471 static void kmem_rcu_free(struct rcu_head *head)
67472 @@ -646,17 +754,31 @@ static void kmem_rcu_free(struct rcu_hea
67473
67474 void kmem_cache_free(struct kmem_cache *c, void *b)
67475 {
67476 + int size = c->size;
67477 +
67478 +#ifdef CONFIG_PAX_USERCOPY
67479 + if (size + c->align < PAGE_SIZE) {
67480 + size += c->align;
67481 + b -= c->align;
67482 + }
67483 +#endif
67484 +
67485 kmemleak_free_recursive(b, c->flags);
67486 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
67487 struct slob_rcu *slob_rcu;
67488 - slob_rcu = b + (c->size - sizeof(struct slob_rcu));
67489 - slob_rcu->size = c->size;
67490 + slob_rcu = b + (size - sizeof(struct slob_rcu));
67491 + slob_rcu->size = size;
67492 call_rcu(&slob_rcu->head, kmem_rcu_free);
67493 } else {
67494 - __kmem_cache_free(b, c->size);
67495 + __kmem_cache_free(b, size);
67496 }
67497
67498 +#ifdef CONFIG_PAX_USERCOPY
67499 + trace_kfree(_RET_IP_, b);
67500 +#else
67501 trace_kmem_cache_free(_RET_IP_, b);
67502 +#endif
67503 +
67504 }
67505 EXPORT_SYMBOL(kmem_cache_free);
67506
67507 diff -urNp linux-3.0.4/mm/slub.c linux-3.0.4/mm/slub.c
67508 --- linux-3.0.4/mm/slub.c 2011-07-21 22:17:23.000000000 -0400
67509 +++ linux-3.0.4/mm/slub.c 2011-09-25 22:15:40.000000000 -0400
67510 @@ -200,7 +200,7 @@ struct track {
67511
67512 enum track_item { TRACK_ALLOC, TRACK_FREE };
67513
67514 -#ifdef CONFIG_SYSFS
67515 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
67516 static int sysfs_slab_add(struct kmem_cache *);
67517 static int sysfs_slab_alias(struct kmem_cache *, const char *);
67518 static void sysfs_slab_remove(struct kmem_cache *);
67519 @@ -442,7 +442,7 @@ static void print_track(const char *s, s
67520 if (!t->addr)
67521 return;
67522
67523 - printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
67524 + printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
67525 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
67526 }
67527
67528 @@ -2137,6 +2137,8 @@ void kmem_cache_free(struct kmem_cache *
67529
67530 page = virt_to_head_page(x);
67531
67532 + BUG_ON(!PageSlab(page));
67533 +
67534 slab_free(s, page, x, _RET_IP_);
67535
67536 trace_kmem_cache_free(_RET_IP_, x);
67537 @@ -2170,7 +2172,7 @@ static int slub_min_objects;
67538 * Merge control. If this is set then no merging of slab caches will occur.
67539 * (Could be removed. This was introduced to pacify the merge skeptics.)
67540 */
67541 -static int slub_nomerge;
67542 +static int slub_nomerge = 1;
67543
67544 /*
67545 * Calculate the order of allocation given an slab object size.
67546 @@ -2594,7 +2596,7 @@ static int kmem_cache_open(struct kmem_c
67547 * list to avoid pounding the page allocator excessively.
67548 */
67549 set_min_partial(s, ilog2(s->size));
67550 - s->refcount = 1;
67551 + atomic_set(&s->refcount, 1);
67552 #ifdef CONFIG_NUMA
67553 s->remote_node_defrag_ratio = 1000;
67554 #endif
67555 @@ -2699,8 +2701,7 @@ static inline int kmem_cache_close(struc
67556 void kmem_cache_destroy(struct kmem_cache *s)
67557 {
67558 down_write(&slub_lock);
67559 - s->refcount--;
67560 - if (!s->refcount) {
67561 + if (atomic_dec_and_test(&s->refcount)) {
67562 list_del(&s->list);
67563 if (kmem_cache_close(s)) {
67564 printk(KERN_ERR "SLUB %s: %s called for cache that "
67565 @@ -2910,6 +2911,46 @@ void *__kmalloc_node(size_t size, gfp_t
67566 EXPORT_SYMBOL(__kmalloc_node);
67567 #endif
67568
67569 +void check_object_size(const void *ptr, unsigned long n, bool to)
67570 +{
67571 +
67572 +#ifdef CONFIG_PAX_USERCOPY
67573 + struct page *page;
67574 + struct kmem_cache *s = NULL;
67575 + unsigned long offset;
67576 +
67577 + if (!n)
67578 + return;
67579 +
67580 + if (ZERO_OR_NULL_PTR(ptr))
67581 + goto report;
67582 +
67583 + if (!virt_addr_valid(ptr))
67584 + return;
67585 +
67586 + page = virt_to_head_page(ptr);
67587 +
67588 + if (!PageSlab(page)) {
67589 + if (object_is_on_stack(ptr, n) == -1)
67590 + goto report;
67591 + return;
67592 + }
67593 +
67594 + s = page->slab;
67595 + if (!(s->flags & SLAB_USERCOPY))
67596 + goto report;
67597 +
67598 + offset = (ptr - page_address(page)) % s->size;
67599 + if (offset <= s->objsize && n <= s->objsize - offset)
67600 + return;
67601 +
67602 +report:
67603 + pax_report_usercopy(ptr, n, to, s ? s->name : NULL);
67604 +#endif
67605 +
67606 +}
67607 +EXPORT_SYMBOL(check_object_size);
67608 +
67609 size_t ksize(const void *object)
67610 {
67611 struct page *page;
67612 @@ -3154,7 +3195,7 @@ static void __init kmem_cache_bootstrap_
67613 int node;
67614
67615 list_add(&s->list, &slab_caches);
67616 - s->refcount = -1;
67617 + atomic_set(&s->refcount, -1);
67618
67619 for_each_node_state(node, N_NORMAL_MEMORY) {
67620 struct kmem_cache_node *n = get_node(s, node);
67621 @@ -3271,17 +3312,17 @@ void __init kmem_cache_init(void)
67622
67623 /* Caches that are not of the two-to-the-power-of size */
67624 if (KMALLOC_MIN_SIZE <= 32) {
67625 - kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0);
67626 + kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, SLAB_USERCOPY);
67627 caches++;
67628 }
67629
67630 if (KMALLOC_MIN_SIZE <= 64) {
67631 - kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0);
67632 + kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, SLAB_USERCOPY);
67633 caches++;
67634 }
67635
67636 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
67637 - kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
67638 + kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, SLAB_USERCOPY);
67639 caches++;
67640 }
67641
67642 @@ -3349,7 +3390,7 @@ static int slab_unmergeable(struct kmem_
67643 /*
67644 * We may have set a slab to be unmergeable during bootstrap.
67645 */
67646 - if (s->refcount < 0)
67647 + if (atomic_read(&s->refcount) < 0)
67648 return 1;
67649
67650 return 0;
67651 @@ -3408,7 +3449,7 @@ struct kmem_cache *kmem_cache_create(con
67652 down_write(&slub_lock);
67653 s = find_mergeable(size, align, flags, name, ctor);
67654 if (s) {
67655 - s->refcount++;
67656 + atomic_inc(&s->refcount);
67657 /*
67658 * Adjust the object sizes so that we clear
67659 * the complete object on kzalloc.
67660 @@ -3417,7 +3458,7 @@ struct kmem_cache *kmem_cache_create(con
67661 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
67662
67663 if (sysfs_slab_alias(s, name)) {
67664 - s->refcount--;
67665 + atomic_dec(&s->refcount);
67666 goto err;
67667 }
67668 up_write(&slub_lock);
67669 @@ -3545,7 +3586,7 @@ void *__kmalloc_node_track_caller(size_t
67670 }
67671 #endif
67672
67673 -#ifdef CONFIG_SYSFS
67674 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
67675 static int count_inuse(struct page *page)
67676 {
67677 return page->inuse;
67678 @@ -3935,12 +3976,12 @@ static void resiliency_test(void)
67679 validate_slab_cache(kmalloc_caches[9]);
67680 }
67681 #else
67682 -#ifdef CONFIG_SYSFS
67683 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
67684 static void resiliency_test(void) {};
67685 #endif
67686 #endif
67687
67688 -#ifdef CONFIG_SYSFS
67689 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
67690 enum slab_stat_type {
67691 SL_ALL, /* All slabs */
67692 SL_PARTIAL, /* Only partially allocated slabs */
67693 @@ -4150,7 +4191,7 @@ SLAB_ATTR_RO(ctor);
67694
67695 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
67696 {
67697 - return sprintf(buf, "%d\n", s->refcount - 1);
67698 + return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
67699 }
67700 SLAB_ATTR_RO(aliases);
67701
67702 @@ -4662,6 +4703,7 @@ static char *create_unique_id(struct kme
67703 return name;
67704 }
67705
67706 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
67707 static int sysfs_slab_add(struct kmem_cache *s)
67708 {
67709 int err;
67710 @@ -4724,6 +4766,7 @@ static void sysfs_slab_remove(struct kme
67711 kobject_del(&s->kobj);
67712 kobject_put(&s->kobj);
67713 }
67714 +#endif
67715
67716 /*
67717 * Need to buffer aliases during bootup until sysfs becomes
67718 @@ -4737,6 +4780,7 @@ struct saved_alias {
67719
67720 static struct saved_alias *alias_list;
67721
67722 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
67723 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
67724 {
67725 struct saved_alias *al;
67726 @@ -4759,6 +4803,7 @@ static int sysfs_slab_alias(struct kmem_
67727 alias_list = al;
67728 return 0;
67729 }
67730 +#endif
67731
67732 static int __init slab_sysfs_init(void)
67733 {
67734 @@ -4894,7 +4939,13 @@ static const struct file_operations proc
67735
67736 static int __init slab_proc_init(void)
67737 {
67738 - proc_create("slabinfo", S_IRUGO, NULL, &proc_slabinfo_operations);
67739 + mode_t gr_mode = S_IRUGO;
67740 +
67741 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
67742 + gr_mode = S_IRUSR;
67743 +#endif
67744 +
67745 + proc_create("slabinfo", gr_mode, NULL, &proc_slabinfo_operations);
67746 return 0;
67747 }
67748 module_init(slab_proc_init);
67749 diff -urNp linux-3.0.4/mm/swap.c linux-3.0.4/mm/swap.c
67750 --- linux-3.0.4/mm/swap.c 2011-07-21 22:17:23.000000000 -0400
67751 +++ linux-3.0.4/mm/swap.c 2011-08-23 21:47:56.000000000 -0400
67752 @@ -31,6 +31,7 @@
67753 #include <linux/backing-dev.h>
67754 #include <linux/memcontrol.h>
67755 #include <linux/gfp.h>
67756 +#include <linux/hugetlb.h>
67757
67758 #include "internal.h"
67759
67760 @@ -71,6 +72,8 @@ static void __put_compound_page(struct p
67761
67762 __page_cache_release(page);
67763 dtor = get_compound_page_dtor(page);
67764 + if (!PageHuge(page))
67765 + BUG_ON(dtor != free_compound_page);
67766 (*dtor)(page);
67767 }
67768
67769 diff -urNp linux-3.0.4/mm/swapfile.c linux-3.0.4/mm/swapfile.c
67770 --- linux-3.0.4/mm/swapfile.c 2011-07-21 22:17:23.000000000 -0400
67771 +++ linux-3.0.4/mm/swapfile.c 2011-08-23 21:47:56.000000000 -0400
67772 @@ -62,7 +62,7 @@ static DEFINE_MUTEX(swapon_mutex);
67773
67774 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
67775 /* Activity counter to indicate that a swapon or swapoff has occurred */
67776 -static atomic_t proc_poll_event = ATOMIC_INIT(0);
67777 +static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
67778
67779 static inline unsigned char swap_count(unsigned char ent)
67780 {
67781 @@ -1671,7 +1671,7 @@ SYSCALL_DEFINE1(swapoff, const char __us
67782 }
67783 filp_close(swap_file, NULL);
67784 err = 0;
67785 - atomic_inc(&proc_poll_event);
67786 + atomic_inc_unchecked(&proc_poll_event);
67787 wake_up_interruptible(&proc_poll_wait);
67788
67789 out_dput:
67790 @@ -1692,8 +1692,8 @@ static unsigned swaps_poll(struct file *
67791
67792 poll_wait(file, &proc_poll_wait, wait);
67793
67794 - if (s->event != atomic_read(&proc_poll_event)) {
67795 - s->event = atomic_read(&proc_poll_event);
67796 + if (s->event != atomic_read_unchecked(&proc_poll_event)) {
67797 + s->event = atomic_read_unchecked(&proc_poll_event);
67798 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
67799 }
67800
67801 @@ -1799,7 +1799,7 @@ static int swaps_open(struct inode *inod
67802 }
67803
67804 s->seq.private = s;
67805 - s->event = atomic_read(&proc_poll_event);
67806 + s->event = atomic_read_unchecked(&proc_poll_event);
67807 return ret;
67808 }
67809
67810 @@ -2133,7 +2133,7 @@ SYSCALL_DEFINE2(swapon, const char __use
67811 (p->flags & SWP_DISCARDABLE) ? "D" : "");
67812
67813 mutex_unlock(&swapon_mutex);
67814 - atomic_inc(&proc_poll_event);
67815 + atomic_inc_unchecked(&proc_poll_event);
67816 wake_up_interruptible(&proc_poll_wait);
67817
67818 if (S_ISREG(inode->i_mode))
67819 diff -urNp linux-3.0.4/mm/util.c linux-3.0.4/mm/util.c
67820 --- linux-3.0.4/mm/util.c 2011-07-21 22:17:23.000000000 -0400
67821 +++ linux-3.0.4/mm/util.c 2011-08-23 21:47:56.000000000 -0400
67822 @@ -114,6 +114,7 @@ EXPORT_SYMBOL(memdup_user);
67823 * allocated buffer. Use this if you don't want to free the buffer immediately
67824 * like, for example, with RCU.
67825 */
67826 +#undef __krealloc
67827 void *__krealloc(const void *p, size_t new_size, gfp_t flags)
67828 {
67829 void *ret;
67830 @@ -147,6 +148,7 @@ EXPORT_SYMBOL(__krealloc);
67831 * behaves exactly like kmalloc(). If @size is 0 and @p is not a
67832 * %NULL pointer, the object pointed to is freed.
67833 */
67834 +#undef krealloc
67835 void *krealloc(const void *p, size_t new_size, gfp_t flags)
67836 {
67837 void *ret;
67838 @@ -243,6 +245,12 @@ void __vma_link_list(struct mm_struct *m
67839 void arch_pick_mmap_layout(struct mm_struct *mm)
67840 {
67841 mm->mmap_base = TASK_UNMAPPED_BASE;
67842 +
67843 +#ifdef CONFIG_PAX_RANDMMAP
67844 + if (mm->pax_flags & MF_PAX_RANDMMAP)
67845 + mm->mmap_base += mm->delta_mmap;
67846 +#endif
67847 +
67848 mm->get_unmapped_area = arch_get_unmapped_area;
67849 mm->unmap_area = arch_unmap_area;
67850 }
67851 diff -urNp linux-3.0.4/mm/vmalloc.c linux-3.0.4/mm/vmalloc.c
67852 --- linux-3.0.4/mm/vmalloc.c 2011-09-02 18:11:21.000000000 -0400
67853 +++ linux-3.0.4/mm/vmalloc.c 2011-08-23 21:47:56.000000000 -0400
67854 @@ -39,8 +39,19 @@ static void vunmap_pte_range(pmd_t *pmd,
67855
67856 pte = pte_offset_kernel(pmd, addr);
67857 do {
67858 - pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
67859 - WARN_ON(!pte_none(ptent) && !pte_present(ptent));
67860 +
67861 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
67862 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
67863 + BUG_ON(!pte_exec(*pte));
67864 + set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
67865 + continue;
67866 + }
67867 +#endif
67868 +
67869 + {
67870 + pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
67871 + WARN_ON(!pte_none(ptent) && !pte_present(ptent));
67872 + }
67873 } while (pte++, addr += PAGE_SIZE, addr != end);
67874 }
67875
67876 @@ -91,6 +102,7 @@ static int vmap_pte_range(pmd_t *pmd, un
67877 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
67878 {
67879 pte_t *pte;
67880 + int ret = -ENOMEM;
67881
67882 /*
67883 * nr is a running index into the array which helps higher level
67884 @@ -100,17 +112,30 @@ static int vmap_pte_range(pmd_t *pmd, un
67885 pte = pte_alloc_kernel(pmd, addr);
67886 if (!pte)
67887 return -ENOMEM;
67888 +
67889 + pax_open_kernel();
67890 do {
67891 struct page *page = pages[*nr];
67892
67893 - if (WARN_ON(!pte_none(*pte)))
67894 - return -EBUSY;
67895 - if (WARN_ON(!page))
67896 - return -ENOMEM;
67897 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
67898 + if (pgprot_val(prot) & _PAGE_NX)
67899 +#endif
67900 +
67901 + if (WARN_ON(!pte_none(*pte))) {
67902 + ret = -EBUSY;
67903 + goto out;
67904 + }
67905 + if (WARN_ON(!page)) {
67906 + ret = -ENOMEM;
67907 + goto out;
67908 + }
67909 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
67910 (*nr)++;
67911 } while (pte++, addr += PAGE_SIZE, addr != end);
67912 - return 0;
67913 + ret = 0;
67914 +out:
67915 + pax_close_kernel();
67916 + return ret;
67917 }
67918
67919 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
67920 @@ -191,11 +216,20 @@ int is_vmalloc_or_module_addr(const void
67921 * and fall back on vmalloc() if that fails. Others
67922 * just put it in the vmalloc space.
67923 */
67924 -#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
67925 +#ifdef CONFIG_MODULES
67926 +#ifdef MODULES_VADDR
67927 unsigned long addr = (unsigned long)x;
67928 if (addr >= MODULES_VADDR && addr < MODULES_END)
67929 return 1;
67930 #endif
67931 +
67932 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
67933 + if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
67934 + return 1;
67935 +#endif
67936 +
67937 +#endif
67938 +
67939 return is_vmalloc_addr(x);
67940 }
67941
67942 @@ -216,8 +250,14 @@ struct page *vmalloc_to_page(const void
67943
67944 if (!pgd_none(*pgd)) {
67945 pud_t *pud = pud_offset(pgd, addr);
67946 +#ifdef CONFIG_X86
67947 + if (!pud_large(*pud))
67948 +#endif
67949 if (!pud_none(*pud)) {
67950 pmd_t *pmd = pmd_offset(pud, addr);
67951 +#ifdef CONFIG_X86
67952 + if (!pmd_large(*pmd))
67953 +#endif
67954 if (!pmd_none(*pmd)) {
67955 pte_t *ptep, pte;
67956
67957 @@ -1297,6 +1337,16 @@ static struct vm_struct *__get_vm_area_n
67958 struct vm_struct *area;
67959
67960 BUG_ON(in_interrupt());
67961 +
67962 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
67963 + if (flags & VM_KERNEXEC) {
67964 + if (start != VMALLOC_START || end != VMALLOC_END)
67965 + return NULL;
67966 + start = (unsigned long)MODULES_EXEC_VADDR;
67967 + end = (unsigned long)MODULES_EXEC_END;
67968 + }
67969 +#endif
67970 +
67971 if (flags & VM_IOREMAP) {
67972 int bit = fls(size);
67973
67974 @@ -1515,6 +1565,11 @@ void *vmap(struct page **pages, unsigned
67975 if (count > totalram_pages)
67976 return NULL;
67977
67978 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
67979 + if (!(pgprot_val(prot) & _PAGE_NX))
67980 + flags |= VM_KERNEXEC;
67981 +#endif
67982 +
67983 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
67984 __builtin_return_address(0));
67985 if (!area)
67986 @@ -1616,6 +1671,13 @@ void *__vmalloc_node_range(unsigned long
67987 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
67988 return NULL;
67989
67990 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
67991 + if (!(pgprot_val(prot) & _PAGE_NX))
67992 + area = __get_vm_area_node(size, align, VM_ALLOC | VM_KERNEXEC, VMALLOC_START, VMALLOC_END,
67993 + node, gfp_mask, caller);
67994 + else
67995 +#endif
67996 +
67997 area = __get_vm_area_node(size, align, VM_ALLOC, start, end, node,
67998 gfp_mask, caller);
67999
68000 @@ -1655,6 +1717,7 @@ static void *__vmalloc_node(unsigned lon
68001 gfp_mask, prot, node, caller);
68002 }
68003
68004 +#undef __vmalloc
68005 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
68006 {
68007 return __vmalloc_node(size, 1, gfp_mask, prot, -1,
68008 @@ -1678,6 +1741,7 @@ static inline void *__vmalloc_node_flags
68009 * For tight control over page level allocator and protection flags
68010 * use __vmalloc() instead.
68011 */
68012 +#undef vmalloc
68013 void *vmalloc(unsigned long size)
68014 {
68015 return __vmalloc_node_flags(size, -1, GFP_KERNEL | __GFP_HIGHMEM);
68016 @@ -1694,6 +1758,7 @@ EXPORT_SYMBOL(vmalloc);
68017 * For tight control over page level allocator and protection flags
68018 * use __vmalloc() instead.
68019 */
68020 +#undef vzalloc
68021 void *vzalloc(unsigned long size)
68022 {
68023 return __vmalloc_node_flags(size, -1,
68024 @@ -1708,6 +1773,7 @@ EXPORT_SYMBOL(vzalloc);
68025 * The resulting memory area is zeroed so it can be mapped to userspace
68026 * without leaking data.
68027 */
68028 +#undef vmalloc_user
68029 void *vmalloc_user(unsigned long size)
68030 {
68031 struct vm_struct *area;
68032 @@ -1735,6 +1801,7 @@ EXPORT_SYMBOL(vmalloc_user);
68033 * For tight control over page level allocator and protection flags
68034 * use __vmalloc() instead.
68035 */
68036 +#undef vmalloc_node
68037 void *vmalloc_node(unsigned long size, int node)
68038 {
68039 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
68040 @@ -1754,6 +1821,7 @@ EXPORT_SYMBOL(vmalloc_node);
68041 * For tight control over page level allocator and protection flags
68042 * use __vmalloc_node() instead.
68043 */
68044 +#undef vzalloc_node
68045 void *vzalloc_node(unsigned long size, int node)
68046 {
68047 return __vmalloc_node_flags(size, node,
68048 @@ -1776,10 +1844,10 @@ EXPORT_SYMBOL(vzalloc_node);
68049 * For tight control over page level allocator and protection flags
68050 * use __vmalloc() instead.
68051 */
68052 -
68053 +#undef vmalloc_exec
68054 void *vmalloc_exec(unsigned long size)
68055 {
68056 - return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
68057 + return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
68058 -1, __builtin_return_address(0));
68059 }
68060
68061 @@ -1798,6 +1866,7 @@ void *vmalloc_exec(unsigned long size)
68062 * Allocate enough 32bit PA addressable pages to cover @size from the
68063 * page level allocator and map them into contiguous kernel virtual space.
68064 */
68065 +#undef vmalloc_32
68066 void *vmalloc_32(unsigned long size)
68067 {
68068 return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
68069 @@ -1812,6 +1881,7 @@ EXPORT_SYMBOL(vmalloc_32);
68070 * The resulting memory area is 32bit addressable and zeroed so it can be
68071 * mapped to userspace without leaking data.
68072 */
68073 +#undef vmalloc_32_user
68074 void *vmalloc_32_user(unsigned long size)
68075 {
68076 struct vm_struct *area;
68077 @@ -2074,6 +2144,8 @@ int remap_vmalloc_range(struct vm_area_s
68078 unsigned long uaddr = vma->vm_start;
68079 unsigned long usize = vma->vm_end - vma->vm_start;
68080
68081 + BUG_ON(vma->vm_mirror);
68082 +
68083 if ((PAGE_SIZE-1) & (unsigned long)addr)
68084 return -EINVAL;
68085
68086 diff -urNp linux-3.0.4/mm/vmstat.c linux-3.0.4/mm/vmstat.c
68087 --- linux-3.0.4/mm/vmstat.c 2011-07-21 22:17:23.000000000 -0400
68088 +++ linux-3.0.4/mm/vmstat.c 2011-08-23 21:48:14.000000000 -0400
68089 @@ -78,7 +78,7 @@ void vm_events_fold_cpu(int cpu)
68090 *
68091 * vm_stat contains the global counters
68092 */
68093 -atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
68094 +atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
68095 EXPORT_SYMBOL(vm_stat);
68096
68097 #ifdef CONFIG_SMP
68098 @@ -454,7 +454,7 @@ void refresh_cpu_vm_stats(int cpu)
68099 v = p->vm_stat_diff[i];
68100 p->vm_stat_diff[i] = 0;
68101 local_irq_restore(flags);
68102 - atomic_long_add(v, &zone->vm_stat[i]);
68103 + atomic_long_add_unchecked(v, &zone->vm_stat[i]);
68104 global_diff[i] += v;
68105 #ifdef CONFIG_NUMA
68106 /* 3 seconds idle till flush */
68107 @@ -492,7 +492,7 @@ void refresh_cpu_vm_stats(int cpu)
68108
68109 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
68110 if (global_diff[i])
68111 - atomic_long_add(global_diff[i], &vm_stat[i]);
68112 + atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
68113 }
68114
68115 #endif
68116 @@ -1207,10 +1207,20 @@ static int __init setup_vmstat(void)
68117 start_cpu_timer(cpu);
68118 #endif
68119 #ifdef CONFIG_PROC_FS
68120 - proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
68121 - proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
68122 - proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
68123 - proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
68124 + {
68125 + mode_t gr_mode = S_IRUGO;
68126 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
68127 + gr_mode = S_IRUSR;
68128 +#endif
68129 + proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
68130 + proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
68131 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
68132 + proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
68133 +#else
68134 + proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
68135 +#endif
68136 + proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
68137 + }
68138 #endif
68139 return 0;
68140 }
68141 diff -urNp linux-3.0.4/net/8021q/vlan.c linux-3.0.4/net/8021q/vlan.c
68142 --- linux-3.0.4/net/8021q/vlan.c 2011-07-21 22:17:23.000000000 -0400
68143 +++ linux-3.0.4/net/8021q/vlan.c 2011-08-23 21:47:56.000000000 -0400
68144 @@ -591,8 +591,7 @@ static int vlan_ioctl_handler(struct net
68145 err = -EPERM;
68146 if (!capable(CAP_NET_ADMIN))
68147 break;
68148 - if ((args.u.name_type >= 0) &&
68149 - (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
68150 + if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
68151 struct vlan_net *vn;
68152
68153 vn = net_generic(net, vlan_net_id);
68154 diff -urNp linux-3.0.4/net/atm/atm_misc.c linux-3.0.4/net/atm/atm_misc.c
68155 --- linux-3.0.4/net/atm/atm_misc.c 2011-07-21 22:17:23.000000000 -0400
68156 +++ linux-3.0.4/net/atm/atm_misc.c 2011-08-23 21:47:56.000000000 -0400
68157 @@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int
68158 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
68159 return 1;
68160 atm_return(vcc, truesize);
68161 - atomic_inc(&vcc->stats->rx_drop);
68162 + atomic_inc_unchecked(&vcc->stats->rx_drop);
68163 return 0;
68164 }
68165 EXPORT_SYMBOL(atm_charge);
68166 @@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct
68167 }
68168 }
68169 atm_return(vcc, guess);
68170 - atomic_inc(&vcc->stats->rx_drop);
68171 + atomic_inc_unchecked(&vcc->stats->rx_drop);
68172 return NULL;
68173 }
68174 EXPORT_SYMBOL(atm_alloc_charge);
68175 @@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
68176
68177 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
68178 {
68179 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
68180 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
68181 __SONET_ITEMS
68182 #undef __HANDLE_ITEM
68183 }
68184 @@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
68185
68186 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
68187 {
68188 -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
68189 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
68190 __SONET_ITEMS
68191 #undef __HANDLE_ITEM
68192 }
68193 diff -urNp linux-3.0.4/net/atm/lec.h linux-3.0.4/net/atm/lec.h
68194 --- linux-3.0.4/net/atm/lec.h 2011-07-21 22:17:23.000000000 -0400
68195 +++ linux-3.0.4/net/atm/lec.h 2011-08-23 21:47:56.000000000 -0400
68196 @@ -48,7 +48,7 @@ struct lane2_ops {
68197 const u8 *tlvs, u32 sizeoftlvs);
68198 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
68199 const u8 *tlvs, u32 sizeoftlvs);
68200 -};
68201 +} __no_const;
68202
68203 /*
68204 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
68205 diff -urNp linux-3.0.4/net/atm/mpc.h linux-3.0.4/net/atm/mpc.h
68206 --- linux-3.0.4/net/atm/mpc.h 2011-07-21 22:17:23.000000000 -0400
68207 +++ linux-3.0.4/net/atm/mpc.h 2011-08-23 21:47:56.000000000 -0400
68208 @@ -33,7 +33,7 @@ struct mpoa_client {
68209 struct mpc_parameters parameters; /* parameters for this client */
68210
68211 const struct net_device_ops *old_ops;
68212 - struct net_device_ops new_ops;
68213 + net_device_ops_no_const new_ops;
68214 };
68215
68216
68217 diff -urNp linux-3.0.4/net/atm/mpoa_caches.c linux-3.0.4/net/atm/mpoa_caches.c
68218 --- linux-3.0.4/net/atm/mpoa_caches.c 2011-07-21 22:17:23.000000000 -0400
68219 +++ linux-3.0.4/net/atm/mpoa_caches.c 2011-08-23 21:48:14.000000000 -0400
68220 @@ -255,6 +255,8 @@ static void check_resolving_entries(stru
68221 struct timeval now;
68222 struct k_message msg;
68223
68224 + pax_track_stack();
68225 +
68226 do_gettimeofday(&now);
68227
68228 read_lock_bh(&client->ingress_lock);
68229 diff -urNp linux-3.0.4/net/atm/proc.c linux-3.0.4/net/atm/proc.c
68230 --- linux-3.0.4/net/atm/proc.c 2011-07-21 22:17:23.000000000 -0400
68231 +++ linux-3.0.4/net/atm/proc.c 2011-08-23 21:47:56.000000000 -0400
68232 @@ -45,9 +45,9 @@ static void add_stats(struct seq_file *s
68233 const struct k_atm_aal_stats *stats)
68234 {
68235 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
68236 - atomic_read(&stats->tx), atomic_read(&stats->tx_err),
68237 - atomic_read(&stats->rx), atomic_read(&stats->rx_err),
68238 - atomic_read(&stats->rx_drop));
68239 + atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
68240 + atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
68241 + atomic_read_unchecked(&stats->rx_drop));
68242 }
68243
68244 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
68245 diff -urNp linux-3.0.4/net/atm/resources.c linux-3.0.4/net/atm/resources.c
68246 --- linux-3.0.4/net/atm/resources.c 2011-07-21 22:17:23.000000000 -0400
68247 +++ linux-3.0.4/net/atm/resources.c 2011-08-23 21:47:56.000000000 -0400
68248 @@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
68249 static void copy_aal_stats(struct k_atm_aal_stats *from,
68250 struct atm_aal_stats *to)
68251 {
68252 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
68253 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
68254 __AAL_STAT_ITEMS
68255 #undef __HANDLE_ITEM
68256 }
68257 @@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_
68258 static void subtract_aal_stats(struct k_atm_aal_stats *from,
68259 struct atm_aal_stats *to)
68260 {
68261 -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
68262 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
68263 __AAL_STAT_ITEMS
68264 #undef __HANDLE_ITEM
68265 }
68266 diff -urNp linux-3.0.4/net/batman-adv/hard-interface.c linux-3.0.4/net/batman-adv/hard-interface.c
68267 --- linux-3.0.4/net/batman-adv/hard-interface.c 2011-07-21 22:17:23.000000000 -0400
68268 +++ linux-3.0.4/net/batman-adv/hard-interface.c 2011-08-23 21:47:56.000000000 -0400
68269 @@ -351,8 +351,8 @@ int hardif_enable_interface(struct hard_
68270 hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
68271 dev_add_pack(&hard_iface->batman_adv_ptype);
68272
68273 - atomic_set(&hard_iface->seqno, 1);
68274 - atomic_set(&hard_iface->frag_seqno, 1);
68275 + atomic_set_unchecked(&hard_iface->seqno, 1);
68276 + atomic_set_unchecked(&hard_iface->frag_seqno, 1);
68277 bat_info(hard_iface->soft_iface, "Adding interface: %s\n",
68278 hard_iface->net_dev->name);
68279
68280 diff -urNp linux-3.0.4/net/batman-adv/routing.c linux-3.0.4/net/batman-adv/routing.c
68281 --- linux-3.0.4/net/batman-adv/routing.c 2011-07-21 22:17:23.000000000 -0400
68282 +++ linux-3.0.4/net/batman-adv/routing.c 2011-08-23 21:47:56.000000000 -0400
68283 @@ -627,7 +627,7 @@ void receive_bat_packet(struct ethhdr *e
68284 return;
68285
68286 /* could be changed by schedule_own_packet() */
68287 - if_incoming_seqno = atomic_read(&if_incoming->seqno);
68288 + if_incoming_seqno = atomic_read_unchecked(&if_incoming->seqno);
68289
68290 has_directlink_flag = (batman_packet->flags & DIRECTLINK ? 1 : 0);
68291
68292 diff -urNp linux-3.0.4/net/batman-adv/send.c linux-3.0.4/net/batman-adv/send.c
68293 --- linux-3.0.4/net/batman-adv/send.c 2011-07-21 22:17:23.000000000 -0400
68294 +++ linux-3.0.4/net/batman-adv/send.c 2011-08-23 21:47:56.000000000 -0400
68295 @@ -279,7 +279,7 @@ void schedule_own_packet(struct hard_ifa
68296
68297 /* change sequence number to network order */
68298 batman_packet->seqno =
68299 - htonl((uint32_t)atomic_read(&hard_iface->seqno));
68300 + htonl((uint32_t)atomic_read_unchecked(&hard_iface->seqno));
68301
68302 if (vis_server == VIS_TYPE_SERVER_SYNC)
68303 batman_packet->flags |= VIS_SERVER;
68304 @@ -293,7 +293,7 @@ void schedule_own_packet(struct hard_ifa
68305 else
68306 batman_packet->gw_flags = 0;
68307
68308 - atomic_inc(&hard_iface->seqno);
68309 + atomic_inc_unchecked(&hard_iface->seqno);
68310
68311 slide_own_bcast_window(hard_iface);
68312 send_time = own_send_time(bat_priv);
68313 diff -urNp linux-3.0.4/net/batman-adv/soft-interface.c linux-3.0.4/net/batman-adv/soft-interface.c
68314 --- linux-3.0.4/net/batman-adv/soft-interface.c 2011-07-21 22:17:23.000000000 -0400
68315 +++ linux-3.0.4/net/batman-adv/soft-interface.c 2011-08-23 21:47:56.000000000 -0400
68316 @@ -628,7 +628,7 @@ int interface_tx(struct sk_buff *skb, st
68317
68318 /* set broadcast sequence number */
68319 bcast_packet->seqno =
68320 - htonl(atomic_inc_return(&bat_priv->bcast_seqno));
68321 + htonl(atomic_inc_return_unchecked(&bat_priv->bcast_seqno));
68322
68323 add_bcast_packet_to_list(bat_priv, skb);
68324
68325 @@ -830,7 +830,7 @@ struct net_device *softif_create(char *n
68326 atomic_set(&bat_priv->batman_queue_left, BATMAN_QUEUE_LEN);
68327
68328 atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
68329 - atomic_set(&bat_priv->bcast_seqno, 1);
68330 + atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
68331 atomic_set(&bat_priv->tt_local_changed, 0);
68332
68333 bat_priv->primary_if = NULL;
68334 diff -urNp linux-3.0.4/net/batman-adv/types.h linux-3.0.4/net/batman-adv/types.h
68335 --- linux-3.0.4/net/batman-adv/types.h 2011-07-21 22:17:23.000000000 -0400
68336 +++ linux-3.0.4/net/batman-adv/types.h 2011-08-23 21:47:56.000000000 -0400
68337 @@ -38,8 +38,8 @@ struct hard_iface {
68338 int16_t if_num;
68339 char if_status;
68340 struct net_device *net_dev;
68341 - atomic_t seqno;
68342 - atomic_t frag_seqno;
68343 + atomic_unchecked_t seqno;
68344 + atomic_unchecked_t frag_seqno;
68345 unsigned char *packet_buff;
68346 int packet_len;
68347 struct kobject *hardif_obj;
68348 @@ -142,7 +142,7 @@ struct bat_priv {
68349 atomic_t orig_interval; /* uint */
68350 atomic_t hop_penalty; /* uint */
68351 atomic_t log_level; /* uint */
68352 - atomic_t bcast_seqno;
68353 + atomic_unchecked_t bcast_seqno;
68354 atomic_t bcast_queue_left;
68355 atomic_t batman_queue_left;
68356 char num_ifaces;
68357 diff -urNp linux-3.0.4/net/batman-adv/unicast.c linux-3.0.4/net/batman-adv/unicast.c
68358 --- linux-3.0.4/net/batman-adv/unicast.c 2011-07-21 22:17:23.000000000 -0400
68359 +++ linux-3.0.4/net/batman-adv/unicast.c 2011-08-23 21:47:56.000000000 -0400
68360 @@ -265,7 +265,7 @@ int frag_send_skb(struct sk_buff *skb, s
68361 frag1->flags = UNI_FRAG_HEAD | large_tail;
68362 frag2->flags = large_tail;
68363
68364 - seqno = atomic_add_return(2, &hard_iface->frag_seqno);
68365 + seqno = atomic_add_return_unchecked(2, &hard_iface->frag_seqno);
68366 frag1->seqno = htons(seqno - 1);
68367 frag2->seqno = htons(seqno);
68368
68369 diff -urNp linux-3.0.4/net/bridge/br_multicast.c linux-3.0.4/net/bridge/br_multicast.c
68370 --- linux-3.0.4/net/bridge/br_multicast.c 2011-07-21 22:17:23.000000000 -0400
68371 +++ linux-3.0.4/net/bridge/br_multicast.c 2011-08-23 21:47:56.000000000 -0400
68372 @@ -1485,7 +1485,7 @@ static int br_multicast_ipv6_rcv(struct
68373 nexthdr = ip6h->nexthdr;
68374 offset = ipv6_skip_exthdr(skb, sizeof(*ip6h), &nexthdr);
68375
68376 - if (offset < 0 || nexthdr != IPPROTO_ICMPV6)
68377 + if (nexthdr != IPPROTO_ICMPV6)
68378 return 0;
68379
68380 /* Okay, we found ICMPv6 header */
68381 diff -urNp linux-3.0.4/net/bridge/netfilter/ebtables.c linux-3.0.4/net/bridge/netfilter/ebtables.c
68382 --- linux-3.0.4/net/bridge/netfilter/ebtables.c 2011-07-21 22:17:23.000000000 -0400
68383 +++ linux-3.0.4/net/bridge/netfilter/ebtables.c 2011-08-23 21:48:14.000000000 -0400
68384 @@ -1512,7 +1512,7 @@ static int do_ebt_get_ctl(struct sock *s
68385 tmp.valid_hooks = t->table->valid_hooks;
68386 }
68387 mutex_unlock(&ebt_mutex);
68388 - if (copy_to_user(user, &tmp, *len) != 0){
68389 + if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0){
68390 BUGPRINT("c2u Didn't work\n");
68391 ret = -EFAULT;
68392 break;
68393 @@ -1780,6 +1780,8 @@ static int compat_copy_everything_to_use
68394 int ret;
68395 void __user *pos;
68396
68397 + pax_track_stack();
68398 +
68399 memset(&tinfo, 0, sizeof(tinfo));
68400
68401 if (cmd == EBT_SO_GET_ENTRIES) {
68402 diff -urNp linux-3.0.4/net/caif/caif_socket.c linux-3.0.4/net/caif/caif_socket.c
68403 --- linux-3.0.4/net/caif/caif_socket.c 2011-07-21 22:17:23.000000000 -0400
68404 +++ linux-3.0.4/net/caif/caif_socket.c 2011-08-23 21:47:56.000000000 -0400
68405 @@ -48,19 +48,20 @@ static struct dentry *debugfsdir;
68406 #ifdef CONFIG_DEBUG_FS
68407 struct debug_fs_counter {
68408 atomic_t caif_nr_socks;
68409 - atomic_t caif_sock_create;
68410 - atomic_t num_connect_req;
68411 - atomic_t num_connect_resp;
68412 - atomic_t num_connect_fail_resp;
68413 - atomic_t num_disconnect;
68414 - atomic_t num_remote_shutdown_ind;
68415 - atomic_t num_tx_flow_off_ind;
68416 - atomic_t num_tx_flow_on_ind;
68417 - atomic_t num_rx_flow_off;
68418 - atomic_t num_rx_flow_on;
68419 + atomic_unchecked_t caif_sock_create;
68420 + atomic_unchecked_t num_connect_req;
68421 + atomic_unchecked_t num_connect_resp;
68422 + atomic_unchecked_t num_connect_fail_resp;
68423 + atomic_unchecked_t num_disconnect;
68424 + atomic_unchecked_t num_remote_shutdown_ind;
68425 + atomic_unchecked_t num_tx_flow_off_ind;
68426 + atomic_unchecked_t num_tx_flow_on_ind;
68427 + atomic_unchecked_t num_rx_flow_off;
68428 + atomic_unchecked_t num_rx_flow_on;
68429 };
68430 static struct debug_fs_counter cnt;
68431 #define dbfs_atomic_inc(v) atomic_inc_return(v)
68432 +#define dbfs_atomic_inc_unchecked(v) atomic_inc_return_unchecked(v)
68433 #define dbfs_atomic_dec(v) atomic_dec_return(v)
68434 #else
68435 #define dbfs_atomic_inc(v) 0
68436 @@ -161,7 +162,7 @@ static int caif_queue_rcv_skb(struct soc
68437 atomic_read(&cf_sk->sk.sk_rmem_alloc),
68438 sk_rcvbuf_lowwater(cf_sk));
68439 set_rx_flow_off(cf_sk);
68440 - dbfs_atomic_inc(&cnt.num_rx_flow_off);
68441 + dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
68442 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
68443 }
68444
68445 @@ -172,7 +173,7 @@ static int caif_queue_rcv_skb(struct soc
68446 set_rx_flow_off(cf_sk);
68447 if (net_ratelimit())
68448 pr_debug("sending flow OFF due to rmem_schedule\n");
68449 - dbfs_atomic_inc(&cnt.num_rx_flow_off);
68450 + dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
68451 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
68452 }
68453 skb->dev = NULL;
68454 @@ -233,14 +234,14 @@ static void caif_ctrl_cb(struct cflayer
68455 switch (flow) {
68456 case CAIF_CTRLCMD_FLOW_ON_IND:
68457 /* OK from modem to start sending again */
68458 - dbfs_atomic_inc(&cnt.num_tx_flow_on_ind);
68459 + dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_on_ind);
68460 set_tx_flow_on(cf_sk);
68461 cf_sk->sk.sk_state_change(&cf_sk->sk);
68462 break;
68463
68464 case CAIF_CTRLCMD_FLOW_OFF_IND:
68465 /* Modem asks us to shut up */
68466 - dbfs_atomic_inc(&cnt.num_tx_flow_off_ind);
68467 + dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_off_ind);
68468 set_tx_flow_off(cf_sk);
68469 cf_sk->sk.sk_state_change(&cf_sk->sk);
68470 break;
68471 @@ -249,7 +250,7 @@ static void caif_ctrl_cb(struct cflayer
68472 /* We're now connected */
68473 caif_client_register_refcnt(&cf_sk->layer,
68474 cfsk_hold, cfsk_put);
68475 - dbfs_atomic_inc(&cnt.num_connect_resp);
68476 + dbfs_atomic_inc_unchecked(&cnt.num_connect_resp);
68477 cf_sk->sk.sk_state = CAIF_CONNECTED;
68478 set_tx_flow_on(cf_sk);
68479 cf_sk->sk.sk_state_change(&cf_sk->sk);
68480 @@ -263,7 +264,7 @@ static void caif_ctrl_cb(struct cflayer
68481
68482 case CAIF_CTRLCMD_INIT_FAIL_RSP:
68483 /* Connect request failed */
68484 - dbfs_atomic_inc(&cnt.num_connect_fail_resp);
68485 + dbfs_atomic_inc_unchecked(&cnt.num_connect_fail_resp);
68486 cf_sk->sk.sk_err = ECONNREFUSED;
68487 cf_sk->sk.sk_state = CAIF_DISCONNECTED;
68488 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
68489 @@ -277,7 +278,7 @@ static void caif_ctrl_cb(struct cflayer
68490
68491 case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND:
68492 /* Modem has closed this connection, or device is down. */
68493 - dbfs_atomic_inc(&cnt.num_remote_shutdown_ind);
68494 + dbfs_atomic_inc_unchecked(&cnt.num_remote_shutdown_ind);
68495 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
68496 cf_sk->sk.sk_err = ECONNRESET;
68497 set_rx_flow_on(cf_sk);
68498 @@ -297,7 +298,7 @@ static void caif_check_flow_release(stru
68499 return;
68500
68501 if (atomic_read(&sk->sk_rmem_alloc) <= sk_rcvbuf_lowwater(cf_sk)) {
68502 - dbfs_atomic_inc(&cnt.num_rx_flow_on);
68503 + dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_on);
68504 set_rx_flow_on(cf_sk);
68505 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_ON_REQ);
68506 }
68507 @@ -854,7 +855,7 @@ static int caif_connect(struct socket *s
68508 /*ifindex = id of the interface.*/
68509 cf_sk->conn_req.ifindex = cf_sk->sk.sk_bound_dev_if;
68510
68511 - dbfs_atomic_inc(&cnt.num_connect_req);
68512 + dbfs_atomic_inc_unchecked(&cnt.num_connect_req);
68513 cf_sk->layer.receive = caif_sktrecv_cb;
68514
68515 err = caif_connect_client(sock_net(sk), &cf_sk->conn_req,
68516 @@ -943,7 +944,7 @@ static int caif_release(struct socket *s
68517 spin_unlock_bh(&sk->sk_receive_queue.lock);
68518 sock->sk = NULL;
68519
68520 - dbfs_atomic_inc(&cnt.num_disconnect);
68521 + dbfs_atomic_inc_unchecked(&cnt.num_disconnect);
68522
68523 WARN_ON(IS_ERR(cf_sk->debugfs_socket_dir));
68524 if (cf_sk->debugfs_socket_dir != NULL)
68525 @@ -1122,7 +1123,7 @@ static int caif_create(struct net *net,
68526 cf_sk->conn_req.protocol = protocol;
68527 /* Increase the number of sockets created. */
68528 dbfs_atomic_inc(&cnt.caif_nr_socks);
68529 - num = dbfs_atomic_inc(&cnt.caif_sock_create);
68530 + num = dbfs_atomic_inc_unchecked(&cnt.caif_sock_create);
68531 #ifdef CONFIG_DEBUG_FS
68532 if (!IS_ERR(debugfsdir)) {
68533
68534 diff -urNp linux-3.0.4/net/caif/cfctrl.c linux-3.0.4/net/caif/cfctrl.c
68535 --- linux-3.0.4/net/caif/cfctrl.c 2011-07-21 22:17:23.000000000 -0400
68536 +++ linux-3.0.4/net/caif/cfctrl.c 2011-08-23 21:48:14.000000000 -0400
68537 @@ -9,6 +9,7 @@
68538 #include <linux/stddef.h>
68539 #include <linux/spinlock.h>
68540 #include <linux/slab.h>
68541 +#include <linux/sched.h>
68542 #include <net/caif/caif_layer.h>
68543 #include <net/caif/cfpkt.h>
68544 #include <net/caif/cfctrl.h>
68545 @@ -45,8 +46,8 @@ struct cflayer *cfctrl_create(void)
68546 dev_info.id = 0xff;
68547 memset(this, 0, sizeof(*this));
68548 cfsrvl_init(&this->serv, 0, &dev_info, false);
68549 - atomic_set(&this->req_seq_no, 1);
68550 - atomic_set(&this->rsp_seq_no, 1);
68551 + atomic_set_unchecked(&this->req_seq_no, 1);
68552 + atomic_set_unchecked(&this->rsp_seq_no, 1);
68553 this->serv.layer.receive = cfctrl_recv;
68554 sprintf(this->serv.layer.name, "ctrl");
68555 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
68556 @@ -132,8 +133,8 @@ static void cfctrl_insert_req(struct cfc
68557 struct cfctrl_request_info *req)
68558 {
68559 spin_lock_bh(&ctrl->info_list_lock);
68560 - atomic_inc(&ctrl->req_seq_no);
68561 - req->sequence_no = atomic_read(&ctrl->req_seq_no);
68562 + atomic_inc_unchecked(&ctrl->req_seq_no);
68563 + req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
68564 list_add_tail(&req->list, &ctrl->list);
68565 spin_unlock_bh(&ctrl->info_list_lock);
68566 }
68567 @@ -151,7 +152,7 @@ static struct cfctrl_request_info *cfctr
68568 if (p != first)
68569 pr_warn("Requests are not received in order\n");
68570
68571 - atomic_set(&ctrl->rsp_seq_no,
68572 + atomic_set_unchecked(&ctrl->rsp_seq_no,
68573 p->sequence_no);
68574 list_del(&p->list);
68575 goto out;
68576 @@ -364,6 +365,7 @@ static int cfctrl_recv(struct cflayer *l
68577 struct cfctrl *cfctrl = container_obj(layer);
68578 struct cfctrl_request_info rsp, *req;
68579
68580 + pax_track_stack();
68581
68582 cfpkt_extr_head(pkt, &cmdrsp, 1);
68583 cmd = cmdrsp & CFCTRL_CMD_MASK;
68584 diff -urNp linux-3.0.4/net/core/datagram.c linux-3.0.4/net/core/datagram.c
68585 --- linux-3.0.4/net/core/datagram.c 2011-07-21 22:17:23.000000000 -0400
68586 +++ linux-3.0.4/net/core/datagram.c 2011-08-23 21:47:56.000000000 -0400
68587 @@ -285,7 +285,7 @@ int skb_kill_datagram(struct sock *sk, s
68588 }
68589
68590 kfree_skb(skb);
68591 - atomic_inc(&sk->sk_drops);
68592 + atomic_inc_unchecked(&sk->sk_drops);
68593 sk_mem_reclaim_partial(sk);
68594
68595 return err;
68596 diff -urNp linux-3.0.4/net/core/dev.c linux-3.0.4/net/core/dev.c
68597 --- linux-3.0.4/net/core/dev.c 2011-07-21 22:17:23.000000000 -0400
68598 +++ linux-3.0.4/net/core/dev.c 2011-08-23 21:48:14.000000000 -0400
68599 @@ -1125,10 +1125,14 @@ void dev_load(struct net *net, const cha
68600 if (no_module && capable(CAP_NET_ADMIN))
68601 no_module = request_module("netdev-%s", name);
68602 if (no_module && capable(CAP_SYS_MODULE)) {
68603 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
68604 + ___request_module(true, "grsec_modharden_netdev", "%s", name);
68605 +#else
68606 if (!request_module("%s", name))
68607 pr_err("Loading kernel module for a network device "
68608 "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
68609 "instead\n", name);
68610 +#endif
68611 }
68612 }
68613 EXPORT_SYMBOL(dev_load);
68614 @@ -1959,7 +1963,7 @@ static int illegal_highdma(struct net_de
68615
68616 struct dev_gso_cb {
68617 void (*destructor)(struct sk_buff *skb);
68618 -};
68619 +} __no_const;
68620
68621 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
68622
68623 @@ -2912,7 +2916,7 @@ int netif_rx_ni(struct sk_buff *skb)
68624 }
68625 EXPORT_SYMBOL(netif_rx_ni);
68626
68627 -static void net_tx_action(struct softirq_action *h)
68628 +static void net_tx_action(void)
68629 {
68630 struct softnet_data *sd = &__get_cpu_var(softnet_data);
68631
68632 @@ -3761,7 +3765,7 @@ void netif_napi_del(struct napi_struct *
68633 }
68634 EXPORT_SYMBOL(netif_napi_del);
68635
68636 -static void net_rx_action(struct softirq_action *h)
68637 +static void net_rx_action(void)
68638 {
68639 struct softnet_data *sd = &__get_cpu_var(softnet_data);
68640 unsigned long time_limit = jiffies + 2;
68641 diff -urNp linux-3.0.4/net/core/flow.c linux-3.0.4/net/core/flow.c
68642 --- linux-3.0.4/net/core/flow.c 2011-07-21 22:17:23.000000000 -0400
68643 +++ linux-3.0.4/net/core/flow.c 2011-08-23 21:47:56.000000000 -0400
68644 @@ -60,7 +60,7 @@ struct flow_cache {
68645 struct timer_list rnd_timer;
68646 };
68647
68648 -atomic_t flow_cache_genid = ATOMIC_INIT(0);
68649 +atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
68650 EXPORT_SYMBOL(flow_cache_genid);
68651 static struct flow_cache flow_cache_global;
68652 static struct kmem_cache *flow_cachep __read_mostly;
68653 @@ -85,7 +85,7 @@ static void flow_cache_new_hashrnd(unsig
68654
68655 static int flow_entry_valid(struct flow_cache_entry *fle)
68656 {
68657 - if (atomic_read(&flow_cache_genid) != fle->genid)
68658 + if (atomic_read_unchecked(&flow_cache_genid) != fle->genid)
68659 return 0;
68660 if (fle->object && !fle->object->ops->check(fle->object))
68661 return 0;
68662 @@ -253,7 +253,7 @@ flow_cache_lookup(struct net *net, const
68663 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
68664 fcp->hash_count++;
68665 }
68666 - } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
68667 + } else if (likely(fle->genid == atomic_read_unchecked(&flow_cache_genid))) {
68668 flo = fle->object;
68669 if (!flo)
68670 goto ret_object;
68671 @@ -274,7 +274,7 @@ nocache:
68672 }
68673 flo = resolver(net, key, family, dir, flo, ctx);
68674 if (fle) {
68675 - fle->genid = atomic_read(&flow_cache_genid);
68676 + fle->genid = atomic_read_unchecked(&flow_cache_genid);
68677 if (!IS_ERR(flo))
68678 fle->object = flo;
68679 else
68680 diff -urNp linux-3.0.4/net/core/rtnetlink.c linux-3.0.4/net/core/rtnetlink.c
68681 --- linux-3.0.4/net/core/rtnetlink.c 2011-07-21 22:17:23.000000000 -0400
68682 +++ linux-3.0.4/net/core/rtnetlink.c 2011-08-23 21:47:56.000000000 -0400
68683 @@ -56,7 +56,7 @@
68684 struct rtnl_link {
68685 rtnl_doit_func doit;
68686 rtnl_dumpit_func dumpit;
68687 -};
68688 +} __no_const;
68689
68690 static DEFINE_MUTEX(rtnl_mutex);
68691
68692 diff -urNp linux-3.0.4/net/core/skbuff.c linux-3.0.4/net/core/skbuff.c
68693 --- linux-3.0.4/net/core/skbuff.c 2011-07-21 22:17:23.000000000 -0400
68694 +++ linux-3.0.4/net/core/skbuff.c 2011-08-23 21:48:14.000000000 -0400
68695 @@ -1543,6 +1543,8 @@ int skb_splice_bits(struct sk_buff *skb,
68696 struct sock *sk = skb->sk;
68697 int ret = 0;
68698
68699 + pax_track_stack();
68700 +
68701 if (splice_grow_spd(pipe, &spd))
68702 return -ENOMEM;
68703
68704 diff -urNp linux-3.0.4/net/core/sock.c linux-3.0.4/net/core/sock.c
68705 --- linux-3.0.4/net/core/sock.c 2011-07-21 22:17:23.000000000 -0400
68706 +++ linux-3.0.4/net/core/sock.c 2011-08-23 21:48:14.000000000 -0400
68707 @@ -291,7 +291,7 @@ int sock_queue_rcv_skb(struct sock *sk,
68708 */
68709 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
68710 (unsigned)sk->sk_rcvbuf) {
68711 - atomic_inc(&sk->sk_drops);
68712 + atomic_inc_unchecked(&sk->sk_drops);
68713 return -ENOMEM;
68714 }
68715
68716 @@ -300,7 +300,7 @@ int sock_queue_rcv_skb(struct sock *sk,
68717 return err;
68718
68719 if (!sk_rmem_schedule(sk, skb->truesize)) {
68720 - atomic_inc(&sk->sk_drops);
68721 + atomic_inc_unchecked(&sk->sk_drops);
68722 return -ENOBUFS;
68723 }
68724
68725 @@ -320,7 +320,7 @@ int sock_queue_rcv_skb(struct sock *sk,
68726 skb_dst_force(skb);
68727
68728 spin_lock_irqsave(&list->lock, flags);
68729 - skb->dropcount = atomic_read(&sk->sk_drops);
68730 + skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
68731 __skb_queue_tail(list, skb);
68732 spin_unlock_irqrestore(&list->lock, flags);
68733
68734 @@ -340,7 +340,7 @@ int sk_receive_skb(struct sock *sk, stru
68735 skb->dev = NULL;
68736
68737 if (sk_rcvqueues_full(sk, skb)) {
68738 - atomic_inc(&sk->sk_drops);
68739 + atomic_inc_unchecked(&sk->sk_drops);
68740 goto discard_and_relse;
68741 }
68742 if (nested)
68743 @@ -358,7 +358,7 @@ int sk_receive_skb(struct sock *sk, stru
68744 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
68745 } else if (sk_add_backlog(sk, skb)) {
68746 bh_unlock_sock(sk);
68747 - atomic_inc(&sk->sk_drops);
68748 + atomic_inc_unchecked(&sk->sk_drops);
68749 goto discard_and_relse;
68750 }
68751
68752 @@ -921,7 +921,7 @@ int sock_getsockopt(struct socket *sock,
68753 if (len > sizeof(peercred))
68754 len = sizeof(peercred);
68755 cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
68756 - if (copy_to_user(optval, &peercred, len))
68757 + if (len > sizeof(peercred) || copy_to_user(optval, &peercred, len))
68758 return -EFAULT;
68759 goto lenout;
68760 }
68761 @@ -934,7 +934,7 @@ int sock_getsockopt(struct socket *sock,
68762 return -ENOTCONN;
68763 if (lv < len)
68764 return -EINVAL;
68765 - if (copy_to_user(optval, address, len))
68766 + if (len > sizeof(address) || copy_to_user(optval, address, len))
68767 return -EFAULT;
68768 goto lenout;
68769 }
68770 @@ -967,7 +967,7 @@ int sock_getsockopt(struct socket *sock,
68771
68772 if (len > lv)
68773 len = lv;
68774 - if (copy_to_user(optval, &v, len))
68775 + if (len > sizeof(v) || copy_to_user(optval, &v, len))
68776 return -EFAULT;
68777 lenout:
68778 if (put_user(len, optlen))
68779 @@ -2023,7 +2023,7 @@ void sock_init_data(struct socket *sock,
68780 */
68781 smp_wmb();
68782 atomic_set(&sk->sk_refcnt, 1);
68783 - atomic_set(&sk->sk_drops, 0);
68784 + atomic_set_unchecked(&sk->sk_drops, 0);
68785 }
68786 EXPORT_SYMBOL(sock_init_data);
68787
68788 diff -urNp linux-3.0.4/net/decnet/sysctl_net_decnet.c linux-3.0.4/net/decnet/sysctl_net_decnet.c
68789 --- linux-3.0.4/net/decnet/sysctl_net_decnet.c 2011-07-21 22:17:23.000000000 -0400
68790 +++ linux-3.0.4/net/decnet/sysctl_net_decnet.c 2011-08-23 21:47:56.000000000 -0400
68791 @@ -173,7 +173,7 @@ static int dn_node_address_handler(ctl_t
68792
68793 if (len > *lenp) len = *lenp;
68794
68795 - if (copy_to_user(buffer, addr, len))
68796 + if (len > sizeof addr || copy_to_user(buffer, addr, len))
68797 return -EFAULT;
68798
68799 *lenp = len;
68800 @@ -236,7 +236,7 @@ static int dn_def_dev_handler(ctl_table
68801
68802 if (len > *lenp) len = *lenp;
68803
68804 - if (copy_to_user(buffer, devname, len))
68805 + if (len > sizeof devname || copy_to_user(buffer, devname, len))
68806 return -EFAULT;
68807
68808 *lenp = len;
68809 diff -urNp linux-3.0.4/net/econet/Kconfig linux-3.0.4/net/econet/Kconfig
68810 --- linux-3.0.4/net/econet/Kconfig 2011-07-21 22:17:23.000000000 -0400
68811 +++ linux-3.0.4/net/econet/Kconfig 2011-08-23 21:48:14.000000000 -0400
68812 @@ -4,7 +4,7 @@
68813
68814 config ECONET
68815 tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
68816 - depends on EXPERIMENTAL && INET
68817 + depends on EXPERIMENTAL && INET && BROKEN
68818 ---help---
68819 Econet is a fairly old and slow networking protocol mainly used by
68820 Acorn computers to access file and print servers. It uses native
68821 diff -urNp linux-3.0.4/net/ipv4/fib_frontend.c linux-3.0.4/net/ipv4/fib_frontend.c
68822 --- linux-3.0.4/net/ipv4/fib_frontend.c 2011-07-21 22:17:23.000000000 -0400
68823 +++ linux-3.0.4/net/ipv4/fib_frontend.c 2011-08-23 21:47:56.000000000 -0400
68824 @@ -970,12 +970,12 @@ static int fib_inetaddr_event(struct not
68825 #ifdef CONFIG_IP_ROUTE_MULTIPATH
68826 fib_sync_up(dev);
68827 #endif
68828 - atomic_inc(&net->ipv4.dev_addr_genid);
68829 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
68830 rt_cache_flush(dev_net(dev), -1);
68831 break;
68832 case NETDEV_DOWN:
68833 fib_del_ifaddr(ifa, NULL);
68834 - atomic_inc(&net->ipv4.dev_addr_genid);
68835 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
68836 if (ifa->ifa_dev->ifa_list == NULL) {
68837 /* Last address was deleted from this interface.
68838 * Disable IP.
68839 @@ -1011,7 +1011,7 @@ static int fib_netdev_event(struct notif
68840 #ifdef CONFIG_IP_ROUTE_MULTIPATH
68841 fib_sync_up(dev);
68842 #endif
68843 - atomic_inc(&net->ipv4.dev_addr_genid);
68844 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
68845 rt_cache_flush(dev_net(dev), -1);
68846 break;
68847 case NETDEV_DOWN:
68848 diff -urNp linux-3.0.4/net/ipv4/fib_semantics.c linux-3.0.4/net/ipv4/fib_semantics.c
68849 --- linux-3.0.4/net/ipv4/fib_semantics.c 2011-07-21 22:17:23.000000000 -0400
68850 +++ linux-3.0.4/net/ipv4/fib_semantics.c 2011-08-23 21:47:56.000000000 -0400
68851 @@ -691,7 +691,7 @@ __be32 fib_info_update_nh_saddr(struct n
68852 nh->nh_saddr = inet_select_addr(nh->nh_dev,
68853 nh->nh_gw,
68854 nh->nh_parent->fib_scope);
68855 - nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
68856 + nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
68857
68858 return nh->nh_saddr;
68859 }
68860 diff -urNp linux-3.0.4/net/ipv4/inet_diag.c linux-3.0.4/net/ipv4/inet_diag.c
68861 --- linux-3.0.4/net/ipv4/inet_diag.c 2011-07-21 22:17:23.000000000 -0400
68862 +++ linux-3.0.4/net/ipv4/inet_diag.c 2011-08-23 21:48:14.000000000 -0400
68863 @@ -114,8 +114,14 @@ static int inet_csk_diag_fill(struct soc
68864 r->idiag_retrans = 0;
68865
68866 r->id.idiag_if = sk->sk_bound_dev_if;
68867 +
68868 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68869 + r->id.idiag_cookie[0] = 0;
68870 + r->id.idiag_cookie[1] = 0;
68871 +#else
68872 r->id.idiag_cookie[0] = (u32)(unsigned long)sk;
68873 r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
68874 +#endif
68875
68876 r->id.idiag_sport = inet->inet_sport;
68877 r->id.idiag_dport = inet->inet_dport;
68878 @@ -201,8 +207,15 @@ static int inet_twsk_diag_fill(struct in
68879 r->idiag_family = tw->tw_family;
68880 r->idiag_retrans = 0;
68881 r->id.idiag_if = tw->tw_bound_dev_if;
68882 +
68883 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68884 + r->id.idiag_cookie[0] = 0;
68885 + r->id.idiag_cookie[1] = 0;
68886 +#else
68887 r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
68888 r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
68889 +#endif
68890 +
68891 r->id.idiag_sport = tw->tw_sport;
68892 r->id.idiag_dport = tw->tw_dport;
68893 r->id.idiag_src[0] = tw->tw_rcv_saddr;
68894 @@ -285,12 +298,14 @@ static int inet_diag_get_exact(struct sk
68895 if (sk == NULL)
68896 goto unlock;
68897
68898 +#ifndef CONFIG_GRKERNSEC_HIDESYM
68899 err = -ESTALE;
68900 if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE ||
68901 req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) &&
68902 ((u32)(unsigned long)sk != req->id.idiag_cookie[0] ||
68903 (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1]))
68904 goto out;
68905 +#endif
68906
68907 err = -ENOMEM;
68908 rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
68909 @@ -580,8 +595,14 @@ static int inet_diag_fill_req(struct sk_
68910 r->idiag_retrans = req->retrans;
68911
68912 r->id.idiag_if = sk->sk_bound_dev_if;
68913 +
68914 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68915 + r->id.idiag_cookie[0] = 0;
68916 + r->id.idiag_cookie[1] = 0;
68917 +#else
68918 r->id.idiag_cookie[0] = (u32)(unsigned long)req;
68919 r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1);
68920 +#endif
68921
68922 tmo = req->expires - jiffies;
68923 if (tmo < 0)
68924 diff -urNp linux-3.0.4/net/ipv4/inet_hashtables.c linux-3.0.4/net/ipv4/inet_hashtables.c
68925 --- linux-3.0.4/net/ipv4/inet_hashtables.c 2011-09-02 18:11:21.000000000 -0400
68926 +++ linux-3.0.4/net/ipv4/inet_hashtables.c 2011-08-23 21:55:24.000000000 -0400
68927 @@ -18,12 +18,15 @@
68928 #include <linux/sched.h>
68929 #include <linux/slab.h>
68930 #include <linux/wait.h>
68931 +#include <linux/security.h>
68932
68933 #include <net/inet_connection_sock.h>
68934 #include <net/inet_hashtables.h>
68935 #include <net/secure_seq.h>
68936 #include <net/ip.h>
68937
68938 +extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
68939 +
68940 /*
68941 * Allocate and initialize a new local port bind bucket.
68942 * The bindhash mutex for snum's hash chain must be held here.
68943 @@ -530,6 +533,8 @@ ok:
68944 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
68945 spin_unlock(&head->lock);
68946
68947 + gr_update_task_in_ip_table(current, inet_sk(sk));
68948 +
68949 if (tw) {
68950 inet_twsk_deschedule(tw, death_row);
68951 while (twrefcnt) {
68952 diff -urNp linux-3.0.4/net/ipv4/inetpeer.c linux-3.0.4/net/ipv4/inetpeer.c
68953 --- linux-3.0.4/net/ipv4/inetpeer.c 2011-09-02 18:11:21.000000000 -0400
68954 +++ linux-3.0.4/net/ipv4/inetpeer.c 2011-08-23 21:48:14.000000000 -0400
68955 @@ -481,6 +481,8 @@ struct inet_peer *inet_getpeer(struct in
68956 unsigned int sequence;
68957 int invalidated, newrefcnt = 0;
68958
68959 + pax_track_stack();
68960 +
68961 /* Look up for the address quickly, lockless.
68962 * Because of a concurrent writer, we might not find an existing entry.
68963 */
68964 @@ -517,8 +519,8 @@ found: /* The existing node has been fo
68965 if (p) {
68966 p->daddr = *daddr;
68967 atomic_set(&p->refcnt, 1);
68968 - atomic_set(&p->rid, 0);
68969 - atomic_set(&p->ip_id_count, secure_ip_id(daddr->addr.a4));
68970 + atomic_set_unchecked(&p->rid, 0);
68971 + atomic_set_unchecked(&p->ip_id_count, secure_ip_id(daddr->addr.a4));
68972 p->tcp_ts_stamp = 0;
68973 p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
68974 p->rate_tokens = 0;
68975 diff -urNp linux-3.0.4/net/ipv4/ip_fragment.c linux-3.0.4/net/ipv4/ip_fragment.c
68976 --- linux-3.0.4/net/ipv4/ip_fragment.c 2011-07-21 22:17:23.000000000 -0400
68977 +++ linux-3.0.4/net/ipv4/ip_fragment.c 2011-08-23 21:47:56.000000000 -0400
68978 @@ -315,7 +315,7 @@ static inline int ip_frag_too_far(struct
68979 return 0;
68980
68981 start = qp->rid;
68982 - end = atomic_inc_return(&peer->rid);
68983 + end = atomic_inc_return_unchecked(&peer->rid);
68984 qp->rid = end;
68985
68986 rc = qp->q.fragments && (end - start) > max;
68987 diff -urNp linux-3.0.4/net/ipv4/ip_sockglue.c linux-3.0.4/net/ipv4/ip_sockglue.c
68988 --- linux-3.0.4/net/ipv4/ip_sockglue.c 2011-07-21 22:17:23.000000000 -0400
68989 +++ linux-3.0.4/net/ipv4/ip_sockglue.c 2011-08-23 21:48:14.000000000 -0400
68990 @@ -1073,6 +1073,8 @@ static int do_ip_getsockopt(struct sock
68991 int val;
68992 int len;
68993
68994 + pax_track_stack();
68995 +
68996 if (level != SOL_IP)
68997 return -EOPNOTSUPP;
68998
68999 @@ -1110,7 +1112,8 @@ static int do_ip_getsockopt(struct sock
69000 len = min_t(unsigned int, len, opt->optlen);
69001 if (put_user(len, optlen))
69002 return -EFAULT;
69003 - if (copy_to_user(optval, opt->__data, len))
69004 + if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
69005 + copy_to_user(optval, opt->__data, len))
69006 return -EFAULT;
69007 return 0;
69008 }
69009 diff -urNp linux-3.0.4/net/ipv4/netfilter/nf_nat_snmp_basic.c linux-3.0.4/net/ipv4/netfilter/nf_nat_snmp_basic.c
69010 --- linux-3.0.4/net/ipv4/netfilter/nf_nat_snmp_basic.c 2011-07-21 22:17:23.000000000 -0400
69011 +++ linux-3.0.4/net/ipv4/netfilter/nf_nat_snmp_basic.c 2011-08-23 21:47:56.000000000 -0400
69012 @@ -399,7 +399,7 @@ static unsigned char asn1_octets_decode(
69013
69014 *len = 0;
69015
69016 - *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC);
69017 + *octets = kmalloc((eoc - ctx->pointer), GFP_ATOMIC);
69018 if (*octets == NULL) {
69019 if (net_ratelimit())
69020 pr_notice("OOM in bsalg (%d)\n", __LINE__);
69021 diff -urNp linux-3.0.4/net/ipv4/ping.c linux-3.0.4/net/ipv4/ping.c
69022 --- linux-3.0.4/net/ipv4/ping.c 2011-07-21 22:17:23.000000000 -0400
69023 +++ linux-3.0.4/net/ipv4/ping.c 2011-08-23 21:47:56.000000000 -0400
69024 @@ -837,7 +837,7 @@ static void ping_format_sock(struct sock
69025 sk_rmem_alloc_get(sp),
69026 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
69027 atomic_read(&sp->sk_refcnt), sp,
69028 - atomic_read(&sp->sk_drops), len);
69029 + atomic_read_unchecked(&sp->sk_drops), len);
69030 }
69031
69032 static int ping_seq_show(struct seq_file *seq, void *v)
69033 diff -urNp linux-3.0.4/net/ipv4/raw.c linux-3.0.4/net/ipv4/raw.c
69034 --- linux-3.0.4/net/ipv4/raw.c 2011-07-21 22:17:23.000000000 -0400
69035 +++ linux-3.0.4/net/ipv4/raw.c 2011-08-23 21:48:14.000000000 -0400
69036 @@ -302,7 +302,7 @@ static int raw_rcv_skb(struct sock * sk,
69037 int raw_rcv(struct sock *sk, struct sk_buff *skb)
69038 {
69039 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
69040 - atomic_inc(&sk->sk_drops);
69041 + atomic_inc_unchecked(&sk->sk_drops);
69042 kfree_skb(skb);
69043 return NET_RX_DROP;
69044 }
69045 @@ -736,16 +736,20 @@ static int raw_init(struct sock *sk)
69046
69047 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
69048 {
69049 + struct icmp_filter filter;
69050 +
69051 if (optlen > sizeof(struct icmp_filter))
69052 optlen = sizeof(struct icmp_filter);
69053 - if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
69054 + if (copy_from_user(&filter, optval, optlen))
69055 return -EFAULT;
69056 + raw_sk(sk)->filter = filter;
69057 return 0;
69058 }
69059
69060 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
69061 {
69062 int len, ret = -EFAULT;
69063 + struct icmp_filter filter;
69064
69065 if (get_user(len, optlen))
69066 goto out;
69067 @@ -755,8 +759,9 @@ static int raw_geticmpfilter(struct sock
69068 if (len > sizeof(struct icmp_filter))
69069 len = sizeof(struct icmp_filter);
69070 ret = -EFAULT;
69071 - if (put_user(len, optlen) ||
69072 - copy_to_user(optval, &raw_sk(sk)->filter, len))
69073 + filter = raw_sk(sk)->filter;
69074 + if (put_user(len, optlen) || len > sizeof filter ||
69075 + copy_to_user(optval, &filter, len))
69076 goto out;
69077 ret = 0;
69078 out: return ret;
69079 @@ -984,7 +989,13 @@ static void raw_sock_seq_show(struct seq
69080 sk_wmem_alloc_get(sp),
69081 sk_rmem_alloc_get(sp),
69082 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
69083 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
69084 + atomic_read(&sp->sk_refcnt),
69085 +#ifdef CONFIG_GRKERNSEC_HIDESYM
69086 + NULL,
69087 +#else
69088 + sp,
69089 +#endif
69090 + atomic_read_unchecked(&sp->sk_drops));
69091 }
69092
69093 static int raw_seq_show(struct seq_file *seq, void *v)
69094 diff -urNp linux-3.0.4/net/ipv4/route.c linux-3.0.4/net/ipv4/route.c
69095 --- linux-3.0.4/net/ipv4/route.c 2011-09-02 18:11:21.000000000 -0400
69096 +++ linux-3.0.4/net/ipv4/route.c 2011-08-23 21:47:56.000000000 -0400
69097 @@ -304,7 +304,7 @@ static inline unsigned int rt_hash(__be3
69098
69099 static inline int rt_genid(struct net *net)
69100 {
69101 - return atomic_read(&net->ipv4.rt_genid);
69102 + return atomic_read_unchecked(&net->ipv4.rt_genid);
69103 }
69104
69105 #ifdef CONFIG_PROC_FS
69106 @@ -833,7 +833,7 @@ static void rt_cache_invalidate(struct n
69107 unsigned char shuffle;
69108
69109 get_random_bytes(&shuffle, sizeof(shuffle));
69110 - atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
69111 + atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
69112 }
69113
69114 /*
69115 @@ -2834,7 +2834,7 @@ static int rt_fill_info(struct net *net,
69116 error = rt->dst.error;
69117 if (peer) {
69118 inet_peer_refcheck(rt->peer);
69119 - id = atomic_read(&peer->ip_id_count) & 0xffff;
69120 + id = atomic_read_unchecked(&peer->ip_id_count) & 0xffff;
69121 if (peer->tcp_ts_stamp) {
69122 ts = peer->tcp_ts;
69123 tsage = get_seconds() - peer->tcp_ts_stamp;
69124 diff -urNp linux-3.0.4/net/ipv4/tcp.c linux-3.0.4/net/ipv4/tcp.c
69125 --- linux-3.0.4/net/ipv4/tcp.c 2011-07-21 22:17:23.000000000 -0400
69126 +++ linux-3.0.4/net/ipv4/tcp.c 2011-08-23 21:48:14.000000000 -0400
69127 @@ -2122,6 +2122,8 @@ static int do_tcp_setsockopt(struct sock
69128 int val;
69129 int err = 0;
69130
69131 + pax_track_stack();
69132 +
69133 /* These are data/string values, all the others are ints */
69134 switch (optname) {
69135 case TCP_CONGESTION: {
69136 @@ -2501,6 +2503,8 @@ static int do_tcp_getsockopt(struct sock
69137 struct tcp_sock *tp = tcp_sk(sk);
69138 int val, len;
69139
69140 + pax_track_stack();
69141 +
69142 if (get_user(len, optlen))
69143 return -EFAULT;
69144
69145 diff -urNp linux-3.0.4/net/ipv4/tcp_ipv4.c linux-3.0.4/net/ipv4/tcp_ipv4.c
69146 --- linux-3.0.4/net/ipv4/tcp_ipv4.c 2011-09-02 18:11:21.000000000 -0400
69147 +++ linux-3.0.4/net/ipv4/tcp_ipv4.c 2011-08-23 21:48:14.000000000 -0400
69148 @@ -87,6 +87,9 @@ int sysctl_tcp_tw_reuse __read_mostly;
69149 int sysctl_tcp_low_latency __read_mostly;
69150 EXPORT_SYMBOL(sysctl_tcp_low_latency);
69151
69152 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
69153 +extern int grsec_enable_blackhole;
69154 +#endif
69155
69156 #ifdef CONFIG_TCP_MD5SIG
69157 static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
69158 @@ -1607,6 +1610,9 @@ int tcp_v4_do_rcv(struct sock *sk, struc
69159 return 0;
69160
69161 reset:
69162 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
69163 + if (!grsec_enable_blackhole)
69164 +#endif
69165 tcp_v4_send_reset(rsk, skb);
69166 discard:
69167 kfree_skb(skb);
69168 @@ -1669,12 +1675,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
69169 TCP_SKB_CB(skb)->sacked = 0;
69170
69171 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
69172 - if (!sk)
69173 + if (!sk) {
69174 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
69175 + ret = 1;
69176 +#endif
69177 goto no_tcp_socket;
69178 -
69179 + }
69180 process:
69181 - if (sk->sk_state == TCP_TIME_WAIT)
69182 + if (sk->sk_state == TCP_TIME_WAIT) {
69183 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
69184 + ret = 2;
69185 +#endif
69186 goto do_time_wait;
69187 + }
69188
69189 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
69190 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
69191 @@ -1724,6 +1737,10 @@ no_tcp_socket:
69192 bad_packet:
69193 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
69194 } else {
69195 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
69196 + if (!grsec_enable_blackhole || (ret == 1 &&
69197 + (skb->dev->flags & IFF_LOOPBACK)))
69198 +#endif
69199 tcp_v4_send_reset(NULL, skb);
69200 }
69201
69202 @@ -2388,7 +2405,11 @@ static void get_openreq4(struct sock *sk
69203 0, /* non standard timer */
69204 0, /* open_requests have no inode */
69205 atomic_read(&sk->sk_refcnt),
69206 +#ifdef CONFIG_GRKERNSEC_HIDESYM
69207 + NULL,
69208 +#else
69209 req,
69210 +#endif
69211 len);
69212 }
69213
69214 @@ -2438,7 +2459,12 @@ static void get_tcp4_sock(struct sock *s
69215 sock_i_uid(sk),
69216 icsk->icsk_probes_out,
69217 sock_i_ino(sk),
69218 - atomic_read(&sk->sk_refcnt), sk,
69219 + atomic_read(&sk->sk_refcnt),
69220 +#ifdef CONFIG_GRKERNSEC_HIDESYM
69221 + NULL,
69222 +#else
69223 + sk,
69224 +#endif
69225 jiffies_to_clock_t(icsk->icsk_rto),
69226 jiffies_to_clock_t(icsk->icsk_ack.ato),
69227 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
69228 @@ -2466,7 +2492,13 @@ static void get_timewait4_sock(struct in
69229 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
69230 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
69231 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
69232 - atomic_read(&tw->tw_refcnt), tw, len);
69233 + atomic_read(&tw->tw_refcnt),
69234 +#ifdef CONFIG_GRKERNSEC_HIDESYM
69235 + NULL,
69236 +#else
69237 + tw,
69238 +#endif
69239 + len);
69240 }
69241
69242 #define TMPSZ 150
69243 diff -urNp linux-3.0.4/net/ipv4/tcp_minisocks.c linux-3.0.4/net/ipv4/tcp_minisocks.c
69244 --- linux-3.0.4/net/ipv4/tcp_minisocks.c 2011-07-21 22:17:23.000000000 -0400
69245 +++ linux-3.0.4/net/ipv4/tcp_minisocks.c 2011-08-23 21:48:14.000000000 -0400
69246 @@ -27,6 +27,10 @@
69247 #include <net/inet_common.h>
69248 #include <net/xfrm.h>
69249
69250 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
69251 +extern int grsec_enable_blackhole;
69252 +#endif
69253 +
69254 int sysctl_tcp_syncookies __read_mostly = 1;
69255 EXPORT_SYMBOL(sysctl_tcp_syncookies);
69256
69257 @@ -745,6 +749,10 @@ listen_overflow:
69258
69259 embryonic_reset:
69260 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
69261 +
69262 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
69263 + if (!grsec_enable_blackhole)
69264 +#endif
69265 if (!(flg & TCP_FLAG_RST))
69266 req->rsk_ops->send_reset(sk, skb);
69267
69268 diff -urNp linux-3.0.4/net/ipv4/tcp_output.c linux-3.0.4/net/ipv4/tcp_output.c
69269 --- linux-3.0.4/net/ipv4/tcp_output.c 2011-07-21 22:17:23.000000000 -0400
69270 +++ linux-3.0.4/net/ipv4/tcp_output.c 2011-08-23 21:48:14.000000000 -0400
69271 @@ -2421,6 +2421,8 @@ struct sk_buff *tcp_make_synack(struct s
69272 int mss;
69273 int s_data_desired = 0;
69274
69275 + pax_track_stack();
69276 +
69277 if (cvp != NULL && cvp->s_data_constant && cvp->s_data_desired)
69278 s_data_desired = cvp->s_data_desired;
69279 skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15 + s_data_desired, 1, GFP_ATOMIC);
69280 diff -urNp linux-3.0.4/net/ipv4/tcp_probe.c linux-3.0.4/net/ipv4/tcp_probe.c
69281 --- linux-3.0.4/net/ipv4/tcp_probe.c 2011-07-21 22:17:23.000000000 -0400
69282 +++ linux-3.0.4/net/ipv4/tcp_probe.c 2011-08-23 21:47:56.000000000 -0400
69283 @@ -202,7 +202,7 @@ static ssize_t tcpprobe_read(struct file
69284 if (cnt + width >= len)
69285 break;
69286
69287 - if (copy_to_user(buf + cnt, tbuf, width))
69288 + if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
69289 return -EFAULT;
69290 cnt += width;
69291 }
69292 diff -urNp linux-3.0.4/net/ipv4/tcp_timer.c linux-3.0.4/net/ipv4/tcp_timer.c
69293 --- linux-3.0.4/net/ipv4/tcp_timer.c 2011-07-21 22:17:23.000000000 -0400
69294 +++ linux-3.0.4/net/ipv4/tcp_timer.c 2011-08-23 21:48:14.000000000 -0400
69295 @@ -22,6 +22,10 @@
69296 #include <linux/gfp.h>
69297 #include <net/tcp.h>
69298
69299 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
69300 +extern int grsec_lastack_retries;
69301 +#endif
69302 +
69303 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
69304 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
69305 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
69306 @@ -199,6 +203,13 @@ static int tcp_write_timeout(struct sock
69307 }
69308 }
69309
69310 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
69311 + if ((sk->sk_state == TCP_LAST_ACK) &&
69312 + (grsec_lastack_retries > 0) &&
69313 + (grsec_lastack_retries < retry_until))
69314 + retry_until = grsec_lastack_retries;
69315 +#endif
69316 +
69317 if (retransmits_timed_out(sk, retry_until,
69318 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
69319 /* Has it gone just too far? */
69320 diff -urNp linux-3.0.4/net/ipv4/udp.c linux-3.0.4/net/ipv4/udp.c
69321 --- linux-3.0.4/net/ipv4/udp.c 2011-07-21 22:17:23.000000000 -0400
69322 +++ linux-3.0.4/net/ipv4/udp.c 2011-08-23 21:48:14.000000000 -0400
69323 @@ -86,6 +86,7 @@
69324 #include <linux/types.h>
69325 #include <linux/fcntl.h>
69326 #include <linux/module.h>
69327 +#include <linux/security.h>
69328 #include <linux/socket.h>
69329 #include <linux/sockios.h>
69330 #include <linux/igmp.h>
69331 @@ -107,6 +108,10 @@
69332 #include <net/xfrm.h>
69333 #include "udp_impl.h"
69334
69335 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
69336 +extern int grsec_enable_blackhole;
69337 +#endif
69338 +
69339 struct udp_table udp_table __read_mostly;
69340 EXPORT_SYMBOL(udp_table);
69341
69342 @@ -564,6 +569,9 @@ found:
69343 return s;
69344 }
69345
69346 +extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
69347 +extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
69348 +
69349 /*
69350 * This routine is called by the ICMP module when it gets some
69351 * sort of error condition. If err < 0 then the socket should
69352 @@ -855,9 +863,18 @@ int udp_sendmsg(struct kiocb *iocb, stru
69353 dport = usin->sin_port;
69354 if (dport == 0)
69355 return -EINVAL;
69356 +
69357 + err = gr_search_udp_sendmsg(sk, usin);
69358 + if (err)
69359 + return err;
69360 } else {
69361 if (sk->sk_state != TCP_ESTABLISHED)
69362 return -EDESTADDRREQ;
69363 +
69364 + err = gr_search_udp_sendmsg(sk, NULL);
69365 + if (err)
69366 + return err;
69367 +
69368 daddr = inet->inet_daddr;
69369 dport = inet->inet_dport;
69370 /* Open fast path for connected socket.
69371 @@ -1098,7 +1115,7 @@ static unsigned int first_packet_length(
69372 udp_lib_checksum_complete(skb)) {
69373 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
69374 IS_UDPLITE(sk));
69375 - atomic_inc(&sk->sk_drops);
69376 + atomic_inc_unchecked(&sk->sk_drops);
69377 __skb_unlink(skb, rcvq);
69378 __skb_queue_tail(&list_kill, skb);
69379 }
69380 @@ -1184,6 +1201,10 @@ try_again:
69381 if (!skb)
69382 goto out;
69383
69384 + err = gr_search_udp_recvmsg(sk, skb);
69385 + if (err)
69386 + goto out_free;
69387 +
69388 ulen = skb->len - sizeof(struct udphdr);
69389 if (len > ulen)
69390 len = ulen;
69391 @@ -1483,7 +1504,7 @@ int udp_queue_rcv_skb(struct sock *sk, s
69392
69393 drop:
69394 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
69395 - atomic_inc(&sk->sk_drops);
69396 + atomic_inc_unchecked(&sk->sk_drops);
69397 kfree_skb(skb);
69398 return -1;
69399 }
69400 @@ -1502,7 +1523,7 @@ static void flush_stack(struct sock **st
69401 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
69402
69403 if (!skb1) {
69404 - atomic_inc(&sk->sk_drops);
69405 + atomic_inc_unchecked(&sk->sk_drops);
69406 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
69407 IS_UDPLITE(sk));
69408 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
69409 @@ -1671,6 +1692,9 @@ int __udp4_lib_rcv(struct sk_buff *skb,
69410 goto csum_error;
69411
69412 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
69413 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
69414 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
69415 +#endif
69416 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
69417
69418 /*
69419 @@ -2098,8 +2122,13 @@ static void udp4_format_sock(struct sock
69420 sk_wmem_alloc_get(sp),
69421 sk_rmem_alloc_get(sp),
69422 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
69423 - atomic_read(&sp->sk_refcnt), sp,
69424 - atomic_read(&sp->sk_drops), len);
69425 + atomic_read(&sp->sk_refcnt),
69426 +#ifdef CONFIG_GRKERNSEC_HIDESYM
69427 + NULL,
69428 +#else
69429 + sp,
69430 +#endif
69431 + atomic_read_unchecked(&sp->sk_drops), len);
69432 }
69433
69434 int udp4_seq_show(struct seq_file *seq, void *v)
69435 diff -urNp linux-3.0.4/net/ipv6/inet6_connection_sock.c linux-3.0.4/net/ipv6/inet6_connection_sock.c
69436 --- linux-3.0.4/net/ipv6/inet6_connection_sock.c 2011-07-21 22:17:23.000000000 -0400
69437 +++ linux-3.0.4/net/ipv6/inet6_connection_sock.c 2011-08-23 21:47:56.000000000 -0400
69438 @@ -178,7 +178,7 @@ void __inet6_csk_dst_store(struct sock *
69439 #ifdef CONFIG_XFRM
69440 {
69441 struct rt6_info *rt = (struct rt6_info *)dst;
69442 - rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
69443 + rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
69444 }
69445 #endif
69446 }
69447 @@ -193,7 +193,7 @@ struct dst_entry *__inet6_csk_dst_check(
69448 #ifdef CONFIG_XFRM
69449 if (dst) {
69450 struct rt6_info *rt = (struct rt6_info *)dst;
69451 - if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
69452 + if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
69453 __sk_dst_reset(sk);
69454 dst = NULL;
69455 }
69456 diff -urNp linux-3.0.4/net/ipv6/ipv6_sockglue.c linux-3.0.4/net/ipv6/ipv6_sockglue.c
69457 --- linux-3.0.4/net/ipv6/ipv6_sockglue.c 2011-07-21 22:17:23.000000000 -0400
69458 +++ linux-3.0.4/net/ipv6/ipv6_sockglue.c 2011-08-23 21:48:14.000000000 -0400
69459 @@ -129,6 +129,8 @@ static int do_ipv6_setsockopt(struct soc
69460 int val, valbool;
69461 int retv = -ENOPROTOOPT;
69462
69463 + pax_track_stack();
69464 +
69465 if (optval == NULL)
69466 val=0;
69467 else {
69468 @@ -919,6 +921,8 @@ static int do_ipv6_getsockopt(struct soc
69469 int len;
69470 int val;
69471
69472 + pax_track_stack();
69473 +
69474 if (ip6_mroute_opt(optname))
69475 return ip6_mroute_getsockopt(sk, optname, optval, optlen);
69476
69477 diff -urNp linux-3.0.4/net/ipv6/raw.c linux-3.0.4/net/ipv6/raw.c
69478 --- linux-3.0.4/net/ipv6/raw.c 2011-07-21 22:17:23.000000000 -0400
69479 +++ linux-3.0.4/net/ipv6/raw.c 2011-08-23 21:48:14.000000000 -0400
69480 @@ -376,7 +376,7 @@ static inline int rawv6_rcv_skb(struct s
69481 {
69482 if ((raw6_sk(sk)->checksum || rcu_dereference_raw(sk->sk_filter)) &&
69483 skb_checksum_complete(skb)) {
69484 - atomic_inc(&sk->sk_drops);
69485 + atomic_inc_unchecked(&sk->sk_drops);
69486 kfree_skb(skb);
69487 return NET_RX_DROP;
69488 }
69489 @@ -403,7 +403,7 @@ int rawv6_rcv(struct sock *sk, struct sk
69490 struct raw6_sock *rp = raw6_sk(sk);
69491
69492 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
69493 - atomic_inc(&sk->sk_drops);
69494 + atomic_inc_unchecked(&sk->sk_drops);
69495 kfree_skb(skb);
69496 return NET_RX_DROP;
69497 }
69498 @@ -427,7 +427,7 @@ int rawv6_rcv(struct sock *sk, struct sk
69499
69500 if (inet->hdrincl) {
69501 if (skb_checksum_complete(skb)) {
69502 - atomic_inc(&sk->sk_drops);
69503 + atomic_inc_unchecked(&sk->sk_drops);
69504 kfree_skb(skb);
69505 return NET_RX_DROP;
69506 }
69507 @@ -601,7 +601,7 @@ out:
69508 return err;
69509 }
69510
69511 -static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
69512 +static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
69513 struct flowi6 *fl6, struct dst_entry **dstp,
69514 unsigned int flags)
69515 {
69516 @@ -742,6 +742,8 @@ static int rawv6_sendmsg(struct kiocb *i
69517 u16 proto;
69518 int err;
69519
69520 + pax_track_stack();
69521 +
69522 /* Rough check on arithmetic overflow,
69523 better check is made in ip6_append_data().
69524 */
69525 @@ -909,12 +911,15 @@ do_confirm:
69526 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
69527 char __user *optval, int optlen)
69528 {
69529 + struct icmp6_filter filter;
69530 +
69531 switch (optname) {
69532 case ICMPV6_FILTER:
69533 if (optlen > sizeof(struct icmp6_filter))
69534 optlen = sizeof(struct icmp6_filter);
69535 - if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
69536 + if (copy_from_user(&filter, optval, optlen))
69537 return -EFAULT;
69538 + raw6_sk(sk)->filter = filter;
69539 return 0;
69540 default:
69541 return -ENOPROTOOPT;
69542 @@ -927,6 +932,7 @@ static int rawv6_geticmpfilter(struct so
69543 char __user *optval, int __user *optlen)
69544 {
69545 int len;
69546 + struct icmp6_filter filter;
69547
69548 switch (optname) {
69549 case ICMPV6_FILTER:
69550 @@ -938,7 +944,8 @@ static int rawv6_geticmpfilter(struct so
69551 len = sizeof(struct icmp6_filter);
69552 if (put_user(len, optlen))
69553 return -EFAULT;
69554 - if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
69555 + filter = raw6_sk(sk)->filter;
69556 + if (len > sizeof filter || copy_to_user(optval, &filter, len))
69557 return -EFAULT;
69558 return 0;
69559 default:
69560 @@ -1252,7 +1259,13 @@ static void raw6_sock_seq_show(struct se
69561 0, 0L, 0,
69562 sock_i_uid(sp), 0,
69563 sock_i_ino(sp),
69564 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
69565 + atomic_read(&sp->sk_refcnt),
69566 +#ifdef CONFIG_GRKERNSEC_HIDESYM
69567 + NULL,
69568 +#else
69569 + sp,
69570 +#endif
69571 + atomic_read_unchecked(&sp->sk_drops));
69572 }
69573
69574 static int raw6_seq_show(struct seq_file *seq, void *v)
69575 diff -urNp linux-3.0.4/net/ipv6/tcp_ipv6.c linux-3.0.4/net/ipv6/tcp_ipv6.c
69576 --- linux-3.0.4/net/ipv6/tcp_ipv6.c 2011-09-02 18:11:21.000000000 -0400
69577 +++ linux-3.0.4/net/ipv6/tcp_ipv6.c 2011-08-23 21:48:14.000000000 -0400
69578 @@ -93,6 +93,10 @@ static struct tcp_md5sig_key *tcp_v6_md5
69579 }
69580 #endif
69581
69582 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
69583 +extern int grsec_enable_blackhole;
69584 +#endif
69585 +
69586 static void tcp_v6_hash(struct sock *sk)
69587 {
69588 if (sk->sk_state != TCP_CLOSE) {
69589 @@ -1662,6 +1666,9 @@ static int tcp_v6_do_rcv(struct sock *sk
69590 return 0;
69591
69592 reset:
69593 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
69594 + if (!grsec_enable_blackhole)
69595 +#endif
69596 tcp_v6_send_reset(sk, skb);
69597 discard:
69598 if (opt_skb)
69599 @@ -1741,12 +1748,20 @@ static int tcp_v6_rcv(struct sk_buff *sk
69600 TCP_SKB_CB(skb)->sacked = 0;
69601
69602 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
69603 - if (!sk)
69604 + if (!sk) {
69605 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
69606 + ret = 1;
69607 +#endif
69608 goto no_tcp_socket;
69609 + }
69610
69611 process:
69612 - if (sk->sk_state == TCP_TIME_WAIT)
69613 + if (sk->sk_state == TCP_TIME_WAIT) {
69614 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
69615 + ret = 2;
69616 +#endif
69617 goto do_time_wait;
69618 + }
69619
69620 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
69621 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
69622 @@ -1794,6 +1809,10 @@ no_tcp_socket:
69623 bad_packet:
69624 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
69625 } else {
69626 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
69627 + if (!grsec_enable_blackhole || (ret == 1 &&
69628 + (skb->dev->flags & IFF_LOOPBACK)))
69629 +#endif
69630 tcp_v6_send_reset(NULL, skb);
69631 }
69632
69633 @@ -2054,7 +2073,13 @@ static void get_openreq6(struct seq_file
69634 uid,
69635 0, /* non standard timer */
69636 0, /* open_requests have no inode */
69637 - 0, req);
69638 + 0,
69639 +#ifdef CONFIG_GRKERNSEC_HIDESYM
69640 + NULL
69641 +#else
69642 + req
69643 +#endif
69644 + );
69645 }
69646
69647 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
69648 @@ -2104,7 +2129,12 @@ static void get_tcp6_sock(struct seq_fil
69649 sock_i_uid(sp),
69650 icsk->icsk_probes_out,
69651 sock_i_ino(sp),
69652 - atomic_read(&sp->sk_refcnt), sp,
69653 + atomic_read(&sp->sk_refcnt),
69654 +#ifdef CONFIG_GRKERNSEC_HIDESYM
69655 + NULL,
69656 +#else
69657 + sp,
69658 +#endif
69659 jiffies_to_clock_t(icsk->icsk_rto),
69660 jiffies_to_clock_t(icsk->icsk_ack.ato),
69661 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
69662 @@ -2139,7 +2169,13 @@ static void get_timewait6_sock(struct se
69663 dest->s6_addr32[2], dest->s6_addr32[3], destp,
69664 tw->tw_substate, 0, 0,
69665 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
69666 - atomic_read(&tw->tw_refcnt), tw);
69667 + atomic_read(&tw->tw_refcnt),
69668 +#ifdef CONFIG_GRKERNSEC_HIDESYM
69669 + NULL
69670 +#else
69671 + tw
69672 +#endif
69673 + );
69674 }
69675
69676 static int tcp6_seq_show(struct seq_file *seq, void *v)
69677 diff -urNp linux-3.0.4/net/ipv6/udp.c linux-3.0.4/net/ipv6/udp.c
69678 --- linux-3.0.4/net/ipv6/udp.c 2011-09-02 18:11:21.000000000 -0400
69679 +++ linux-3.0.4/net/ipv6/udp.c 2011-08-23 21:48:14.000000000 -0400
69680 @@ -50,6 +50,10 @@
69681 #include <linux/seq_file.h>
69682 #include "udp_impl.h"
69683
69684 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
69685 +extern int grsec_enable_blackhole;
69686 +#endif
69687 +
69688 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
69689 {
69690 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
69691 @@ -548,7 +552,7 @@ int udpv6_queue_rcv_skb(struct sock * sk
69692
69693 return 0;
69694 drop:
69695 - atomic_inc(&sk->sk_drops);
69696 + atomic_inc_unchecked(&sk->sk_drops);
69697 drop_no_sk_drops_inc:
69698 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
69699 kfree_skb(skb);
69700 @@ -624,7 +628,7 @@ static void flush_stack(struct sock **st
69701 continue;
69702 }
69703 drop:
69704 - atomic_inc(&sk->sk_drops);
69705 + atomic_inc_unchecked(&sk->sk_drops);
69706 UDP6_INC_STATS_BH(sock_net(sk),
69707 UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
69708 UDP6_INC_STATS_BH(sock_net(sk),
69709 @@ -779,6 +783,9 @@ int __udp6_lib_rcv(struct sk_buff *skb,
69710 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
69711 proto == IPPROTO_UDPLITE);
69712
69713 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
69714 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
69715 +#endif
69716 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
69717
69718 kfree_skb(skb);
69719 @@ -795,7 +802,7 @@ int __udp6_lib_rcv(struct sk_buff *skb,
69720 if (!sock_owned_by_user(sk))
69721 udpv6_queue_rcv_skb(sk, skb);
69722 else if (sk_add_backlog(sk, skb)) {
69723 - atomic_inc(&sk->sk_drops);
69724 + atomic_inc_unchecked(&sk->sk_drops);
69725 bh_unlock_sock(sk);
69726 sock_put(sk);
69727 goto discard;
69728 @@ -1406,8 +1413,13 @@ static void udp6_sock_seq_show(struct se
69729 0, 0L, 0,
69730 sock_i_uid(sp), 0,
69731 sock_i_ino(sp),
69732 - atomic_read(&sp->sk_refcnt), sp,
69733 - atomic_read(&sp->sk_drops));
69734 + atomic_read(&sp->sk_refcnt),
69735 +#ifdef CONFIG_GRKERNSEC_HIDESYM
69736 + NULL,
69737 +#else
69738 + sp,
69739 +#endif
69740 + atomic_read_unchecked(&sp->sk_drops));
69741 }
69742
69743 int udp6_seq_show(struct seq_file *seq, void *v)
69744 diff -urNp linux-3.0.4/net/irda/ircomm/ircomm_tty.c linux-3.0.4/net/irda/ircomm/ircomm_tty.c
69745 --- linux-3.0.4/net/irda/ircomm/ircomm_tty.c 2011-07-21 22:17:23.000000000 -0400
69746 +++ linux-3.0.4/net/irda/ircomm/ircomm_tty.c 2011-08-23 21:47:56.000000000 -0400
69747 @@ -282,16 +282,16 @@ static int ircomm_tty_block_til_ready(st
69748 add_wait_queue(&self->open_wait, &wait);
69749
69750 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
69751 - __FILE__,__LINE__, tty->driver->name, self->open_count );
69752 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
69753
69754 /* As far as I can see, we protect open_count - Jean II */
69755 spin_lock_irqsave(&self->spinlock, flags);
69756 if (!tty_hung_up_p(filp)) {
69757 extra_count = 1;
69758 - self->open_count--;
69759 + local_dec(&self->open_count);
69760 }
69761 spin_unlock_irqrestore(&self->spinlock, flags);
69762 - self->blocked_open++;
69763 + local_inc(&self->blocked_open);
69764
69765 while (1) {
69766 if (tty->termios->c_cflag & CBAUD) {
69767 @@ -331,7 +331,7 @@ static int ircomm_tty_block_til_ready(st
69768 }
69769
69770 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
69771 - __FILE__,__LINE__, tty->driver->name, self->open_count );
69772 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
69773
69774 schedule();
69775 }
69776 @@ -342,13 +342,13 @@ static int ircomm_tty_block_til_ready(st
69777 if (extra_count) {
69778 /* ++ is not atomic, so this should be protected - Jean II */
69779 spin_lock_irqsave(&self->spinlock, flags);
69780 - self->open_count++;
69781 + local_inc(&self->open_count);
69782 spin_unlock_irqrestore(&self->spinlock, flags);
69783 }
69784 - self->blocked_open--;
69785 + local_dec(&self->blocked_open);
69786
69787 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
69788 - __FILE__,__LINE__, tty->driver->name, self->open_count);
69789 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
69790
69791 if (!retval)
69792 self->flags |= ASYNC_NORMAL_ACTIVE;
69793 @@ -417,14 +417,14 @@ static int ircomm_tty_open(struct tty_st
69794 }
69795 /* ++ is not atomic, so this should be protected - Jean II */
69796 spin_lock_irqsave(&self->spinlock, flags);
69797 - self->open_count++;
69798 + local_inc(&self->open_count);
69799
69800 tty->driver_data = self;
69801 self->tty = tty;
69802 spin_unlock_irqrestore(&self->spinlock, flags);
69803
69804 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
69805 - self->line, self->open_count);
69806 + self->line, local_read(&self->open_count));
69807
69808 /* Not really used by us, but lets do it anyway */
69809 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
69810 @@ -510,7 +510,7 @@ static void ircomm_tty_close(struct tty_
69811 return;
69812 }
69813
69814 - if ((tty->count == 1) && (self->open_count != 1)) {
69815 + if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
69816 /*
69817 * Uh, oh. tty->count is 1, which means that the tty
69818 * structure will be freed. state->count should always
69819 @@ -520,16 +520,16 @@ static void ircomm_tty_close(struct tty_
69820 */
69821 IRDA_DEBUG(0, "%s(), bad serial port count; "
69822 "tty->count is 1, state->count is %d\n", __func__ ,
69823 - self->open_count);
69824 - self->open_count = 1;
69825 + local_read(&self->open_count));
69826 + local_set(&self->open_count, 1);
69827 }
69828
69829 - if (--self->open_count < 0) {
69830 + if (local_dec_return(&self->open_count) < 0) {
69831 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
69832 - __func__, self->line, self->open_count);
69833 - self->open_count = 0;
69834 + __func__, self->line, local_read(&self->open_count));
69835 + local_set(&self->open_count, 0);
69836 }
69837 - if (self->open_count) {
69838 + if (local_read(&self->open_count)) {
69839 spin_unlock_irqrestore(&self->spinlock, flags);
69840
69841 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
69842 @@ -561,7 +561,7 @@ static void ircomm_tty_close(struct tty_
69843 tty->closing = 0;
69844 self->tty = NULL;
69845
69846 - if (self->blocked_open) {
69847 + if (local_read(&self->blocked_open)) {
69848 if (self->close_delay)
69849 schedule_timeout_interruptible(self->close_delay);
69850 wake_up_interruptible(&self->open_wait);
69851 @@ -1013,7 +1013,7 @@ static void ircomm_tty_hangup(struct tty
69852 spin_lock_irqsave(&self->spinlock, flags);
69853 self->flags &= ~ASYNC_NORMAL_ACTIVE;
69854 self->tty = NULL;
69855 - self->open_count = 0;
69856 + local_set(&self->open_count, 0);
69857 spin_unlock_irqrestore(&self->spinlock, flags);
69858
69859 wake_up_interruptible(&self->open_wait);
69860 @@ -1360,7 +1360,7 @@ static void ircomm_tty_line_info(struct
69861 seq_putc(m, '\n');
69862
69863 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
69864 - seq_printf(m, "Open count: %d\n", self->open_count);
69865 + seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
69866 seq_printf(m, "Max data size: %d\n", self->max_data_size);
69867 seq_printf(m, "Max header size: %d\n", self->max_header_size);
69868
69869 diff -urNp linux-3.0.4/net/iucv/af_iucv.c linux-3.0.4/net/iucv/af_iucv.c
69870 --- linux-3.0.4/net/iucv/af_iucv.c 2011-07-21 22:17:23.000000000 -0400
69871 +++ linux-3.0.4/net/iucv/af_iucv.c 2011-08-23 21:47:56.000000000 -0400
69872 @@ -648,10 +648,10 @@ static int iucv_sock_autobind(struct soc
69873
69874 write_lock_bh(&iucv_sk_list.lock);
69875
69876 - sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
69877 + sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
69878 while (__iucv_get_sock_by_name(name)) {
69879 sprintf(name, "%08x",
69880 - atomic_inc_return(&iucv_sk_list.autobind_name));
69881 + atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
69882 }
69883
69884 write_unlock_bh(&iucv_sk_list.lock);
69885 diff -urNp linux-3.0.4/net/key/af_key.c linux-3.0.4/net/key/af_key.c
69886 --- linux-3.0.4/net/key/af_key.c 2011-07-21 22:17:23.000000000 -0400
69887 +++ linux-3.0.4/net/key/af_key.c 2011-08-23 21:48:14.000000000 -0400
69888 @@ -2481,6 +2481,8 @@ static int pfkey_migrate(struct sock *sk
69889 struct xfrm_migrate m[XFRM_MAX_DEPTH];
69890 struct xfrm_kmaddress k;
69891
69892 + pax_track_stack();
69893 +
69894 if (!present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC - 1],
69895 ext_hdrs[SADB_EXT_ADDRESS_DST - 1]) ||
69896 !ext_hdrs[SADB_X_EXT_POLICY - 1]) {
69897 @@ -3016,10 +3018,10 @@ static int pfkey_send_policy_notify(stru
69898 static u32 get_acqseq(void)
69899 {
69900 u32 res;
69901 - static atomic_t acqseq;
69902 + static atomic_unchecked_t acqseq;
69903
69904 do {
69905 - res = atomic_inc_return(&acqseq);
69906 + res = atomic_inc_return_unchecked(&acqseq);
69907 } while (!res);
69908 return res;
69909 }
69910 diff -urNp linux-3.0.4/net/lapb/lapb_iface.c linux-3.0.4/net/lapb/lapb_iface.c
69911 --- linux-3.0.4/net/lapb/lapb_iface.c 2011-07-21 22:17:23.000000000 -0400
69912 +++ linux-3.0.4/net/lapb/lapb_iface.c 2011-08-23 21:47:56.000000000 -0400
69913 @@ -158,7 +158,7 @@ int lapb_register(struct net_device *dev
69914 goto out;
69915
69916 lapb->dev = dev;
69917 - lapb->callbacks = *callbacks;
69918 + lapb->callbacks = callbacks;
69919
69920 __lapb_insert_cb(lapb);
69921
69922 @@ -380,32 +380,32 @@ int lapb_data_received(struct net_device
69923
69924 void lapb_connect_confirmation(struct lapb_cb *lapb, int reason)
69925 {
69926 - if (lapb->callbacks.connect_confirmation)
69927 - lapb->callbacks.connect_confirmation(lapb->dev, reason);
69928 + if (lapb->callbacks->connect_confirmation)
69929 + lapb->callbacks->connect_confirmation(lapb->dev, reason);
69930 }
69931
69932 void lapb_connect_indication(struct lapb_cb *lapb, int reason)
69933 {
69934 - if (lapb->callbacks.connect_indication)
69935 - lapb->callbacks.connect_indication(lapb->dev, reason);
69936 + if (lapb->callbacks->connect_indication)
69937 + lapb->callbacks->connect_indication(lapb->dev, reason);
69938 }
69939
69940 void lapb_disconnect_confirmation(struct lapb_cb *lapb, int reason)
69941 {
69942 - if (lapb->callbacks.disconnect_confirmation)
69943 - lapb->callbacks.disconnect_confirmation(lapb->dev, reason);
69944 + if (lapb->callbacks->disconnect_confirmation)
69945 + lapb->callbacks->disconnect_confirmation(lapb->dev, reason);
69946 }
69947
69948 void lapb_disconnect_indication(struct lapb_cb *lapb, int reason)
69949 {
69950 - if (lapb->callbacks.disconnect_indication)
69951 - lapb->callbacks.disconnect_indication(lapb->dev, reason);
69952 + if (lapb->callbacks->disconnect_indication)
69953 + lapb->callbacks->disconnect_indication(lapb->dev, reason);
69954 }
69955
69956 int lapb_data_indication(struct lapb_cb *lapb, struct sk_buff *skb)
69957 {
69958 - if (lapb->callbacks.data_indication)
69959 - return lapb->callbacks.data_indication(lapb->dev, skb);
69960 + if (lapb->callbacks->data_indication)
69961 + return lapb->callbacks->data_indication(lapb->dev, skb);
69962
69963 kfree_skb(skb);
69964 return NET_RX_SUCCESS; /* For now; must be != NET_RX_DROP */
69965 @@ -415,8 +415,8 @@ int lapb_data_transmit(struct lapb_cb *l
69966 {
69967 int used = 0;
69968
69969 - if (lapb->callbacks.data_transmit) {
69970 - lapb->callbacks.data_transmit(lapb->dev, skb);
69971 + if (lapb->callbacks->data_transmit) {
69972 + lapb->callbacks->data_transmit(lapb->dev, skb);
69973 used = 1;
69974 }
69975
69976 diff -urNp linux-3.0.4/net/mac80211/debugfs_sta.c linux-3.0.4/net/mac80211/debugfs_sta.c
69977 --- linux-3.0.4/net/mac80211/debugfs_sta.c 2011-07-21 22:17:23.000000000 -0400
69978 +++ linux-3.0.4/net/mac80211/debugfs_sta.c 2011-08-23 21:48:14.000000000 -0400
69979 @@ -140,6 +140,8 @@ static ssize_t sta_agg_status_read(struc
69980 struct tid_ampdu_rx *tid_rx;
69981 struct tid_ampdu_tx *tid_tx;
69982
69983 + pax_track_stack();
69984 +
69985 rcu_read_lock();
69986
69987 p += scnprintf(p, sizeof(buf) + buf - p, "next dialog_token: %#02x\n",
69988 @@ -240,6 +242,8 @@ static ssize_t sta_ht_capa_read(struct f
69989 struct sta_info *sta = file->private_data;
69990 struct ieee80211_sta_ht_cap *htc = &sta->sta.ht_cap;
69991
69992 + pax_track_stack();
69993 +
69994 p += scnprintf(p, sizeof(buf) + buf - p, "ht %ssupported\n",
69995 htc->ht_supported ? "" : "not ");
69996 if (htc->ht_supported) {
69997 diff -urNp linux-3.0.4/net/mac80211/ieee80211_i.h linux-3.0.4/net/mac80211/ieee80211_i.h
69998 --- linux-3.0.4/net/mac80211/ieee80211_i.h 2011-07-21 22:17:23.000000000 -0400
69999 +++ linux-3.0.4/net/mac80211/ieee80211_i.h 2011-08-23 21:47:56.000000000 -0400
70000 @@ -27,6 +27,7 @@
70001 #include <net/ieee80211_radiotap.h>
70002 #include <net/cfg80211.h>
70003 #include <net/mac80211.h>
70004 +#include <asm/local.h>
70005 #include "key.h"
70006 #include "sta_info.h"
70007
70008 @@ -721,7 +722,7 @@ struct ieee80211_local {
70009 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
70010 spinlock_t queue_stop_reason_lock;
70011
70012 - int open_count;
70013 + local_t open_count;
70014 int monitors, cooked_mntrs;
70015 /* number of interfaces with corresponding FIF_ flags */
70016 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
70017 diff -urNp linux-3.0.4/net/mac80211/iface.c linux-3.0.4/net/mac80211/iface.c
70018 --- linux-3.0.4/net/mac80211/iface.c 2011-09-02 18:11:21.000000000 -0400
70019 +++ linux-3.0.4/net/mac80211/iface.c 2011-08-23 21:47:56.000000000 -0400
70020 @@ -211,7 +211,7 @@ static int ieee80211_do_open(struct net_
70021 break;
70022 }
70023
70024 - if (local->open_count == 0) {
70025 + if (local_read(&local->open_count) == 0) {
70026 res = drv_start(local);
70027 if (res)
70028 goto err_del_bss;
70029 @@ -235,7 +235,7 @@ static int ieee80211_do_open(struct net_
70030 memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
70031
70032 if (!is_valid_ether_addr(dev->dev_addr)) {
70033 - if (!local->open_count)
70034 + if (!local_read(&local->open_count))
70035 drv_stop(local);
70036 return -EADDRNOTAVAIL;
70037 }
70038 @@ -327,7 +327,7 @@ static int ieee80211_do_open(struct net_
70039 mutex_unlock(&local->mtx);
70040
70041 if (coming_up)
70042 - local->open_count++;
70043 + local_inc(&local->open_count);
70044
70045 if (hw_reconf_flags) {
70046 ieee80211_hw_config(local, hw_reconf_flags);
70047 @@ -347,7 +347,7 @@ static int ieee80211_do_open(struct net_
70048 err_del_interface:
70049 drv_remove_interface(local, &sdata->vif);
70050 err_stop:
70051 - if (!local->open_count)
70052 + if (!local_read(&local->open_count))
70053 drv_stop(local);
70054 err_del_bss:
70055 sdata->bss = NULL;
70056 @@ -475,7 +475,7 @@ static void ieee80211_do_stop(struct iee
70057 }
70058
70059 if (going_down)
70060 - local->open_count--;
70061 + local_dec(&local->open_count);
70062
70063 switch (sdata->vif.type) {
70064 case NL80211_IFTYPE_AP_VLAN:
70065 @@ -534,7 +534,7 @@ static void ieee80211_do_stop(struct iee
70066
70067 ieee80211_recalc_ps(local, -1);
70068
70069 - if (local->open_count == 0) {
70070 + if (local_read(&local->open_count) == 0) {
70071 if (local->ops->napi_poll)
70072 napi_disable(&local->napi);
70073 ieee80211_clear_tx_pending(local);
70074 diff -urNp linux-3.0.4/net/mac80211/main.c linux-3.0.4/net/mac80211/main.c
70075 --- linux-3.0.4/net/mac80211/main.c 2011-07-21 22:17:23.000000000 -0400
70076 +++ linux-3.0.4/net/mac80211/main.c 2011-08-23 21:47:56.000000000 -0400
70077 @@ -209,7 +209,7 @@ int ieee80211_hw_config(struct ieee80211
70078 local->hw.conf.power_level = power;
70079 }
70080
70081 - if (changed && local->open_count) {
70082 + if (changed && local_read(&local->open_count)) {
70083 ret = drv_config(local, changed);
70084 /*
70085 * Goal:
70086 diff -urNp linux-3.0.4/net/mac80211/mlme.c linux-3.0.4/net/mac80211/mlme.c
70087 --- linux-3.0.4/net/mac80211/mlme.c 2011-09-02 18:11:21.000000000 -0400
70088 +++ linux-3.0.4/net/mac80211/mlme.c 2011-08-23 21:48:14.000000000 -0400
70089 @@ -1444,6 +1444,8 @@ static bool ieee80211_assoc_success(stru
70090 bool have_higher_than_11mbit = false;
70091 u16 ap_ht_cap_flags;
70092
70093 + pax_track_stack();
70094 +
70095 /* AssocResp and ReassocResp have identical structure */
70096
70097 aid = le16_to_cpu(mgmt->u.assoc_resp.aid);
70098 diff -urNp linux-3.0.4/net/mac80211/pm.c linux-3.0.4/net/mac80211/pm.c
70099 --- linux-3.0.4/net/mac80211/pm.c 2011-07-21 22:17:23.000000000 -0400
70100 +++ linux-3.0.4/net/mac80211/pm.c 2011-08-23 21:47:56.000000000 -0400
70101 @@ -47,7 +47,7 @@ int __ieee80211_suspend(struct ieee80211
70102 cancel_work_sync(&local->dynamic_ps_enable_work);
70103 del_timer_sync(&local->dynamic_ps_timer);
70104
70105 - local->wowlan = wowlan && local->open_count;
70106 + local->wowlan = wowlan && local_read(&local->open_count);
70107 if (local->wowlan) {
70108 int err = drv_suspend(local, wowlan);
70109 if (err) {
70110 @@ -111,7 +111,7 @@ int __ieee80211_suspend(struct ieee80211
70111 }
70112
70113 /* stop hardware - this must stop RX */
70114 - if (local->open_count)
70115 + if (local_read(&local->open_count))
70116 ieee80211_stop_device(local);
70117
70118 suspend:
70119 diff -urNp linux-3.0.4/net/mac80211/rate.c linux-3.0.4/net/mac80211/rate.c
70120 --- linux-3.0.4/net/mac80211/rate.c 2011-07-21 22:17:23.000000000 -0400
70121 +++ linux-3.0.4/net/mac80211/rate.c 2011-08-23 21:47:56.000000000 -0400
70122 @@ -371,7 +371,7 @@ int ieee80211_init_rate_ctrl_alg(struct
70123
70124 ASSERT_RTNL();
70125
70126 - if (local->open_count)
70127 + if (local_read(&local->open_count))
70128 return -EBUSY;
70129
70130 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
70131 diff -urNp linux-3.0.4/net/mac80211/rc80211_pid_debugfs.c linux-3.0.4/net/mac80211/rc80211_pid_debugfs.c
70132 --- linux-3.0.4/net/mac80211/rc80211_pid_debugfs.c 2011-07-21 22:17:23.000000000 -0400
70133 +++ linux-3.0.4/net/mac80211/rc80211_pid_debugfs.c 2011-08-23 21:47:56.000000000 -0400
70134 @@ -192,7 +192,7 @@ static ssize_t rate_control_pid_events_r
70135
70136 spin_unlock_irqrestore(&events->lock, status);
70137
70138 - if (copy_to_user(buf, pb, p))
70139 + if (p > sizeof(pb) || copy_to_user(buf, pb, p))
70140 return -EFAULT;
70141
70142 return p;
70143 diff -urNp linux-3.0.4/net/mac80211/util.c linux-3.0.4/net/mac80211/util.c
70144 --- linux-3.0.4/net/mac80211/util.c 2011-07-21 22:17:23.000000000 -0400
70145 +++ linux-3.0.4/net/mac80211/util.c 2011-08-23 21:47:56.000000000 -0400
70146 @@ -1147,7 +1147,7 @@ int ieee80211_reconfig(struct ieee80211_
70147 #endif
70148
70149 /* restart hardware */
70150 - if (local->open_count) {
70151 + if (local_read(&local->open_count)) {
70152 /*
70153 * Upon resume hardware can sometimes be goofy due to
70154 * various platform / driver / bus issues, so restarting
70155 diff -urNp linux-3.0.4/net/netfilter/ipvs/ip_vs_conn.c linux-3.0.4/net/netfilter/ipvs/ip_vs_conn.c
70156 --- linux-3.0.4/net/netfilter/ipvs/ip_vs_conn.c 2011-07-21 22:17:23.000000000 -0400
70157 +++ linux-3.0.4/net/netfilter/ipvs/ip_vs_conn.c 2011-08-23 21:47:56.000000000 -0400
70158 @@ -556,7 +556,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, s
70159 /* Increase the refcnt counter of the dest */
70160 atomic_inc(&dest->refcnt);
70161
70162 - conn_flags = atomic_read(&dest->conn_flags);
70163 + conn_flags = atomic_read_unchecked(&dest->conn_flags);
70164 if (cp->protocol != IPPROTO_UDP)
70165 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
70166 /* Bind with the destination and its corresponding transmitter */
70167 @@ -869,7 +869,7 @@ ip_vs_conn_new(const struct ip_vs_conn_p
70168 atomic_set(&cp->refcnt, 1);
70169
70170 atomic_set(&cp->n_control, 0);
70171 - atomic_set(&cp->in_pkts, 0);
70172 + atomic_set_unchecked(&cp->in_pkts, 0);
70173
70174 atomic_inc(&ipvs->conn_count);
70175 if (flags & IP_VS_CONN_F_NO_CPORT)
70176 @@ -1149,7 +1149,7 @@ static inline int todrop_entry(struct ip
70177
70178 /* Don't drop the entry if its number of incoming packets is not
70179 located in [0, 8] */
70180 - i = atomic_read(&cp->in_pkts);
70181 + i = atomic_read_unchecked(&cp->in_pkts);
70182 if (i > 8 || i < 0) return 0;
70183
70184 if (!todrop_rate[i]) return 0;
70185 diff -urNp linux-3.0.4/net/netfilter/ipvs/ip_vs_core.c linux-3.0.4/net/netfilter/ipvs/ip_vs_core.c
70186 --- linux-3.0.4/net/netfilter/ipvs/ip_vs_core.c 2011-07-21 22:17:23.000000000 -0400
70187 +++ linux-3.0.4/net/netfilter/ipvs/ip_vs_core.c 2011-08-23 21:47:56.000000000 -0400
70188 @@ -563,7 +563,7 @@ int ip_vs_leave(struct ip_vs_service *sv
70189 ret = cp->packet_xmit(skb, cp, pd->pp);
70190 /* do not touch skb anymore */
70191
70192 - atomic_inc(&cp->in_pkts);
70193 + atomic_inc_unchecked(&cp->in_pkts);
70194 ip_vs_conn_put(cp);
70195 return ret;
70196 }
70197 @@ -1613,7 +1613,7 @@ ip_vs_in(unsigned int hooknum, struct sk
70198 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
70199 pkts = sysctl_sync_threshold(ipvs);
70200 else
70201 - pkts = atomic_add_return(1, &cp->in_pkts);
70202 + pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
70203
70204 if ((ipvs->sync_state & IP_VS_STATE_MASTER) &&
70205 cp->protocol == IPPROTO_SCTP) {
70206 diff -urNp linux-3.0.4/net/netfilter/ipvs/ip_vs_ctl.c linux-3.0.4/net/netfilter/ipvs/ip_vs_ctl.c
70207 --- linux-3.0.4/net/netfilter/ipvs/ip_vs_ctl.c 2011-09-02 18:11:21.000000000 -0400
70208 +++ linux-3.0.4/net/netfilter/ipvs/ip_vs_ctl.c 2011-08-23 21:48:14.000000000 -0400
70209 @@ -782,7 +782,7 @@ __ip_vs_update_dest(struct ip_vs_service
70210 ip_vs_rs_hash(ipvs, dest);
70211 write_unlock_bh(&ipvs->rs_lock);
70212 }
70213 - atomic_set(&dest->conn_flags, conn_flags);
70214 + atomic_set_unchecked(&dest->conn_flags, conn_flags);
70215
70216 /* bind the service */
70217 if (!dest->svc) {
70218 @@ -2027,7 +2027,7 @@ static int ip_vs_info_seq_show(struct se
70219 " %-7s %-6d %-10d %-10d\n",
70220 &dest->addr.in6,
70221 ntohs(dest->port),
70222 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
70223 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
70224 atomic_read(&dest->weight),
70225 atomic_read(&dest->activeconns),
70226 atomic_read(&dest->inactconns));
70227 @@ -2038,7 +2038,7 @@ static int ip_vs_info_seq_show(struct se
70228 "%-7s %-6d %-10d %-10d\n",
70229 ntohl(dest->addr.ip),
70230 ntohs(dest->port),
70231 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
70232 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
70233 atomic_read(&dest->weight),
70234 atomic_read(&dest->activeconns),
70235 atomic_read(&dest->inactconns));
70236 @@ -2284,6 +2284,8 @@ do_ip_vs_set_ctl(struct sock *sk, int cm
70237 struct ip_vs_dest_user *udest_compat;
70238 struct ip_vs_dest_user_kern udest;
70239
70240 + pax_track_stack();
70241 +
70242 if (!capable(CAP_NET_ADMIN))
70243 return -EPERM;
70244
70245 @@ -2498,7 +2500,7 @@ __ip_vs_get_dest_entries(struct net *net
70246
70247 entry.addr = dest->addr.ip;
70248 entry.port = dest->port;
70249 - entry.conn_flags = atomic_read(&dest->conn_flags);
70250 + entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
70251 entry.weight = atomic_read(&dest->weight);
70252 entry.u_threshold = dest->u_threshold;
70253 entry.l_threshold = dest->l_threshold;
70254 @@ -3026,7 +3028,7 @@ static int ip_vs_genl_fill_dest(struct s
70255 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
70256
70257 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
70258 - atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
70259 + atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
70260 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
70261 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
70262 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
70263 diff -urNp linux-3.0.4/net/netfilter/ipvs/ip_vs_sync.c linux-3.0.4/net/netfilter/ipvs/ip_vs_sync.c
70264 --- linux-3.0.4/net/netfilter/ipvs/ip_vs_sync.c 2011-07-21 22:17:23.000000000 -0400
70265 +++ linux-3.0.4/net/netfilter/ipvs/ip_vs_sync.c 2011-08-23 21:47:56.000000000 -0400
70266 @@ -648,7 +648,7 @@ control:
70267 * i.e only increment in_pkts for Templates.
70268 */
70269 if (cp->flags & IP_VS_CONN_F_TEMPLATE) {
70270 - int pkts = atomic_add_return(1, &cp->in_pkts);
70271 + int pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
70272
70273 if (pkts % sysctl_sync_period(ipvs) != 1)
70274 return;
70275 @@ -794,7 +794,7 @@ static void ip_vs_proc_conn(struct net *
70276
70277 if (opt)
70278 memcpy(&cp->in_seq, opt, sizeof(*opt));
70279 - atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
70280 + atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
70281 cp->state = state;
70282 cp->old_state = cp->state;
70283 /*
70284 diff -urNp linux-3.0.4/net/netfilter/ipvs/ip_vs_xmit.c linux-3.0.4/net/netfilter/ipvs/ip_vs_xmit.c
70285 --- linux-3.0.4/net/netfilter/ipvs/ip_vs_xmit.c 2011-07-21 22:17:23.000000000 -0400
70286 +++ linux-3.0.4/net/netfilter/ipvs/ip_vs_xmit.c 2011-08-23 21:47:56.000000000 -0400
70287 @@ -1151,7 +1151,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, str
70288 else
70289 rc = NF_ACCEPT;
70290 /* do not touch skb anymore */
70291 - atomic_inc(&cp->in_pkts);
70292 + atomic_inc_unchecked(&cp->in_pkts);
70293 goto out;
70294 }
70295
70296 @@ -1272,7 +1272,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb,
70297 else
70298 rc = NF_ACCEPT;
70299 /* do not touch skb anymore */
70300 - atomic_inc(&cp->in_pkts);
70301 + atomic_inc_unchecked(&cp->in_pkts);
70302 goto out;
70303 }
70304
70305 diff -urNp linux-3.0.4/net/netfilter/Kconfig linux-3.0.4/net/netfilter/Kconfig
70306 --- linux-3.0.4/net/netfilter/Kconfig 2011-07-21 22:17:23.000000000 -0400
70307 +++ linux-3.0.4/net/netfilter/Kconfig 2011-08-23 21:48:14.000000000 -0400
70308 @@ -781,6 +781,16 @@ config NETFILTER_XT_MATCH_ESP
70309
70310 To compile it as a module, choose M here. If unsure, say N.
70311
70312 +config NETFILTER_XT_MATCH_GRADM
70313 + tristate '"gradm" match support'
70314 + depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
70315 + depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
70316 + ---help---
70317 + The gradm match allows to match on grsecurity RBAC being enabled.
70318 + It is useful when iptables rules are applied early on bootup to
70319 + prevent connections to the machine (except from a trusted host)
70320 + while the RBAC system is disabled.
70321 +
70322 config NETFILTER_XT_MATCH_HASHLIMIT
70323 tristate '"hashlimit" match support'
70324 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
70325 diff -urNp linux-3.0.4/net/netfilter/Makefile linux-3.0.4/net/netfilter/Makefile
70326 --- linux-3.0.4/net/netfilter/Makefile 2011-07-21 22:17:23.000000000 -0400
70327 +++ linux-3.0.4/net/netfilter/Makefile 2011-08-23 21:48:14.000000000 -0400
70328 @@ -81,6 +81,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) +=
70329 obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
70330 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
70331 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
70332 +obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
70333 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
70334 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
70335 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
70336 diff -urNp linux-3.0.4/net/netfilter/nfnetlink_log.c linux-3.0.4/net/netfilter/nfnetlink_log.c
70337 --- linux-3.0.4/net/netfilter/nfnetlink_log.c 2011-07-21 22:17:23.000000000 -0400
70338 +++ linux-3.0.4/net/netfilter/nfnetlink_log.c 2011-08-23 21:47:56.000000000 -0400
70339 @@ -70,7 +70,7 @@ struct nfulnl_instance {
70340 };
70341
70342 static DEFINE_SPINLOCK(instances_lock);
70343 -static atomic_t global_seq;
70344 +static atomic_unchecked_t global_seq;
70345
70346 #define INSTANCE_BUCKETS 16
70347 static struct hlist_head instance_table[INSTANCE_BUCKETS];
70348 @@ -505,7 +505,7 @@ __build_packet_message(struct nfulnl_ins
70349 /* global sequence number */
70350 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
70351 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
70352 - htonl(atomic_inc_return(&global_seq)));
70353 + htonl(atomic_inc_return_unchecked(&global_seq)));
70354
70355 if (data_len) {
70356 struct nlattr *nla;
70357 diff -urNp linux-3.0.4/net/netfilter/nfnetlink_queue.c linux-3.0.4/net/netfilter/nfnetlink_queue.c
70358 --- linux-3.0.4/net/netfilter/nfnetlink_queue.c 2011-07-21 22:17:23.000000000 -0400
70359 +++ linux-3.0.4/net/netfilter/nfnetlink_queue.c 2011-08-23 21:47:56.000000000 -0400
70360 @@ -58,7 +58,7 @@ struct nfqnl_instance {
70361 */
70362 spinlock_t lock;
70363 unsigned int queue_total;
70364 - atomic_t id_sequence; /* 'sequence' of pkt ids */
70365 + atomic_unchecked_t id_sequence; /* 'sequence' of pkt ids */
70366 struct list_head queue_list; /* packets in queue */
70367 };
70368
70369 @@ -272,7 +272,7 @@ nfqnl_build_packet_message(struct nfqnl_
70370 nfmsg->version = NFNETLINK_V0;
70371 nfmsg->res_id = htons(queue->queue_num);
70372
70373 - entry->id = atomic_inc_return(&queue->id_sequence);
70374 + entry->id = atomic_inc_return_unchecked(&queue->id_sequence);
70375 pmsg.packet_id = htonl(entry->id);
70376 pmsg.hw_protocol = entskb->protocol;
70377 pmsg.hook = entry->hook;
70378 @@ -870,7 +870,7 @@ static int seq_show(struct seq_file *s,
70379 inst->peer_pid, inst->queue_total,
70380 inst->copy_mode, inst->copy_range,
70381 inst->queue_dropped, inst->queue_user_dropped,
70382 - atomic_read(&inst->id_sequence), 1);
70383 + atomic_read_unchecked(&inst->id_sequence), 1);
70384 }
70385
70386 static const struct seq_operations nfqnl_seq_ops = {
70387 diff -urNp linux-3.0.4/net/netfilter/xt_gradm.c linux-3.0.4/net/netfilter/xt_gradm.c
70388 --- linux-3.0.4/net/netfilter/xt_gradm.c 1969-12-31 19:00:00.000000000 -0500
70389 +++ linux-3.0.4/net/netfilter/xt_gradm.c 2011-08-23 21:48:14.000000000 -0400
70390 @@ -0,0 +1,51 @@
70391 +/*
70392 + * gradm match for netfilter
70393 + * Copyright © Zbigniew Krzystolik, 2010
70394 + *
70395 + * This program is free software; you can redistribute it and/or modify
70396 + * it under the terms of the GNU General Public License; either version
70397 + * 2 or 3 as published by the Free Software Foundation.
70398 + */
70399 +#include <linux/module.h>
70400 +#include <linux/moduleparam.h>
70401 +#include <linux/skbuff.h>
70402 +#include <linux/netfilter/x_tables.h>
70403 +#include <linux/grsecurity.h>
70404 +#include <linux/netfilter/xt_gradm.h>
70405 +
70406 +static bool
70407 +gradm_mt(const struct sk_buff *skb, struct xt_action_param *par)
70408 +{
70409 + const struct xt_gradm_mtinfo *info = par->matchinfo;
70410 + bool retval = false;
70411 + if (gr_acl_is_enabled())
70412 + retval = true;
70413 + return retval ^ info->invflags;
70414 +}
70415 +
70416 +static struct xt_match gradm_mt_reg __read_mostly = {
70417 + .name = "gradm",
70418 + .revision = 0,
70419 + .family = NFPROTO_UNSPEC,
70420 + .match = gradm_mt,
70421 + .matchsize = XT_ALIGN(sizeof(struct xt_gradm_mtinfo)),
70422 + .me = THIS_MODULE,
70423 +};
70424 +
70425 +static int __init gradm_mt_init(void)
70426 +{
70427 + return xt_register_match(&gradm_mt_reg);
70428 +}
70429 +
70430 +static void __exit gradm_mt_exit(void)
70431 +{
70432 + xt_unregister_match(&gradm_mt_reg);
70433 +}
70434 +
70435 +module_init(gradm_mt_init);
70436 +module_exit(gradm_mt_exit);
70437 +MODULE_AUTHOR("Zbigniew Krzystolik <zbyniu@destrukcja.pl>");
70438 +MODULE_DESCRIPTION("Xtables: Grsecurity RBAC match");
70439 +MODULE_LICENSE("GPL");
70440 +MODULE_ALIAS("ipt_gradm");
70441 +MODULE_ALIAS("ip6t_gradm");
70442 diff -urNp linux-3.0.4/net/netfilter/xt_statistic.c linux-3.0.4/net/netfilter/xt_statistic.c
70443 --- linux-3.0.4/net/netfilter/xt_statistic.c 2011-07-21 22:17:23.000000000 -0400
70444 +++ linux-3.0.4/net/netfilter/xt_statistic.c 2011-08-23 21:47:56.000000000 -0400
70445 @@ -18,7 +18,7 @@
70446 #include <linux/netfilter/x_tables.h>
70447
70448 struct xt_statistic_priv {
70449 - atomic_t count;
70450 + atomic_unchecked_t count;
70451 } ____cacheline_aligned_in_smp;
70452
70453 MODULE_LICENSE("GPL");
70454 @@ -41,9 +41,9 @@ statistic_mt(const struct sk_buff *skb,
70455 break;
70456 case XT_STATISTIC_MODE_NTH:
70457 do {
70458 - oval = atomic_read(&info->master->count);
70459 + oval = atomic_read_unchecked(&info->master->count);
70460 nval = (oval == info->u.nth.every) ? 0 : oval + 1;
70461 - } while (atomic_cmpxchg(&info->master->count, oval, nval) != oval);
70462 + } while (atomic_cmpxchg_unchecked(&info->master->count, oval, nval) != oval);
70463 if (nval == 0)
70464 ret = !ret;
70465 break;
70466 @@ -63,7 +63,7 @@ static int statistic_mt_check(const stru
70467 info->master = kzalloc(sizeof(*info->master), GFP_KERNEL);
70468 if (info->master == NULL)
70469 return -ENOMEM;
70470 - atomic_set(&info->master->count, info->u.nth.count);
70471 + atomic_set_unchecked(&info->master->count, info->u.nth.count);
70472
70473 return 0;
70474 }
70475 diff -urNp linux-3.0.4/net/netlink/af_netlink.c linux-3.0.4/net/netlink/af_netlink.c
70476 --- linux-3.0.4/net/netlink/af_netlink.c 2011-07-21 22:17:23.000000000 -0400
70477 +++ linux-3.0.4/net/netlink/af_netlink.c 2011-08-23 21:47:56.000000000 -0400
70478 @@ -742,7 +742,7 @@ static void netlink_overrun(struct sock
70479 sk->sk_error_report(sk);
70480 }
70481 }
70482 - atomic_inc(&sk->sk_drops);
70483 + atomic_inc_unchecked(&sk->sk_drops);
70484 }
70485
70486 static struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid)
70487 @@ -1994,7 +1994,7 @@ static int netlink_seq_show(struct seq_f
70488 sk_wmem_alloc_get(s),
70489 nlk->cb,
70490 atomic_read(&s->sk_refcnt),
70491 - atomic_read(&s->sk_drops),
70492 + atomic_read_unchecked(&s->sk_drops),
70493 sock_i_ino(s)
70494 );
70495
70496 diff -urNp linux-3.0.4/net/netrom/af_netrom.c linux-3.0.4/net/netrom/af_netrom.c
70497 --- linux-3.0.4/net/netrom/af_netrom.c 2011-07-21 22:17:23.000000000 -0400
70498 +++ linux-3.0.4/net/netrom/af_netrom.c 2011-08-23 21:48:14.000000000 -0400
70499 @@ -839,6 +839,7 @@ static int nr_getname(struct socket *soc
70500 struct sock *sk = sock->sk;
70501 struct nr_sock *nr = nr_sk(sk);
70502
70503 + memset(sax, 0, sizeof(*sax));
70504 lock_sock(sk);
70505 if (peer != 0) {
70506 if (sk->sk_state != TCP_ESTABLISHED) {
70507 @@ -853,7 +854,6 @@ static int nr_getname(struct socket *soc
70508 *uaddr_len = sizeof(struct full_sockaddr_ax25);
70509 } else {
70510 sax->fsa_ax25.sax25_family = AF_NETROM;
70511 - sax->fsa_ax25.sax25_ndigis = 0;
70512 sax->fsa_ax25.sax25_call = nr->source_addr;
70513 *uaddr_len = sizeof(struct sockaddr_ax25);
70514 }
70515 diff -urNp linux-3.0.4/net/packet/af_packet.c linux-3.0.4/net/packet/af_packet.c
70516 --- linux-3.0.4/net/packet/af_packet.c 2011-07-21 22:17:23.000000000 -0400
70517 +++ linux-3.0.4/net/packet/af_packet.c 2011-08-23 21:47:56.000000000 -0400
70518 @@ -647,14 +647,14 @@ static int packet_rcv(struct sk_buff *sk
70519
70520 spin_lock(&sk->sk_receive_queue.lock);
70521 po->stats.tp_packets++;
70522 - skb->dropcount = atomic_read(&sk->sk_drops);
70523 + skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
70524 __skb_queue_tail(&sk->sk_receive_queue, skb);
70525 spin_unlock(&sk->sk_receive_queue.lock);
70526 sk->sk_data_ready(sk, skb->len);
70527 return 0;
70528
70529 drop_n_acct:
70530 - po->stats.tp_drops = atomic_inc_return(&sk->sk_drops);
70531 + po->stats.tp_drops = atomic_inc_return_unchecked(&sk->sk_drops);
70532
70533 drop_n_restore:
70534 if (skb_head != skb->data && skb_shared(skb)) {
70535 @@ -2168,7 +2168,7 @@ static int packet_getsockopt(struct sock
70536 case PACKET_HDRLEN:
70537 if (len > sizeof(int))
70538 len = sizeof(int);
70539 - if (copy_from_user(&val, optval, len))
70540 + if (len > sizeof(val) || copy_from_user(&val, optval, len))
70541 return -EFAULT;
70542 switch (val) {
70543 case TPACKET_V1:
70544 @@ -2206,7 +2206,7 @@ static int packet_getsockopt(struct sock
70545
70546 if (put_user(len, optlen))
70547 return -EFAULT;
70548 - if (copy_to_user(optval, data, len))
70549 + if (len > sizeof(st) || copy_to_user(optval, data, len))
70550 return -EFAULT;
70551 return 0;
70552 }
70553 diff -urNp linux-3.0.4/net/phonet/af_phonet.c linux-3.0.4/net/phonet/af_phonet.c
70554 --- linux-3.0.4/net/phonet/af_phonet.c 2011-07-21 22:17:23.000000000 -0400
70555 +++ linux-3.0.4/net/phonet/af_phonet.c 2011-08-23 21:48:14.000000000 -0400
70556 @@ -41,7 +41,7 @@ static struct phonet_protocol *phonet_pr
70557 {
70558 struct phonet_protocol *pp;
70559
70560 - if (protocol >= PHONET_NPROTO)
70561 + if (protocol < 0 || protocol >= PHONET_NPROTO)
70562 return NULL;
70563
70564 rcu_read_lock();
70565 @@ -469,7 +469,7 @@ int __init_or_module phonet_proto_regist
70566 {
70567 int err = 0;
70568
70569 - if (protocol >= PHONET_NPROTO)
70570 + if (protocol < 0 || protocol >= PHONET_NPROTO)
70571 return -EINVAL;
70572
70573 err = proto_register(pp->prot, 1);
70574 diff -urNp linux-3.0.4/net/phonet/pep.c linux-3.0.4/net/phonet/pep.c
70575 --- linux-3.0.4/net/phonet/pep.c 2011-07-21 22:17:23.000000000 -0400
70576 +++ linux-3.0.4/net/phonet/pep.c 2011-08-23 21:47:56.000000000 -0400
70577 @@ -387,7 +387,7 @@ static int pipe_do_rcv(struct sock *sk,
70578
70579 case PNS_PEP_CTRL_REQ:
70580 if (skb_queue_len(&pn->ctrlreq_queue) >= PNPIPE_CTRLREQ_MAX) {
70581 - atomic_inc(&sk->sk_drops);
70582 + atomic_inc_unchecked(&sk->sk_drops);
70583 break;
70584 }
70585 __skb_pull(skb, 4);
70586 @@ -408,7 +408,7 @@ static int pipe_do_rcv(struct sock *sk,
70587 }
70588
70589 if (pn->rx_credits == 0) {
70590 - atomic_inc(&sk->sk_drops);
70591 + atomic_inc_unchecked(&sk->sk_drops);
70592 err = -ENOBUFS;
70593 break;
70594 }
70595 @@ -556,7 +556,7 @@ static int pipe_handler_do_rcv(struct so
70596 }
70597
70598 if (pn->rx_credits == 0) {
70599 - atomic_inc(&sk->sk_drops);
70600 + atomic_inc_unchecked(&sk->sk_drops);
70601 err = NET_RX_DROP;
70602 break;
70603 }
70604 diff -urNp linux-3.0.4/net/phonet/socket.c linux-3.0.4/net/phonet/socket.c
70605 --- linux-3.0.4/net/phonet/socket.c 2011-07-21 22:17:23.000000000 -0400
70606 +++ linux-3.0.4/net/phonet/socket.c 2011-08-23 21:48:14.000000000 -0400
70607 @@ -612,8 +612,13 @@ static int pn_sock_seq_show(struct seq_f
70608 pn->resource, sk->sk_state,
70609 sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk),
70610 sock_i_uid(sk), sock_i_ino(sk),
70611 - atomic_read(&sk->sk_refcnt), sk,
70612 - atomic_read(&sk->sk_drops), &len);
70613 + atomic_read(&sk->sk_refcnt),
70614 +#ifdef CONFIG_GRKERNSEC_HIDESYM
70615 + NULL,
70616 +#else
70617 + sk,
70618 +#endif
70619 + atomic_read_unchecked(&sk->sk_drops), &len);
70620 }
70621 seq_printf(seq, "%*s\n", 127 - len, "");
70622 return 0;
70623 diff -urNp linux-3.0.4/net/rds/cong.c linux-3.0.4/net/rds/cong.c
70624 --- linux-3.0.4/net/rds/cong.c 2011-07-21 22:17:23.000000000 -0400
70625 +++ linux-3.0.4/net/rds/cong.c 2011-08-23 21:47:56.000000000 -0400
70626 @@ -77,7 +77,7 @@
70627 * finds that the saved generation number is smaller than the global generation
70628 * number, it wakes up the process.
70629 */
70630 -static atomic_t rds_cong_generation = ATOMIC_INIT(0);
70631 +static atomic_unchecked_t rds_cong_generation = ATOMIC_INIT(0);
70632
70633 /*
70634 * Congestion monitoring
70635 @@ -232,7 +232,7 @@ void rds_cong_map_updated(struct rds_con
70636 rdsdebug("waking map %p for %pI4\n",
70637 map, &map->m_addr);
70638 rds_stats_inc(s_cong_update_received);
70639 - atomic_inc(&rds_cong_generation);
70640 + atomic_inc_unchecked(&rds_cong_generation);
70641 if (waitqueue_active(&map->m_waitq))
70642 wake_up(&map->m_waitq);
70643 if (waitqueue_active(&rds_poll_waitq))
70644 @@ -258,7 +258,7 @@ EXPORT_SYMBOL_GPL(rds_cong_map_updated);
70645
70646 int rds_cong_updated_since(unsigned long *recent)
70647 {
70648 - unsigned long gen = atomic_read(&rds_cong_generation);
70649 + unsigned long gen = atomic_read_unchecked(&rds_cong_generation);
70650
70651 if (likely(*recent == gen))
70652 return 0;
70653 diff -urNp linux-3.0.4/net/rds/ib_cm.c linux-3.0.4/net/rds/ib_cm.c
70654 --- linux-3.0.4/net/rds/ib_cm.c 2011-07-21 22:17:23.000000000 -0400
70655 +++ linux-3.0.4/net/rds/ib_cm.c 2011-08-23 21:47:56.000000000 -0400
70656 @@ -720,7 +720,7 @@ void rds_ib_conn_shutdown(struct rds_con
70657 /* Clear the ACK state */
70658 clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
70659 #ifdef KERNEL_HAS_ATOMIC64
70660 - atomic64_set(&ic->i_ack_next, 0);
70661 + atomic64_set_unchecked(&ic->i_ack_next, 0);
70662 #else
70663 ic->i_ack_next = 0;
70664 #endif
70665 diff -urNp linux-3.0.4/net/rds/ib.h linux-3.0.4/net/rds/ib.h
70666 --- linux-3.0.4/net/rds/ib.h 2011-07-21 22:17:23.000000000 -0400
70667 +++ linux-3.0.4/net/rds/ib.h 2011-08-23 21:47:56.000000000 -0400
70668 @@ -127,7 +127,7 @@ struct rds_ib_connection {
70669 /* sending acks */
70670 unsigned long i_ack_flags;
70671 #ifdef KERNEL_HAS_ATOMIC64
70672 - atomic64_t i_ack_next; /* next ACK to send */
70673 + atomic64_unchecked_t i_ack_next; /* next ACK to send */
70674 #else
70675 spinlock_t i_ack_lock; /* protect i_ack_next */
70676 u64 i_ack_next; /* next ACK to send */
70677 diff -urNp linux-3.0.4/net/rds/ib_recv.c linux-3.0.4/net/rds/ib_recv.c
70678 --- linux-3.0.4/net/rds/ib_recv.c 2011-07-21 22:17:23.000000000 -0400
70679 +++ linux-3.0.4/net/rds/ib_recv.c 2011-08-23 21:47:56.000000000 -0400
70680 @@ -592,7 +592,7 @@ static u64 rds_ib_get_ack(struct rds_ib_
70681 static void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq,
70682 int ack_required)
70683 {
70684 - atomic64_set(&ic->i_ack_next, seq);
70685 + atomic64_set_unchecked(&ic->i_ack_next, seq);
70686 if (ack_required) {
70687 smp_mb__before_clear_bit();
70688 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
70689 @@ -604,7 +604,7 @@ static u64 rds_ib_get_ack(struct rds_ib_
70690 clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
70691 smp_mb__after_clear_bit();
70692
70693 - return atomic64_read(&ic->i_ack_next);
70694 + return atomic64_read_unchecked(&ic->i_ack_next);
70695 }
70696 #endif
70697
70698 diff -urNp linux-3.0.4/net/rds/iw_cm.c linux-3.0.4/net/rds/iw_cm.c
70699 --- linux-3.0.4/net/rds/iw_cm.c 2011-07-21 22:17:23.000000000 -0400
70700 +++ linux-3.0.4/net/rds/iw_cm.c 2011-08-23 21:47:56.000000000 -0400
70701 @@ -664,7 +664,7 @@ void rds_iw_conn_shutdown(struct rds_con
70702 /* Clear the ACK state */
70703 clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
70704 #ifdef KERNEL_HAS_ATOMIC64
70705 - atomic64_set(&ic->i_ack_next, 0);
70706 + atomic64_set_unchecked(&ic->i_ack_next, 0);
70707 #else
70708 ic->i_ack_next = 0;
70709 #endif
70710 diff -urNp linux-3.0.4/net/rds/iw.h linux-3.0.4/net/rds/iw.h
70711 --- linux-3.0.4/net/rds/iw.h 2011-07-21 22:17:23.000000000 -0400
70712 +++ linux-3.0.4/net/rds/iw.h 2011-08-23 21:47:56.000000000 -0400
70713 @@ -133,7 +133,7 @@ struct rds_iw_connection {
70714 /* sending acks */
70715 unsigned long i_ack_flags;
70716 #ifdef KERNEL_HAS_ATOMIC64
70717 - atomic64_t i_ack_next; /* next ACK to send */
70718 + atomic64_unchecked_t i_ack_next; /* next ACK to send */
70719 #else
70720 spinlock_t i_ack_lock; /* protect i_ack_next */
70721 u64 i_ack_next; /* next ACK to send */
70722 diff -urNp linux-3.0.4/net/rds/iw_rdma.c linux-3.0.4/net/rds/iw_rdma.c
70723 --- linux-3.0.4/net/rds/iw_rdma.c 2011-07-21 22:17:23.000000000 -0400
70724 +++ linux-3.0.4/net/rds/iw_rdma.c 2011-08-23 21:48:14.000000000 -0400
70725 @@ -182,6 +182,8 @@ int rds_iw_update_cm_id(struct rds_iw_de
70726 struct rdma_cm_id *pcm_id;
70727 int rc;
70728
70729 + pax_track_stack();
70730 +
70731 src_addr = (struct sockaddr_in *)&cm_id->route.addr.src_addr;
70732 dst_addr = (struct sockaddr_in *)&cm_id->route.addr.dst_addr;
70733
70734 diff -urNp linux-3.0.4/net/rds/iw_recv.c linux-3.0.4/net/rds/iw_recv.c
70735 --- linux-3.0.4/net/rds/iw_recv.c 2011-07-21 22:17:23.000000000 -0400
70736 +++ linux-3.0.4/net/rds/iw_recv.c 2011-08-23 21:47:56.000000000 -0400
70737 @@ -427,7 +427,7 @@ static u64 rds_iw_get_ack(struct rds_iw_
70738 static void rds_iw_set_ack(struct rds_iw_connection *ic, u64 seq,
70739 int ack_required)
70740 {
70741 - atomic64_set(&ic->i_ack_next, seq);
70742 + atomic64_set_unchecked(&ic->i_ack_next, seq);
70743 if (ack_required) {
70744 smp_mb__before_clear_bit();
70745 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
70746 @@ -439,7 +439,7 @@ static u64 rds_iw_get_ack(struct rds_iw_
70747 clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
70748 smp_mb__after_clear_bit();
70749
70750 - return atomic64_read(&ic->i_ack_next);
70751 + return atomic64_read_unchecked(&ic->i_ack_next);
70752 }
70753 #endif
70754
70755 diff -urNp linux-3.0.4/net/rxrpc/af_rxrpc.c linux-3.0.4/net/rxrpc/af_rxrpc.c
70756 --- linux-3.0.4/net/rxrpc/af_rxrpc.c 2011-07-21 22:17:23.000000000 -0400
70757 +++ linux-3.0.4/net/rxrpc/af_rxrpc.c 2011-08-23 21:47:56.000000000 -0400
70758 @@ -39,7 +39,7 @@ static const struct proto_ops rxrpc_rpc_
70759 __be32 rxrpc_epoch;
70760
70761 /* current debugging ID */
70762 -atomic_t rxrpc_debug_id;
70763 +atomic_unchecked_t rxrpc_debug_id;
70764
70765 /* count of skbs currently in use */
70766 atomic_t rxrpc_n_skbs;
70767 diff -urNp linux-3.0.4/net/rxrpc/ar-ack.c linux-3.0.4/net/rxrpc/ar-ack.c
70768 --- linux-3.0.4/net/rxrpc/ar-ack.c 2011-07-21 22:17:23.000000000 -0400
70769 +++ linux-3.0.4/net/rxrpc/ar-ack.c 2011-08-23 21:48:14.000000000 -0400
70770 @@ -175,7 +175,7 @@ static void rxrpc_resend(struct rxrpc_ca
70771
70772 _enter("{%d,%d,%d,%d},",
70773 call->acks_hard, call->acks_unacked,
70774 - atomic_read(&call->sequence),
70775 + atomic_read_unchecked(&call->sequence),
70776 CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz));
70777
70778 stop = 0;
70779 @@ -199,7 +199,7 @@ static void rxrpc_resend(struct rxrpc_ca
70780
70781 /* each Tx packet has a new serial number */
70782 sp->hdr.serial =
70783 - htonl(atomic_inc_return(&call->conn->serial));
70784 + htonl(atomic_inc_return_unchecked(&call->conn->serial));
70785
70786 hdr = (struct rxrpc_header *) txb->head;
70787 hdr->serial = sp->hdr.serial;
70788 @@ -403,7 +403,7 @@ static void rxrpc_rotate_tx_window(struc
70789 */
70790 static void rxrpc_clear_tx_window(struct rxrpc_call *call)
70791 {
70792 - rxrpc_rotate_tx_window(call, atomic_read(&call->sequence));
70793 + rxrpc_rotate_tx_window(call, atomic_read_unchecked(&call->sequence));
70794 }
70795
70796 /*
70797 @@ -629,7 +629,7 @@ process_further:
70798
70799 latest = ntohl(sp->hdr.serial);
70800 hard = ntohl(ack.firstPacket);
70801 - tx = atomic_read(&call->sequence);
70802 + tx = atomic_read_unchecked(&call->sequence);
70803
70804 _proto("Rx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
70805 latest,
70806 @@ -842,6 +842,8 @@ void rxrpc_process_call(struct work_stru
70807 u32 abort_code = RX_PROTOCOL_ERROR;
70808 u8 *acks = NULL;
70809
70810 + pax_track_stack();
70811 +
70812 //printk("\n--------------------\n");
70813 _enter("{%d,%s,%lx} [%lu]",
70814 call->debug_id, rxrpc_call_states[call->state], call->events,
70815 @@ -1161,7 +1163,7 @@ void rxrpc_process_call(struct work_stru
70816 goto maybe_reschedule;
70817
70818 send_ACK_with_skew:
70819 - ack.maxSkew = htons(atomic_read(&call->conn->hi_serial) -
70820 + ack.maxSkew = htons(atomic_read_unchecked(&call->conn->hi_serial) -
70821 ntohl(ack.serial));
70822 send_ACK:
70823 mtu = call->conn->trans->peer->if_mtu;
70824 @@ -1173,7 +1175,7 @@ send_ACK:
70825 ackinfo.rxMTU = htonl(5692);
70826 ackinfo.jumbo_max = htonl(4);
70827
70828 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
70829 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
70830 _proto("Tx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
70831 ntohl(hdr.serial),
70832 ntohs(ack.maxSkew),
70833 @@ -1191,7 +1193,7 @@ send_ACK:
70834 send_message:
70835 _debug("send message");
70836
70837 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
70838 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
70839 _proto("Tx %s %%%u", rxrpc_pkts[hdr.type], ntohl(hdr.serial));
70840 send_message_2:
70841
70842 diff -urNp linux-3.0.4/net/rxrpc/ar-call.c linux-3.0.4/net/rxrpc/ar-call.c
70843 --- linux-3.0.4/net/rxrpc/ar-call.c 2011-07-21 22:17:23.000000000 -0400
70844 +++ linux-3.0.4/net/rxrpc/ar-call.c 2011-08-23 21:47:56.000000000 -0400
70845 @@ -83,7 +83,7 @@ static struct rxrpc_call *rxrpc_alloc_ca
70846 spin_lock_init(&call->lock);
70847 rwlock_init(&call->state_lock);
70848 atomic_set(&call->usage, 1);
70849 - call->debug_id = atomic_inc_return(&rxrpc_debug_id);
70850 + call->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
70851 call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
70852
70853 memset(&call->sock_node, 0xed, sizeof(call->sock_node));
70854 diff -urNp linux-3.0.4/net/rxrpc/ar-connection.c linux-3.0.4/net/rxrpc/ar-connection.c
70855 --- linux-3.0.4/net/rxrpc/ar-connection.c 2011-07-21 22:17:23.000000000 -0400
70856 +++ linux-3.0.4/net/rxrpc/ar-connection.c 2011-08-23 21:47:56.000000000 -0400
70857 @@ -206,7 +206,7 @@ static struct rxrpc_connection *rxrpc_al
70858 rwlock_init(&conn->lock);
70859 spin_lock_init(&conn->state_lock);
70860 atomic_set(&conn->usage, 1);
70861 - conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
70862 + conn->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
70863 conn->avail_calls = RXRPC_MAXCALLS;
70864 conn->size_align = 4;
70865 conn->header_size = sizeof(struct rxrpc_header);
70866 diff -urNp linux-3.0.4/net/rxrpc/ar-connevent.c linux-3.0.4/net/rxrpc/ar-connevent.c
70867 --- linux-3.0.4/net/rxrpc/ar-connevent.c 2011-07-21 22:17:23.000000000 -0400
70868 +++ linux-3.0.4/net/rxrpc/ar-connevent.c 2011-08-23 21:47:56.000000000 -0400
70869 @@ -109,7 +109,7 @@ static int rxrpc_abort_connection(struct
70870
70871 len = iov[0].iov_len + iov[1].iov_len;
70872
70873 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
70874 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
70875 _proto("Tx CONN ABORT %%%u { %d }", ntohl(hdr.serial), abort_code);
70876
70877 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
70878 diff -urNp linux-3.0.4/net/rxrpc/ar-input.c linux-3.0.4/net/rxrpc/ar-input.c
70879 --- linux-3.0.4/net/rxrpc/ar-input.c 2011-07-21 22:17:23.000000000 -0400
70880 +++ linux-3.0.4/net/rxrpc/ar-input.c 2011-08-23 21:47:56.000000000 -0400
70881 @@ -340,9 +340,9 @@ void rxrpc_fast_process_packet(struct rx
70882 /* track the latest serial number on this connection for ACK packet
70883 * information */
70884 serial = ntohl(sp->hdr.serial);
70885 - hi_serial = atomic_read(&call->conn->hi_serial);
70886 + hi_serial = atomic_read_unchecked(&call->conn->hi_serial);
70887 while (serial > hi_serial)
70888 - hi_serial = atomic_cmpxchg(&call->conn->hi_serial, hi_serial,
70889 + hi_serial = atomic_cmpxchg_unchecked(&call->conn->hi_serial, hi_serial,
70890 serial);
70891
70892 /* request ACK generation for any ACK or DATA packet that requests
70893 diff -urNp linux-3.0.4/net/rxrpc/ar-internal.h linux-3.0.4/net/rxrpc/ar-internal.h
70894 --- linux-3.0.4/net/rxrpc/ar-internal.h 2011-07-21 22:17:23.000000000 -0400
70895 +++ linux-3.0.4/net/rxrpc/ar-internal.h 2011-08-23 21:47:56.000000000 -0400
70896 @@ -272,8 +272,8 @@ struct rxrpc_connection {
70897 int error; /* error code for local abort */
70898 int debug_id; /* debug ID for printks */
70899 unsigned call_counter; /* call ID counter */
70900 - atomic_t serial; /* packet serial number counter */
70901 - atomic_t hi_serial; /* highest serial number received */
70902 + atomic_unchecked_t serial; /* packet serial number counter */
70903 + atomic_unchecked_t hi_serial; /* highest serial number received */
70904 u8 avail_calls; /* number of calls available */
70905 u8 size_align; /* data size alignment (for security) */
70906 u8 header_size; /* rxrpc + security header size */
70907 @@ -346,7 +346,7 @@ struct rxrpc_call {
70908 spinlock_t lock;
70909 rwlock_t state_lock; /* lock for state transition */
70910 atomic_t usage;
70911 - atomic_t sequence; /* Tx data packet sequence counter */
70912 + atomic_unchecked_t sequence; /* Tx data packet sequence counter */
70913 u32 abort_code; /* local/remote abort code */
70914 enum { /* current state of call */
70915 RXRPC_CALL_CLIENT_SEND_REQUEST, /* - client sending request phase */
70916 @@ -420,7 +420,7 @@ static inline void rxrpc_abort_call(stru
70917 */
70918 extern atomic_t rxrpc_n_skbs;
70919 extern __be32 rxrpc_epoch;
70920 -extern atomic_t rxrpc_debug_id;
70921 +extern atomic_unchecked_t rxrpc_debug_id;
70922 extern struct workqueue_struct *rxrpc_workqueue;
70923
70924 /*
70925 diff -urNp linux-3.0.4/net/rxrpc/ar-local.c linux-3.0.4/net/rxrpc/ar-local.c
70926 --- linux-3.0.4/net/rxrpc/ar-local.c 2011-07-21 22:17:23.000000000 -0400
70927 +++ linux-3.0.4/net/rxrpc/ar-local.c 2011-08-23 21:47:56.000000000 -0400
70928 @@ -45,7 +45,7 @@ struct rxrpc_local *rxrpc_alloc_local(st
70929 spin_lock_init(&local->lock);
70930 rwlock_init(&local->services_lock);
70931 atomic_set(&local->usage, 1);
70932 - local->debug_id = atomic_inc_return(&rxrpc_debug_id);
70933 + local->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
70934 memcpy(&local->srx, srx, sizeof(*srx));
70935 }
70936
70937 diff -urNp linux-3.0.4/net/rxrpc/ar-output.c linux-3.0.4/net/rxrpc/ar-output.c
70938 --- linux-3.0.4/net/rxrpc/ar-output.c 2011-07-21 22:17:23.000000000 -0400
70939 +++ linux-3.0.4/net/rxrpc/ar-output.c 2011-08-23 21:47:56.000000000 -0400
70940 @@ -681,9 +681,9 @@ static int rxrpc_send_data(struct kiocb
70941 sp->hdr.cid = call->cid;
70942 sp->hdr.callNumber = call->call_id;
70943 sp->hdr.seq =
70944 - htonl(atomic_inc_return(&call->sequence));
70945 + htonl(atomic_inc_return_unchecked(&call->sequence));
70946 sp->hdr.serial =
70947 - htonl(atomic_inc_return(&conn->serial));
70948 + htonl(atomic_inc_return_unchecked(&conn->serial));
70949 sp->hdr.type = RXRPC_PACKET_TYPE_DATA;
70950 sp->hdr.userStatus = 0;
70951 sp->hdr.securityIndex = conn->security_ix;
70952 diff -urNp linux-3.0.4/net/rxrpc/ar-peer.c linux-3.0.4/net/rxrpc/ar-peer.c
70953 --- linux-3.0.4/net/rxrpc/ar-peer.c 2011-07-21 22:17:23.000000000 -0400
70954 +++ linux-3.0.4/net/rxrpc/ar-peer.c 2011-08-23 21:47:56.000000000 -0400
70955 @@ -72,7 +72,7 @@ static struct rxrpc_peer *rxrpc_alloc_pe
70956 INIT_LIST_HEAD(&peer->error_targets);
70957 spin_lock_init(&peer->lock);
70958 atomic_set(&peer->usage, 1);
70959 - peer->debug_id = atomic_inc_return(&rxrpc_debug_id);
70960 + peer->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
70961 memcpy(&peer->srx, srx, sizeof(*srx));
70962
70963 rxrpc_assess_MTU_size(peer);
70964 diff -urNp linux-3.0.4/net/rxrpc/ar-proc.c linux-3.0.4/net/rxrpc/ar-proc.c
70965 --- linux-3.0.4/net/rxrpc/ar-proc.c 2011-07-21 22:17:23.000000000 -0400
70966 +++ linux-3.0.4/net/rxrpc/ar-proc.c 2011-08-23 21:47:56.000000000 -0400
70967 @@ -164,8 +164,8 @@ static int rxrpc_connection_seq_show(str
70968 atomic_read(&conn->usage),
70969 rxrpc_conn_states[conn->state],
70970 key_serial(conn->key),
70971 - atomic_read(&conn->serial),
70972 - atomic_read(&conn->hi_serial));
70973 + atomic_read_unchecked(&conn->serial),
70974 + atomic_read_unchecked(&conn->hi_serial));
70975
70976 return 0;
70977 }
70978 diff -urNp linux-3.0.4/net/rxrpc/ar-transport.c linux-3.0.4/net/rxrpc/ar-transport.c
70979 --- linux-3.0.4/net/rxrpc/ar-transport.c 2011-07-21 22:17:23.000000000 -0400
70980 +++ linux-3.0.4/net/rxrpc/ar-transport.c 2011-08-23 21:47:56.000000000 -0400
70981 @@ -47,7 +47,7 @@ static struct rxrpc_transport *rxrpc_all
70982 spin_lock_init(&trans->client_lock);
70983 rwlock_init(&trans->conn_lock);
70984 atomic_set(&trans->usage, 1);
70985 - trans->debug_id = atomic_inc_return(&rxrpc_debug_id);
70986 + trans->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
70987
70988 if (peer->srx.transport.family == AF_INET) {
70989 switch (peer->srx.transport_type) {
70990 diff -urNp linux-3.0.4/net/rxrpc/rxkad.c linux-3.0.4/net/rxrpc/rxkad.c
70991 --- linux-3.0.4/net/rxrpc/rxkad.c 2011-07-21 22:17:23.000000000 -0400
70992 +++ linux-3.0.4/net/rxrpc/rxkad.c 2011-08-23 21:48:14.000000000 -0400
70993 @@ -211,6 +211,8 @@ static int rxkad_secure_packet_encrypt(c
70994 u16 check;
70995 int nsg;
70996
70997 + pax_track_stack();
70998 +
70999 sp = rxrpc_skb(skb);
71000
71001 _enter("");
71002 @@ -338,6 +340,8 @@ static int rxkad_verify_packet_auth(cons
71003 u16 check;
71004 int nsg;
71005
71006 + pax_track_stack();
71007 +
71008 _enter("");
71009
71010 sp = rxrpc_skb(skb);
71011 @@ -610,7 +614,7 @@ static int rxkad_issue_challenge(struct
71012
71013 len = iov[0].iov_len + iov[1].iov_len;
71014
71015 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
71016 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
71017 _proto("Tx CHALLENGE %%%u", ntohl(hdr.serial));
71018
71019 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
71020 @@ -660,7 +664,7 @@ static int rxkad_send_response(struct rx
71021
71022 len = iov[0].iov_len + iov[1].iov_len + iov[2].iov_len;
71023
71024 - hdr->serial = htonl(atomic_inc_return(&conn->serial));
71025 + hdr->serial = htonl(atomic_inc_return_unchecked(&conn->serial));
71026 _proto("Tx RESPONSE %%%u", ntohl(hdr->serial));
71027
71028 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 3, len);
71029 diff -urNp linux-3.0.4/net/sctp/proc.c linux-3.0.4/net/sctp/proc.c
71030 --- linux-3.0.4/net/sctp/proc.c 2011-07-21 22:17:23.000000000 -0400
71031 +++ linux-3.0.4/net/sctp/proc.c 2011-08-23 21:48:14.000000000 -0400
71032 @@ -318,7 +318,8 @@ static int sctp_assocs_seq_show(struct s
71033 seq_printf(seq,
71034 "%8pK %8pK %-3d %-3d %-2d %-4d "
71035 "%4d %8d %8d %7d %5lu %-5d %5d ",
71036 - assoc, sk, sctp_sk(sk)->type, sk->sk_state,
71037 + assoc, sk,
71038 + sctp_sk(sk)->type, sk->sk_state,
71039 assoc->state, hash,
71040 assoc->assoc_id,
71041 assoc->sndbuf_used,
71042 diff -urNp linux-3.0.4/net/sctp/socket.c linux-3.0.4/net/sctp/socket.c
71043 --- linux-3.0.4/net/sctp/socket.c 2011-07-21 22:17:23.000000000 -0400
71044 +++ linux-3.0.4/net/sctp/socket.c 2011-08-23 21:47:56.000000000 -0400
71045 @@ -4452,7 +4452,7 @@ static int sctp_getsockopt_peer_addrs(st
71046 addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
71047 if (space_left < addrlen)
71048 return -ENOMEM;
71049 - if (copy_to_user(to, &temp, addrlen))
71050 + if (addrlen > sizeof(temp) || copy_to_user(to, &temp, addrlen))
71051 return -EFAULT;
71052 to += addrlen;
71053 cnt++;
71054 diff -urNp linux-3.0.4/net/socket.c linux-3.0.4/net/socket.c
71055 --- linux-3.0.4/net/socket.c 2011-09-02 18:11:21.000000000 -0400
71056 +++ linux-3.0.4/net/socket.c 2011-08-23 21:48:14.000000000 -0400
71057 @@ -88,6 +88,7 @@
71058 #include <linux/nsproxy.h>
71059 #include <linux/magic.h>
71060 #include <linux/slab.h>
71061 +#include <linux/in.h>
71062
71063 #include <asm/uaccess.h>
71064 #include <asm/unistd.h>
71065 @@ -105,6 +106,8 @@
71066 #include <linux/sockios.h>
71067 #include <linux/atalk.h>
71068
71069 +#include <linux/grsock.h>
71070 +
71071 static int sock_no_open(struct inode *irrelevant, struct file *dontcare);
71072 static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
71073 unsigned long nr_segs, loff_t pos);
71074 @@ -321,7 +324,7 @@ static struct dentry *sockfs_mount(struc
71075 &sockfs_dentry_operations, SOCKFS_MAGIC);
71076 }
71077
71078 -static struct vfsmount *sock_mnt __read_mostly;
71079 +struct vfsmount *sock_mnt __read_mostly;
71080
71081 static struct file_system_type sock_fs_type = {
71082 .name = "sockfs",
71083 @@ -1187,6 +1190,8 @@ int __sock_create(struct net *net, int f
71084 return -EAFNOSUPPORT;
71085 if (type < 0 || type >= SOCK_MAX)
71086 return -EINVAL;
71087 + if (protocol < 0)
71088 + return -EINVAL;
71089
71090 /* Compatibility.
71091
71092 @@ -1319,6 +1324,16 @@ SYSCALL_DEFINE3(socket, int, family, int
71093 if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
71094 flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
71095
71096 + if(!gr_search_socket(family, type, protocol)) {
71097 + retval = -EACCES;
71098 + goto out;
71099 + }
71100 +
71101 + if (gr_handle_sock_all(family, type, protocol)) {
71102 + retval = -EACCES;
71103 + goto out;
71104 + }
71105 +
71106 retval = sock_create(family, type, protocol, &sock);
71107 if (retval < 0)
71108 goto out;
71109 @@ -1431,6 +1446,14 @@ SYSCALL_DEFINE3(bind, int, fd, struct so
71110 if (sock) {
71111 err = move_addr_to_kernel(umyaddr, addrlen, (struct sockaddr *)&address);
71112 if (err >= 0) {
71113 + if (gr_handle_sock_server((struct sockaddr *)&address)) {
71114 + err = -EACCES;
71115 + goto error;
71116 + }
71117 + err = gr_search_bind(sock, (struct sockaddr_in *)&address);
71118 + if (err)
71119 + goto error;
71120 +
71121 err = security_socket_bind(sock,
71122 (struct sockaddr *)&address,
71123 addrlen);
71124 @@ -1439,6 +1462,7 @@ SYSCALL_DEFINE3(bind, int, fd, struct so
71125 (struct sockaddr *)
71126 &address, addrlen);
71127 }
71128 +error:
71129 fput_light(sock->file, fput_needed);
71130 }
71131 return err;
71132 @@ -1462,10 +1486,20 @@ SYSCALL_DEFINE2(listen, int, fd, int, ba
71133 if ((unsigned)backlog > somaxconn)
71134 backlog = somaxconn;
71135
71136 + if (gr_handle_sock_server_other(sock->sk)) {
71137 + err = -EPERM;
71138 + goto error;
71139 + }
71140 +
71141 + err = gr_search_listen(sock);
71142 + if (err)
71143 + goto error;
71144 +
71145 err = security_socket_listen(sock, backlog);
71146 if (!err)
71147 err = sock->ops->listen(sock, backlog);
71148
71149 +error:
71150 fput_light(sock->file, fput_needed);
71151 }
71152 return err;
71153 @@ -1509,6 +1543,18 @@ SYSCALL_DEFINE4(accept4, int, fd, struct
71154 newsock->type = sock->type;
71155 newsock->ops = sock->ops;
71156
71157 + if (gr_handle_sock_server_other(sock->sk)) {
71158 + err = -EPERM;
71159 + sock_release(newsock);
71160 + goto out_put;
71161 + }
71162 +
71163 + err = gr_search_accept(sock);
71164 + if (err) {
71165 + sock_release(newsock);
71166 + goto out_put;
71167 + }
71168 +
71169 /*
71170 * We don't need try_module_get here, as the listening socket (sock)
71171 * has the protocol module (sock->ops->owner) held.
71172 @@ -1547,6 +1593,8 @@ SYSCALL_DEFINE4(accept4, int, fd, struct
71173 fd_install(newfd, newfile);
71174 err = newfd;
71175
71176 + gr_attach_curr_ip(newsock->sk);
71177 +
71178 out_put:
71179 fput_light(sock->file, fput_needed);
71180 out:
71181 @@ -1579,6 +1627,7 @@ SYSCALL_DEFINE3(connect, int, fd, struct
71182 int, addrlen)
71183 {
71184 struct socket *sock;
71185 + struct sockaddr *sck;
71186 struct sockaddr_storage address;
71187 int err, fput_needed;
71188
71189 @@ -1589,6 +1638,17 @@ SYSCALL_DEFINE3(connect, int, fd, struct
71190 if (err < 0)
71191 goto out_put;
71192
71193 + sck = (struct sockaddr *)&address;
71194 +
71195 + if (gr_handle_sock_client(sck)) {
71196 + err = -EACCES;
71197 + goto out_put;
71198 + }
71199 +
71200 + err = gr_search_connect(sock, (struct sockaddr_in *)sck);
71201 + if (err)
71202 + goto out_put;
71203 +
71204 err =
71205 security_socket_connect(sock, (struct sockaddr *)&address, addrlen);
71206 if (err)
71207 @@ -1890,6 +1950,8 @@ static int __sys_sendmsg(struct socket *
71208 unsigned char *ctl_buf = ctl;
71209 int err, ctl_len, iov_size, total_len;
71210
71211 + pax_track_stack();
71212 +
71213 err = -EFAULT;
71214 if (MSG_CMSG_COMPAT & flags) {
71215 if (get_compat_msghdr(msg_sys, msg_compat))
71216 diff -urNp linux-3.0.4/net/sunrpc/sched.c linux-3.0.4/net/sunrpc/sched.c
71217 --- linux-3.0.4/net/sunrpc/sched.c 2011-07-21 22:17:23.000000000 -0400
71218 +++ linux-3.0.4/net/sunrpc/sched.c 2011-08-23 21:47:56.000000000 -0400
71219 @@ -234,9 +234,9 @@ static int rpc_wait_bit_killable(void *w
71220 #ifdef RPC_DEBUG
71221 static void rpc_task_set_debuginfo(struct rpc_task *task)
71222 {
71223 - static atomic_t rpc_pid;
71224 + static atomic_unchecked_t rpc_pid;
71225
71226 - task->tk_pid = atomic_inc_return(&rpc_pid);
71227 + task->tk_pid = atomic_inc_return_unchecked(&rpc_pid);
71228 }
71229 #else
71230 static inline void rpc_task_set_debuginfo(struct rpc_task *task)
71231 diff -urNp linux-3.0.4/net/sunrpc/xprtrdma/svc_rdma.c linux-3.0.4/net/sunrpc/xprtrdma/svc_rdma.c
71232 --- linux-3.0.4/net/sunrpc/xprtrdma/svc_rdma.c 2011-07-21 22:17:23.000000000 -0400
71233 +++ linux-3.0.4/net/sunrpc/xprtrdma/svc_rdma.c 2011-08-23 21:47:56.000000000 -0400
71234 @@ -61,15 +61,15 @@ unsigned int svcrdma_max_req_size = RPCR
71235 static unsigned int min_max_inline = 4096;
71236 static unsigned int max_max_inline = 65536;
71237
71238 -atomic_t rdma_stat_recv;
71239 -atomic_t rdma_stat_read;
71240 -atomic_t rdma_stat_write;
71241 -atomic_t rdma_stat_sq_starve;
71242 -atomic_t rdma_stat_rq_starve;
71243 -atomic_t rdma_stat_rq_poll;
71244 -atomic_t rdma_stat_rq_prod;
71245 -atomic_t rdma_stat_sq_poll;
71246 -atomic_t rdma_stat_sq_prod;
71247 +atomic_unchecked_t rdma_stat_recv;
71248 +atomic_unchecked_t rdma_stat_read;
71249 +atomic_unchecked_t rdma_stat_write;
71250 +atomic_unchecked_t rdma_stat_sq_starve;
71251 +atomic_unchecked_t rdma_stat_rq_starve;
71252 +atomic_unchecked_t rdma_stat_rq_poll;
71253 +atomic_unchecked_t rdma_stat_rq_prod;
71254 +atomic_unchecked_t rdma_stat_sq_poll;
71255 +atomic_unchecked_t rdma_stat_sq_prod;
71256
71257 /* Temporary NFS request map and context caches */
71258 struct kmem_cache *svc_rdma_map_cachep;
71259 @@ -109,7 +109,7 @@ static int read_reset_stat(ctl_table *ta
71260 len -= *ppos;
71261 if (len > *lenp)
71262 len = *lenp;
71263 - if (len && copy_to_user(buffer, str_buf, len))
71264 + if (len > sizeof str_buf || (len && copy_to_user(buffer, str_buf, len)))
71265 return -EFAULT;
71266 *lenp = len;
71267 *ppos += len;
71268 @@ -150,63 +150,63 @@ static ctl_table svcrdma_parm_table[] =
71269 {
71270 .procname = "rdma_stat_read",
71271 .data = &rdma_stat_read,
71272 - .maxlen = sizeof(atomic_t),
71273 + .maxlen = sizeof(atomic_unchecked_t),
71274 .mode = 0644,
71275 .proc_handler = read_reset_stat,
71276 },
71277 {
71278 .procname = "rdma_stat_recv",
71279 .data = &rdma_stat_recv,
71280 - .maxlen = sizeof(atomic_t),
71281 + .maxlen = sizeof(atomic_unchecked_t),
71282 .mode = 0644,
71283 .proc_handler = read_reset_stat,
71284 },
71285 {
71286 .procname = "rdma_stat_write",
71287 .data = &rdma_stat_write,
71288 - .maxlen = sizeof(atomic_t),
71289 + .maxlen = sizeof(atomic_unchecked_t),
71290 .mode = 0644,
71291 .proc_handler = read_reset_stat,
71292 },
71293 {
71294 .procname = "rdma_stat_sq_starve",
71295 .data = &rdma_stat_sq_starve,
71296 - .maxlen = sizeof(atomic_t),
71297 + .maxlen = sizeof(atomic_unchecked_t),
71298 .mode = 0644,
71299 .proc_handler = read_reset_stat,
71300 },
71301 {
71302 .procname = "rdma_stat_rq_starve",
71303 .data = &rdma_stat_rq_starve,
71304 - .maxlen = sizeof(atomic_t),
71305 + .maxlen = sizeof(atomic_unchecked_t),
71306 .mode = 0644,
71307 .proc_handler = read_reset_stat,
71308 },
71309 {
71310 .procname = "rdma_stat_rq_poll",
71311 .data = &rdma_stat_rq_poll,
71312 - .maxlen = sizeof(atomic_t),
71313 + .maxlen = sizeof(atomic_unchecked_t),
71314 .mode = 0644,
71315 .proc_handler = read_reset_stat,
71316 },
71317 {
71318 .procname = "rdma_stat_rq_prod",
71319 .data = &rdma_stat_rq_prod,
71320 - .maxlen = sizeof(atomic_t),
71321 + .maxlen = sizeof(atomic_unchecked_t),
71322 .mode = 0644,
71323 .proc_handler = read_reset_stat,
71324 },
71325 {
71326 .procname = "rdma_stat_sq_poll",
71327 .data = &rdma_stat_sq_poll,
71328 - .maxlen = sizeof(atomic_t),
71329 + .maxlen = sizeof(atomic_unchecked_t),
71330 .mode = 0644,
71331 .proc_handler = read_reset_stat,
71332 },
71333 {
71334 .procname = "rdma_stat_sq_prod",
71335 .data = &rdma_stat_sq_prod,
71336 - .maxlen = sizeof(atomic_t),
71337 + .maxlen = sizeof(atomic_unchecked_t),
71338 .mode = 0644,
71339 .proc_handler = read_reset_stat,
71340 },
71341 diff -urNp linux-3.0.4/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c linux-3.0.4/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
71342 --- linux-3.0.4/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 2011-07-21 22:17:23.000000000 -0400
71343 +++ linux-3.0.4/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 2011-08-23 21:47:56.000000000 -0400
71344 @@ -499,7 +499,7 @@ next_sge:
71345 svc_rdma_put_context(ctxt, 0);
71346 goto out;
71347 }
71348 - atomic_inc(&rdma_stat_read);
71349 + atomic_inc_unchecked(&rdma_stat_read);
71350
71351 if (read_wr.num_sge < chl_map->ch[ch_no].count) {
71352 chl_map->ch[ch_no].count -= read_wr.num_sge;
71353 @@ -609,7 +609,7 @@ int svc_rdma_recvfrom(struct svc_rqst *r
71354 dto_q);
71355 list_del_init(&ctxt->dto_q);
71356 } else {
71357 - atomic_inc(&rdma_stat_rq_starve);
71358 + atomic_inc_unchecked(&rdma_stat_rq_starve);
71359 clear_bit(XPT_DATA, &xprt->xpt_flags);
71360 ctxt = NULL;
71361 }
71362 @@ -629,7 +629,7 @@ int svc_rdma_recvfrom(struct svc_rqst *r
71363 dprintk("svcrdma: processing ctxt=%p on xprt=%p, rqstp=%p, status=%d\n",
71364 ctxt, rdma_xprt, rqstp, ctxt->wc_status);
71365 BUG_ON(ctxt->wc_status != IB_WC_SUCCESS);
71366 - atomic_inc(&rdma_stat_recv);
71367 + atomic_inc_unchecked(&rdma_stat_recv);
71368
71369 /* Build up the XDR from the receive buffers. */
71370 rdma_build_arg_xdr(rqstp, ctxt, ctxt->byte_len);
71371 diff -urNp linux-3.0.4/net/sunrpc/xprtrdma/svc_rdma_sendto.c linux-3.0.4/net/sunrpc/xprtrdma/svc_rdma_sendto.c
71372 --- linux-3.0.4/net/sunrpc/xprtrdma/svc_rdma_sendto.c 2011-07-21 22:17:23.000000000 -0400
71373 +++ linux-3.0.4/net/sunrpc/xprtrdma/svc_rdma_sendto.c 2011-08-23 21:47:56.000000000 -0400
71374 @@ -362,7 +362,7 @@ static int send_write(struct svcxprt_rdm
71375 write_wr.wr.rdma.remote_addr = to;
71376
71377 /* Post It */
71378 - atomic_inc(&rdma_stat_write);
71379 + atomic_inc_unchecked(&rdma_stat_write);
71380 if (svc_rdma_send(xprt, &write_wr))
71381 goto err;
71382 return 0;
71383 diff -urNp linux-3.0.4/net/sunrpc/xprtrdma/svc_rdma_transport.c linux-3.0.4/net/sunrpc/xprtrdma/svc_rdma_transport.c
71384 --- linux-3.0.4/net/sunrpc/xprtrdma/svc_rdma_transport.c 2011-07-21 22:17:23.000000000 -0400
71385 +++ linux-3.0.4/net/sunrpc/xprtrdma/svc_rdma_transport.c 2011-08-23 21:47:56.000000000 -0400
71386 @@ -298,7 +298,7 @@ static void rq_cq_reap(struct svcxprt_rd
71387 return;
71388
71389 ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP);
71390 - atomic_inc(&rdma_stat_rq_poll);
71391 + atomic_inc_unchecked(&rdma_stat_rq_poll);
71392
71393 while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) {
71394 ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
71395 @@ -320,7 +320,7 @@ static void rq_cq_reap(struct svcxprt_rd
71396 }
71397
71398 if (ctxt)
71399 - atomic_inc(&rdma_stat_rq_prod);
71400 + atomic_inc_unchecked(&rdma_stat_rq_prod);
71401
71402 set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
71403 /*
71404 @@ -392,7 +392,7 @@ static void sq_cq_reap(struct svcxprt_rd
71405 return;
71406
71407 ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP);
71408 - atomic_inc(&rdma_stat_sq_poll);
71409 + atomic_inc_unchecked(&rdma_stat_sq_poll);
71410 while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) {
71411 if (wc.status != IB_WC_SUCCESS)
71412 /* Close the transport */
71413 @@ -410,7 +410,7 @@ static void sq_cq_reap(struct svcxprt_rd
71414 }
71415
71416 if (ctxt)
71417 - atomic_inc(&rdma_stat_sq_prod);
71418 + atomic_inc_unchecked(&rdma_stat_sq_prod);
71419 }
71420
71421 static void sq_comp_handler(struct ib_cq *cq, void *cq_context)
71422 @@ -1272,7 +1272,7 @@ int svc_rdma_send(struct svcxprt_rdma *x
71423 spin_lock_bh(&xprt->sc_lock);
71424 if (xprt->sc_sq_depth < atomic_read(&xprt->sc_sq_count) + wr_count) {
71425 spin_unlock_bh(&xprt->sc_lock);
71426 - atomic_inc(&rdma_stat_sq_starve);
71427 + atomic_inc_unchecked(&rdma_stat_sq_starve);
71428
71429 /* See if we can opportunistically reap SQ WR to make room */
71430 sq_cq_reap(xprt);
71431 diff -urNp linux-3.0.4/net/sysctl_net.c linux-3.0.4/net/sysctl_net.c
71432 --- linux-3.0.4/net/sysctl_net.c 2011-07-21 22:17:23.000000000 -0400
71433 +++ linux-3.0.4/net/sysctl_net.c 2011-08-23 21:48:14.000000000 -0400
71434 @@ -46,7 +46,7 @@ static int net_ctl_permissions(struct ct
71435 struct ctl_table *table)
71436 {
71437 /* Allow network administrator to have same access as root. */
71438 - if (capable(CAP_NET_ADMIN)) {
71439 + if (capable_nolog(CAP_NET_ADMIN)) {
71440 int mode = (table->mode >> 6) & 7;
71441 return (mode << 6) | (mode << 3) | mode;
71442 }
71443 diff -urNp linux-3.0.4/net/unix/af_unix.c linux-3.0.4/net/unix/af_unix.c
71444 --- linux-3.0.4/net/unix/af_unix.c 2011-07-21 22:17:23.000000000 -0400
71445 +++ linux-3.0.4/net/unix/af_unix.c 2011-08-23 21:48:14.000000000 -0400
71446 @@ -767,6 +767,12 @@ static struct sock *unix_find_other(stru
71447 err = -ECONNREFUSED;
71448 if (!S_ISSOCK(inode->i_mode))
71449 goto put_fail;
71450 +
71451 + if (!gr_acl_handle_unix(path.dentry, path.mnt)) {
71452 + err = -EACCES;
71453 + goto put_fail;
71454 + }
71455 +
71456 u = unix_find_socket_byinode(inode);
71457 if (!u)
71458 goto put_fail;
71459 @@ -787,6 +793,13 @@ static struct sock *unix_find_other(stru
71460 if (u) {
71461 struct dentry *dentry;
71462 dentry = unix_sk(u)->dentry;
71463 +
71464 + if (!gr_handle_chroot_unix(pid_vnr(u->sk_peer_pid))) {
71465 + err = -EPERM;
71466 + sock_put(u);
71467 + goto fail;
71468 + }
71469 +
71470 if (dentry)
71471 touch_atime(unix_sk(u)->mnt, dentry);
71472 } else
71473 @@ -872,11 +885,18 @@ static int unix_bind(struct socket *sock
71474 err = security_path_mknod(&nd.path, dentry, mode, 0);
71475 if (err)
71476 goto out_mknod_drop_write;
71477 + if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
71478 + err = -EACCES;
71479 + goto out_mknod_drop_write;
71480 + }
71481 err = vfs_mknod(nd.path.dentry->d_inode, dentry, mode, 0);
71482 out_mknod_drop_write:
71483 mnt_drop_write(nd.path.mnt);
71484 if (err)
71485 goto out_mknod_dput;
71486 +
71487 + gr_handle_create(dentry, nd.path.mnt);
71488 +
71489 mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
71490 dput(nd.path.dentry);
71491 nd.path.dentry = dentry;
71492 diff -urNp linux-3.0.4/net/wireless/core.h linux-3.0.4/net/wireless/core.h
71493 --- linux-3.0.4/net/wireless/core.h 2011-07-21 22:17:23.000000000 -0400
71494 +++ linux-3.0.4/net/wireless/core.h 2011-08-23 21:47:56.000000000 -0400
71495 @@ -27,7 +27,7 @@ struct cfg80211_registered_device {
71496 struct mutex mtx;
71497
71498 /* rfkill support */
71499 - struct rfkill_ops rfkill_ops;
71500 + rfkill_ops_no_const rfkill_ops;
71501 struct rfkill *rfkill;
71502 struct work_struct rfkill_sync;
71503
71504 diff -urNp linux-3.0.4/net/wireless/wext-core.c linux-3.0.4/net/wireless/wext-core.c
71505 --- linux-3.0.4/net/wireless/wext-core.c 2011-07-21 22:17:23.000000000 -0400
71506 +++ linux-3.0.4/net/wireless/wext-core.c 2011-08-23 21:47:56.000000000 -0400
71507 @@ -746,8 +746,7 @@ static int ioctl_standard_iw_point(struc
71508 */
71509
71510 /* Support for very large requests */
71511 - if ((descr->flags & IW_DESCR_FLAG_NOMAX) &&
71512 - (user_length > descr->max_tokens)) {
71513 + if (user_length > descr->max_tokens) {
71514 /* Allow userspace to GET more than max so
71515 * we can support any size GET requests.
71516 * There is still a limit : -ENOMEM.
71517 @@ -784,22 +783,6 @@ static int ioctl_standard_iw_point(struc
71518 }
71519 }
71520
71521 - if (IW_IS_GET(cmd) && !(descr->flags & IW_DESCR_FLAG_NOMAX)) {
71522 - /*
71523 - * If this is a GET, but not NOMAX, it means that the extra
71524 - * data is not bounded by userspace, but by max_tokens. Thus
71525 - * set the length to max_tokens. This matches the extra data
71526 - * allocation.
71527 - * The driver should fill it with the number of tokens it
71528 - * provided, and it may check iwp->length rather than having
71529 - * knowledge of max_tokens. If the driver doesn't change the
71530 - * iwp->length, this ioctl just copies back max_token tokens
71531 - * filled with zeroes. Hopefully the driver isn't claiming
71532 - * them to be valid data.
71533 - */
71534 - iwp->length = descr->max_tokens;
71535 - }
71536 -
71537 err = handler(dev, info, (union iwreq_data *) iwp, extra);
71538
71539 iwp->length += essid_compat;
71540 diff -urNp linux-3.0.4/net/xfrm/xfrm_policy.c linux-3.0.4/net/xfrm/xfrm_policy.c
71541 --- linux-3.0.4/net/xfrm/xfrm_policy.c 2011-07-21 22:17:23.000000000 -0400
71542 +++ linux-3.0.4/net/xfrm/xfrm_policy.c 2011-08-23 21:47:56.000000000 -0400
71543 @@ -299,7 +299,7 @@ static void xfrm_policy_kill(struct xfrm
71544 {
71545 policy->walk.dead = 1;
71546
71547 - atomic_inc(&policy->genid);
71548 + atomic_inc_unchecked(&policy->genid);
71549
71550 if (del_timer(&policy->timer))
71551 xfrm_pol_put(policy);
71552 @@ -583,7 +583,7 @@ int xfrm_policy_insert(int dir, struct x
71553 hlist_add_head(&policy->bydst, chain);
71554 xfrm_pol_hold(policy);
71555 net->xfrm.policy_count[dir]++;
71556 - atomic_inc(&flow_cache_genid);
71557 + atomic_inc_unchecked(&flow_cache_genid);
71558 if (delpol)
71559 __xfrm_policy_unlink(delpol, dir);
71560 policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir);
71561 @@ -1528,7 +1528,7 @@ free_dst:
71562 goto out;
71563 }
71564
71565 -static int inline
71566 +static inline int
71567 xfrm_dst_alloc_copy(void **target, const void *src, int size)
71568 {
71569 if (!*target) {
71570 @@ -1540,7 +1540,7 @@ xfrm_dst_alloc_copy(void **target, const
71571 return 0;
71572 }
71573
71574 -static int inline
71575 +static inline int
71576 xfrm_dst_update_parent(struct dst_entry *dst, const struct xfrm_selector *sel)
71577 {
71578 #ifdef CONFIG_XFRM_SUB_POLICY
71579 @@ -1552,7 +1552,7 @@ xfrm_dst_update_parent(struct dst_entry
71580 #endif
71581 }
71582
71583 -static int inline
71584 +static inline int
71585 xfrm_dst_update_origin(struct dst_entry *dst, const struct flowi *fl)
71586 {
71587 #ifdef CONFIG_XFRM_SUB_POLICY
71588 @@ -1646,7 +1646,7 @@ xfrm_resolve_and_create_bundle(struct xf
71589
71590 xdst->num_pols = num_pols;
71591 memcpy(xdst->pols, pols, sizeof(struct xfrm_policy*) * num_pols);
71592 - xdst->policy_genid = atomic_read(&pols[0]->genid);
71593 + xdst->policy_genid = atomic_read_unchecked(&pols[0]->genid);
71594
71595 return xdst;
71596 }
71597 @@ -2333,7 +2333,7 @@ static int xfrm_bundle_ok(struct xfrm_ds
71598 if (xdst->xfrm_genid != dst->xfrm->genid)
71599 return 0;
71600 if (xdst->num_pols > 0 &&
71601 - xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
71602 + xdst->policy_genid != atomic_read_unchecked(&xdst->pols[0]->genid))
71603 return 0;
71604
71605 mtu = dst_mtu(dst->child);
71606 @@ -2861,7 +2861,7 @@ static int xfrm_policy_migrate(struct xf
71607 sizeof(pol->xfrm_vec[i].saddr));
71608 pol->xfrm_vec[i].encap_family = mp->new_family;
71609 /* flush bundles */
71610 - atomic_inc(&pol->genid);
71611 + atomic_inc_unchecked(&pol->genid);
71612 }
71613 }
71614
71615 diff -urNp linux-3.0.4/net/xfrm/xfrm_user.c linux-3.0.4/net/xfrm/xfrm_user.c
71616 --- linux-3.0.4/net/xfrm/xfrm_user.c 2011-07-21 22:17:23.000000000 -0400
71617 +++ linux-3.0.4/net/xfrm/xfrm_user.c 2011-08-23 21:48:14.000000000 -0400
71618 @@ -1394,6 +1394,8 @@ static int copy_to_user_tmpl(struct xfrm
71619 struct xfrm_user_tmpl vec[XFRM_MAX_DEPTH];
71620 int i;
71621
71622 + pax_track_stack();
71623 +
71624 if (xp->xfrm_nr == 0)
71625 return 0;
71626
71627 @@ -2062,6 +2064,8 @@ static int xfrm_do_migrate(struct sk_buf
71628 int err;
71629 int n = 0;
71630
71631 + pax_track_stack();
71632 +
71633 if (attrs[XFRMA_MIGRATE] == NULL)
71634 return -EINVAL;
71635
71636 diff -urNp linux-3.0.4/scripts/basic/fixdep.c linux-3.0.4/scripts/basic/fixdep.c
71637 --- linux-3.0.4/scripts/basic/fixdep.c 2011-07-21 22:17:23.000000000 -0400
71638 +++ linux-3.0.4/scripts/basic/fixdep.c 2011-08-23 21:47:56.000000000 -0400
71639 @@ -235,9 +235,9 @@ static void use_config(const char *m, in
71640
71641 static void parse_config_file(const char *map, size_t len)
71642 {
71643 - const int *end = (const int *) (map + len);
71644 + const unsigned int *end = (const unsigned int *) (map + len);
71645 /* start at +1, so that p can never be < map */
71646 - const int *m = (const int *) map + 1;
71647 + const unsigned int *m = (const unsigned int *) map + 1;
71648 const char *p, *q;
71649
71650 for (; m < end; m++) {
71651 @@ -405,7 +405,7 @@ static void print_deps(void)
71652 static void traps(void)
71653 {
71654 static char test[] __attribute__((aligned(sizeof(int)))) = "CONF";
71655 - int *p = (int *)test;
71656 + unsigned int *p = (unsigned int *)test;
71657
71658 if (*p != INT_CONF) {
71659 fprintf(stderr, "fixdep: sizeof(int) != 4 or wrong endianess? %#x\n",
71660 diff -urNp linux-3.0.4/scripts/gcc-plugin.sh linux-3.0.4/scripts/gcc-plugin.sh
71661 --- linux-3.0.4/scripts/gcc-plugin.sh 1969-12-31 19:00:00.000000000 -0500
71662 +++ linux-3.0.4/scripts/gcc-plugin.sh 2011-09-14 09:08:05.000000000 -0400
71663 @@ -0,0 +1,2 @@
71664 +#!/bin/sh
71665 +echo "#include \"gcc-plugin.h\"\n#include \"rtl.h\"" | $1 -x c -shared - -o /dev/null -I`$2 -print-file-name=plugin`/include >/dev/null 2>&1 && echo "y"
71666 diff -urNp linux-3.0.4/scripts/Makefile.build linux-3.0.4/scripts/Makefile.build
71667 --- linux-3.0.4/scripts/Makefile.build 2011-07-21 22:17:23.000000000 -0400
71668 +++ linux-3.0.4/scripts/Makefile.build 2011-08-23 21:47:56.000000000 -0400
71669 @@ -109,7 +109,7 @@ endif
71670 endif
71671
71672 # Do not include host rules unless needed
71673 -ifneq ($(hostprogs-y)$(hostprogs-m),)
71674 +ifneq ($(hostprogs-y)$(hostprogs-m)$(hostlibs-y)$(hostlibs-m),)
71675 include scripts/Makefile.host
71676 endif
71677
71678 diff -urNp linux-3.0.4/scripts/Makefile.clean linux-3.0.4/scripts/Makefile.clean
71679 --- linux-3.0.4/scripts/Makefile.clean 2011-07-21 22:17:23.000000000 -0400
71680 +++ linux-3.0.4/scripts/Makefile.clean 2011-08-23 21:47:56.000000000 -0400
71681 @@ -43,7 +43,8 @@ subdir-ymn := $(addprefix $(obj)/,$(subd
71682 __clean-files := $(extra-y) $(always) \
71683 $(targets) $(clean-files) \
71684 $(host-progs) \
71685 - $(hostprogs-y) $(hostprogs-m) $(hostprogs-)
71686 + $(hostprogs-y) $(hostprogs-m) $(hostprogs-) \
71687 + $(hostlibs-y) $(hostlibs-m) $(hostlibs-)
71688
71689 __clean-files := $(filter-out $(no-clean-files), $(__clean-files))
71690
71691 diff -urNp linux-3.0.4/scripts/Makefile.host linux-3.0.4/scripts/Makefile.host
71692 --- linux-3.0.4/scripts/Makefile.host 2011-07-21 22:17:23.000000000 -0400
71693 +++ linux-3.0.4/scripts/Makefile.host 2011-08-23 21:47:56.000000000 -0400
71694 @@ -31,6 +31,7 @@
71695 # Note: Shared libraries consisting of C++ files are not supported
71696
71697 __hostprogs := $(sort $(hostprogs-y) $(hostprogs-m))
71698 +__hostlibs := $(sort $(hostlibs-y) $(hostlibs-m))
71699
71700 # C code
71701 # Executables compiled from a single .c file
71702 @@ -54,6 +55,7 @@ host-cxxobjs := $(sort $(foreach m,$(hos
71703 # Shared libaries (only .c supported)
71704 # Shared libraries (.so) - all .so files referenced in "xxx-objs"
71705 host-cshlib := $(sort $(filter %.so, $(host-cobjs)))
71706 +host-cshlib += $(sort $(filter %.so, $(__hostlibs)))
71707 # Remove .so files from "xxx-objs"
71708 host-cobjs := $(filter-out %.so,$(host-cobjs))
71709
71710 diff -urNp linux-3.0.4/scripts/mod/file2alias.c linux-3.0.4/scripts/mod/file2alias.c
71711 --- linux-3.0.4/scripts/mod/file2alias.c 2011-07-21 22:17:23.000000000 -0400
71712 +++ linux-3.0.4/scripts/mod/file2alias.c 2011-08-23 21:47:56.000000000 -0400
71713 @@ -72,7 +72,7 @@ static void device_id_check(const char *
71714 unsigned long size, unsigned long id_size,
71715 void *symval)
71716 {
71717 - int i;
71718 + unsigned int i;
71719
71720 if (size % id_size || size < id_size) {
71721 if (cross_build != 0)
71722 @@ -102,7 +102,7 @@ static void device_id_check(const char *
71723 /* USB is special because the bcdDevice can be matched against a numeric range */
71724 /* Looks like "usb:vNpNdNdcNdscNdpNicNiscNipN" */
71725 static void do_usb_entry(struct usb_device_id *id,
71726 - unsigned int bcdDevice_initial, int bcdDevice_initial_digits,
71727 + unsigned int bcdDevice_initial, unsigned int bcdDevice_initial_digits,
71728 unsigned char range_lo, unsigned char range_hi,
71729 unsigned char max, struct module *mod)
71730 {
71731 @@ -437,7 +437,7 @@ static void do_pnp_device_entry(void *sy
71732 for (i = 0; i < count; i++) {
71733 const char *id = (char *)devs[i].id;
71734 char acpi_id[sizeof(devs[0].id)];
71735 - int j;
71736 + unsigned int j;
71737
71738 buf_printf(&mod->dev_table_buf,
71739 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
71740 @@ -467,7 +467,7 @@ static void do_pnp_card_entries(void *sy
71741
71742 for (j = 0; j < PNP_MAX_DEVICES; j++) {
71743 const char *id = (char *)card->devs[j].id;
71744 - int i2, j2;
71745 + unsigned int i2, j2;
71746 int dup = 0;
71747
71748 if (!id[0])
71749 @@ -493,7 +493,7 @@ static void do_pnp_card_entries(void *sy
71750 /* add an individual alias for every device entry */
71751 if (!dup) {
71752 char acpi_id[sizeof(card->devs[0].id)];
71753 - int k;
71754 + unsigned int k;
71755
71756 buf_printf(&mod->dev_table_buf,
71757 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
71758 @@ -786,7 +786,7 @@ static void dmi_ascii_filter(char *d, co
71759 static int do_dmi_entry(const char *filename, struct dmi_system_id *id,
71760 char *alias)
71761 {
71762 - int i, j;
71763 + unsigned int i, j;
71764
71765 sprintf(alias, "dmi*");
71766
71767 diff -urNp linux-3.0.4/scripts/mod/modpost.c linux-3.0.4/scripts/mod/modpost.c
71768 --- linux-3.0.4/scripts/mod/modpost.c 2011-07-21 22:17:23.000000000 -0400
71769 +++ linux-3.0.4/scripts/mod/modpost.c 2011-08-23 21:47:56.000000000 -0400
71770 @@ -892,6 +892,7 @@ enum mismatch {
71771 ANY_INIT_TO_ANY_EXIT,
71772 ANY_EXIT_TO_ANY_INIT,
71773 EXPORT_TO_INIT_EXIT,
71774 + DATA_TO_TEXT
71775 };
71776
71777 struct sectioncheck {
71778 @@ -1000,6 +1001,12 @@ const struct sectioncheck sectioncheck[]
71779 .tosec = { INIT_SECTIONS, EXIT_SECTIONS, NULL },
71780 .mismatch = EXPORT_TO_INIT_EXIT,
71781 .symbol_white_list = { DEFAULT_SYMBOL_WHITE_LIST, NULL },
71782 +},
71783 +/* Do not reference code from writable data */
71784 +{
71785 + .fromsec = { DATA_SECTIONS, NULL },
71786 + .tosec = { TEXT_SECTIONS, NULL },
71787 + .mismatch = DATA_TO_TEXT
71788 }
71789 };
71790
71791 @@ -1122,10 +1129,10 @@ static Elf_Sym *find_elf_symbol(struct e
71792 continue;
71793 if (ELF_ST_TYPE(sym->st_info) == STT_SECTION)
71794 continue;
71795 - if (sym->st_value == addr)
71796 - return sym;
71797 /* Find a symbol nearby - addr are maybe negative */
71798 d = sym->st_value - addr;
71799 + if (d == 0)
71800 + return sym;
71801 if (d < 0)
71802 d = addr - sym->st_value;
71803 if (d < distance) {
71804 @@ -1404,6 +1411,14 @@ static void report_sec_mismatch(const ch
71805 tosym, prl_to, prl_to, tosym);
71806 free(prl_to);
71807 break;
71808 + case DATA_TO_TEXT:
71809 +/*
71810 + fprintf(stderr,
71811 + "The variable %s references\n"
71812 + "the %s %s%s%s\n",
71813 + fromsym, to, sec2annotation(tosec), tosym, to_p);
71814 +*/
71815 + break;
71816 }
71817 fprintf(stderr, "\n");
71818 }
71819 @@ -1629,7 +1644,7 @@ static void section_rel(const char *modn
71820 static void check_sec_ref(struct module *mod, const char *modname,
71821 struct elf_info *elf)
71822 {
71823 - int i;
71824 + unsigned int i;
71825 Elf_Shdr *sechdrs = elf->sechdrs;
71826
71827 /* Walk through all sections */
71828 @@ -1727,7 +1742,7 @@ void __attribute__((format(printf, 2, 3)
71829 va_end(ap);
71830 }
71831
71832 -void buf_write(struct buffer *buf, const char *s, int len)
71833 +void buf_write(struct buffer *buf, const char *s, unsigned int len)
71834 {
71835 if (buf->size - buf->pos < len) {
71836 buf->size += len + SZ;
71837 @@ -1939,7 +1954,7 @@ static void write_if_changed(struct buff
71838 if (fstat(fileno(file), &st) < 0)
71839 goto close_write;
71840
71841 - if (st.st_size != b->pos)
71842 + if (st.st_size != (off_t)b->pos)
71843 goto close_write;
71844
71845 tmp = NOFAIL(malloc(b->pos));
71846 diff -urNp linux-3.0.4/scripts/mod/modpost.h linux-3.0.4/scripts/mod/modpost.h
71847 --- linux-3.0.4/scripts/mod/modpost.h 2011-07-21 22:17:23.000000000 -0400
71848 +++ linux-3.0.4/scripts/mod/modpost.h 2011-08-23 21:47:56.000000000 -0400
71849 @@ -92,15 +92,15 @@ void *do_nofail(void *ptr, const char *e
71850
71851 struct buffer {
71852 char *p;
71853 - int pos;
71854 - int size;
71855 + unsigned int pos;
71856 + unsigned int size;
71857 };
71858
71859 void __attribute__((format(printf, 2, 3)))
71860 buf_printf(struct buffer *buf, const char *fmt, ...);
71861
71862 void
71863 -buf_write(struct buffer *buf, const char *s, int len);
71864 +buf_write(struct buffer *buf, const char *s, unsigned int len);
71865
71866 struct module {
71867 struct module *next;
71868 diff -urNp linux-3.0.4/scripts/mod/sumversion.c linux-3.0.4/scripts/mod/sumversion.c
71869 --- linux-3.0.4/scripts/mod/sumversion.c 2011-07-21 22:17:23.000000000 -0400
71870 +++ linux-3.0.4/scripts/mod/sumversion.c 2011-08-23 21:47:56.000000000 -0400
71871 @@ -470,7 +470,7 @@ static void write_version(const char *fi
71872 goto out;
71873 }
71874
71875 - if (write(fd, sum, strlen(sum)+1) != strlen(sum)+1) {
71876 + if (write(fd, sum, strlen(sum)+1) != (ssize_t)strlen(sum)+1) {
71877 warn("writing sum in %s failed: %s\n",
71878 filename, strerror(errno));
71879 goto out;
71880 diff -urNp linux-3.0.4/scripts/pnmtologo.c linux-3.0.4/scripts/pnmtologo.c
71881 --- linux-3.0.4/scripts/pnmtologo.c 2011-07-21 22:17:23.000000000 -0400
71882 +++ linux-3.0.4/scripts/pnmtologo.c 2011-08-23 21:47:56.000000000 -0400
71883 @@ -237,14 +237,14 @@ static void write_header(void)
71884 fprintf(out, " * Linux logo %s\n", logoname);
71885 fputs(" */\n\n", out);
71886 fputs("#include <linux/linux_logo.h>\n\n", out);
71887 - fprintf(out, "static unsigned char %s_data[] __initdata = {\n",
71888 + fprintf(out, "static unsigned char %s_data[] = {\n",
71889 logoname);
71890 }
71891
71892 static void write_footer(void)
71893 {
71894 fputs("\n};\n\n", out);
71895 - fprintf(out, "const struct linux_logo %s __initconst = {\n", logoname);
71896 + fprintf(out, "const struct linux_logo %s = {\n", logoname);
71897 fprintf(out, "\t.type\t\t= %s,\n", logo_types[logo_type]);
71898 fprintf(out, "\t.width\t\t= %d,\n", logo_width);
71899 fprintf(out, "\t.height\t\t= %d,\n", logo_height);
71900 @@ -374,7 +374,7 @@ static void write_logo_clut224(void)
71901 fputs("\n};\n\n", out);
71902
71903 /* write logo clut */
71904 - fprintf(out, "static unsigned char %s_clut[] __initdata = {\n",
71905 + fprintf(out, "static unsigned char %s_clut[] = {\n",
71906 logoname);
71907 write_hex_cnt = 0;
71908 for (i = 0; i < logo_clutsize; i++) {
71909 diff -urNp linux-3.0.4/security/apparmor/lsm.c linux-3.0.4/security/apparmor/lsm.c
71910 --- linux-3.0.4/security/apparmor/lsm.c 2011-09-02 18:11:21.000000000 -0400
71911 +++ linux-3.0.4/security/apparmor/lsm.c 2011-08-23 21:48:14.000000000 -0400
71912 @@ -621,7 +621,7 @@ static int apparmor_task_setrlimit(struc
71913 return error;
71914 }
71915
71916 -static struct security_operations apparmor_ops = {
71917 +static struct security_operations apparmor_ops __read_only = {
71918 .name = "apparmor",
71919
71920 .ptrace_access_check = apparmor_ptrace_access_check,
71921 diff -urNp linux-3.0.4/security/commoncap.c linux-3.0.4/security/commoncap.c
71922 --- linux-3.0.4/security/commoncap.c 2011-07-21 22:17:23.000000000 -0400
71923 +++ linux-3.0.4/security/commoncap.c 2011-08-23 21:48:14.000000000 -0400
71924 @@ -28,6 +28,7 @@
71925 #include <linux/prctl.h>
71926 #include <linux/securebits.h>
71927 #include <linux/user_namespace.h>
71928 +#include <net/sock.h>
71929
71930 /*
71931 * If a non-root user executes a setuid-root binary in
71932 @@ -58,7 +59,7 @@ int cap_netlink_send(struct sock *sk, st
71933
71934 int cap_netlink_recv(struct sk_buff *skb, int cap)
71935 {
71936 - if (!cap_raised(current_cap(), cap))
71937 + if (!cap_raised(current_cap(), cap) || !gr_is_capable(cap))
71938 return -EPERM;
71939 return 0;
71940 }
71941 @@ -575,6 +576,9 @@ int cap_bprm_secureexec(struct linux_bin
71942 {
71943 const struct cred *cred = current_cred();
71944
71945 + if (gr_acl_enable_at_secure())
71946 + return 1;
71947 +
71948 if (cred->uid != 0) {
71949 if (bprm->cap_effective)
71950 return 1;
71951 diff -urNp linux-3.0.4/security/integrity/ima/ima_api.c linux-3.0.4/security/integrity/ima/ima_api.c
71952 --- linux-3.0.4/security/integrity/ima/ima_api.c 2011-07-21 22:17:23.000000000 -0400
71953 +++ linux-3.0.4/security/integrity/ima/ima_api.c 2011-08-23 21:47:56.000000000 -0400
71954 @@ -75,7 +75,7 @@ void ima_add_violation(struct inode *ino
71955 int result;
71956
71957 /* can overflow, only indicator */
71958 - atomic_long_inc(&ima_htable.violations);
71959 + atomic_long_inc_unchecked(&ima_htable.violations);
71960
71961 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
71962 if (!entry) {
71963 diff -urNp linux-3.0.4/security/integrity/ima/ima_fs.c linux-3.0.4/security/integrity/ima/ima_fs.c
71964 --- linux-3.0.4/security/integrity/ima/ima_fs.c 2011-07-21 22:17:23.000000000 -0400
71965 +++ linux-3.0.4/security/integrity/ima/ima_fs.c 2011-08-23 21:47:56.000000000 -0400
71966 @@ -28,12 +28,12 @@
71967 static int valid_policy = 1;
71968 #define TMPBUFLEN 12
71969 static ssize_t ima_show_htable_value(char __user *buf, size_t count,
71970 - loff_t *ppos, atomic_long_t *val)
71971 + loff_t *ppos, atomic_long_unchecked_t *val)
71972 {
71973 char tmpbuf[TMPBUFLEN];
71974 ssize_t len;
71975
71976 - len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read(val));
71977 + len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read_unchecked(val));
71978 return simple_read_from_buffer(buf, count, ppos, tmpbuf, len);
71979 }
71980
71981 diff -urNp linux-3.0.4/security/integrity/ima/ima.h linux-3.0.4/security/integrity/ima/ima.h
71982 --- linux-3.0.4/security/integrity/ima/ima.h 2011-07-21 22:17:23.000000000 -0400
71983 +++ linux-3.0.4/security/integrity/ima/ima.h 2011-08-23 21:47:56.000000000 -0400
71984 @@ -85,8 +85,8 @@ void ima_add_violation(struct inode *ino
71985 extern spinlock_t ima_queue_lock;
71986
71987 struct ima_h_table {
71988 - atomic_long_t len; /* number of stored measurements in the list */
71989 - atomic_long_t violations;
71990 + atomic_long_unchecked_t len; /* number of stored measurements in the list */
71991 + atomic_long_unchecked_t violations;
71992 struct hlist_head queue[IMA_MEASURE_HTABLE_SIZE];
71993 };
71994 extern struct ima_h_table ima_htable;
71995 diff -urNp linux-3.0.4/security/integrity/ima/ima_queue.c linux-3.0.4/security/integrity/ima/ima_queue.c
71996 --- linux-3.0.4/security/integrity/ima/ima_queue.c 2011-07-21 22:17:23.000000000 -0400
71997 +++ linux-3.0.4/security/integrity/ima/ima_queue.c 2011-08-23 21:47:56.000000000 -0400
71998 @@ -79,7 +79,7 @@ static int ima_add_digest_entry(struct i
71999 INIT_LIST_HEAD(&qe->later);
72000 list_add_tail_rcu(&qe->later, &ima_measurements);
72001
72002 - atomic_long_inc(&ima_htable.len);
72003 + atomic_long_inc_unchecked(&ima_htable.len);
72004 key = ima_hash_key(entry->digest);
72005 hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]);
72006 return 0;
72007 diff -urNp linux-3.0.4/security/Kconfig linux-3.0.4/security/Kconfig
72008 --- linux-3.0.4/security/Kconfig 2011-07-21 22:17:23.000000000 -0400
72009 +++ linux-3.0.4/security/Kconfig 2011-09-17 00:58:04.000000000 -0400
72010 @@ -4,6 +4,558 @@
72011
72012 menu "Security options"
72013
72014 +source grsecurity/Kconfig
72015 +
72016 +menu "PaX"
72017 +
72018 + config ARCH_TRACK_EXEC_LIMIT
72019 + bool
72020 +
72021 + config PAX_KERNEXEC_PLUGIN
72022 + bool
72023 +
72024 + config PAX_PER_CPU_PGD
72025 + bool
72026 +
72027 + config TASK_SIZE_MAX_SHIFT
72028 + int
72029 + depends on X86_64
72030 + default 47 if !PAX_PER_CPU_PGD
72031 + default 42 if PAX_PER_CPU_PGD
72032 +
72033 + config PAX_ENABLE_PAE
72034 + bool
72035 + default y if (X86_32 && (MPENTIUM4 || MK8 || MPSC || MCORE2 || MATOM))
72036 +
72037 +config PAX
72038 + bool "Enable various PaX features"
72039 + depends on GRKERNSEC && (ALPHA || ARM || AVR32 || IA64 || MIPS || PARISC || PPC || SPARC || X86)
72040 + help
72041 + This allows you to enable various PaX features. PaX adds
72042 + intrusion prevention mechanisms to the kernel that reduce
72043 + the risks posed by exploitable memory corruption bugs.
72044 +
72045 +menu "PaX Control"
72046 + depends on PAX
72047 +
72048 +config PAX_SOFTMODE
72049 + bool 'Support soft mode'
72050 + select PAX_PT_PAX_FLAGS
72051 + help
72052 + Enabling this option will allow you to run PaX in soft mode, that
72053 + is, PaX features will not be enforced by default, only on executables
72054 + marked explicitly. You must also enable PT_PAX_FLAGS support as it
72055 + is the only way to mark executables for soft mode use.
72056 +
72057 + Soft mode can be activated by using the "pax_softmode=1" kernel command
72058 + line option on boot. Furthermore you can control various PaX features
72059 + at runtime via the entries in /proc/sys/kernel/pax.
72060 +
72061 +config PAX_EI_PAX
72062 + bool 'Use legacy ELF header marking'
72063 + help
72064 + Enabling this option will allow you to control PaX features on
72065 + a per executable basis via the 'chpax' utility available at
72066 + http://pax.grsecurity.net/. The control flags will be read from
72067 + an otherwise reserved part of the ELF header. This marking has
72068 + numerous drawbacks (no support for soft-mode, toolchain does not
72069 + know about the non-standard use of the ELF header) therefore it
72070 + has been deprecated in favour of PT_PAX_FLAGS support.
72071 +
72072 + Note that if you enable PT_PAX_FLAGS marking support as well,
72073 + the PT_PAX_FLAG marks will override the legacy EI_PAX marks.
72074 +
72075 +config PAX_PT_PAX_FLAGS
72076 + bool 'Use ELF program header marking'
72077 + help
72078 + Enabling this option will allow you to control PaX features on
72079 + a per executable basis via the 'paxctl' utility available at
72080 + http://pax.grsecurity.net/. The control flags will be read from
72081 + a PaX specific ELF program header (PT_PAX_FLAGS). This marking
72082 + has the benefits of supporting both soft mode and being fully
72083 + integrated into the toolchain (the binutils patch is available
72084 + from http://pax.grsecurity.net).
72085 +
72086 + If your toolchain does not support PT_PAX_FLAGS markings,
72087 + you can create one in most cases with 'paxctl -C'.
72088 +
72089 + Note that if you enable the legacy EI_PAX marking support as well,
72090 + the EI_PAX marks will be overridden by the PT_PAX_FLAGS marks.
72091 +
72092 +choice
72093 + prompt 'MAC system integration'
72094 + default PAX_HAVE_ACL_FLAGS
72095 + help
72096 + Mandatory Access Control systems have the option of controlling
72097 + PaX flags on a per executable basis, choose the method supported
72098 + by your particular system.
72099 +
72100 + - "none": if your MAC system does not interact with PaX,
72101 + - "direct": if your MAC system defines pax_set_initial_flags() itself,
72102 + - "hook": if your MAC system uses the pax_set_initial_flags_func callback.
72103 +
72104 + NOTE: this option is for developers/integrators only.
72105 +
72106 + config PAX_NO_ACL_FLAGS
72107 + bool 'none'
72108 +
72109 + config PAX_HAVE_ACL_FLAGS
72110 + bool 'direct'
72111 +
72112 + config PAX_HOOK_ACL_FLAGS
72113 + bool 'hook'
72114 +endchoice
72115 +
72116 +endmenu
72117 +
72118 +menu "Non-executable pages"
72119 + depends on PAX
72120 +
72121 +config PAX_NOEXEC
72122 + bool "Enforce non-executable pages"
72123 + depends on (PAX_EI_PAX || PAX_PT_PAX_FLAGS || PAX_HAVE_ACL_FLAGS || PAX_HOOK_ACL_FLAGS) && (ALPHA || (ARM && (CPU_V6 || CPU_V7)) || IA64 || MIPS || PARISC || PPC || S390 || SPARC || X86)
72124 + help
72125 + By design some architectures do not allow for protecting memory
72126 + pages against execution or even if they do, Linux does not make
72127 + use of this feature. In practice this means that if a page is
72128 + readable (such as the stack or heap) it is also executable.
72129 +
72130 + There is a well known exploit technique that makes use of this
72131 + fact and a common programming mistake where an attacker can
72132 + introduce code of his choice somewhere in the attacked program's
72133 + memory (typically the stack or the heap) and then execute it.
72134 +
72135 + If the attacked program was running with different (typically
72136 + higher) privileges than that of the attacker, then he can elevate
72137 + his own privilege level (e.g. get a root shell, write to files for
72138 + which he does not have write access to, etc).
72139 +
72140 + Enabling this option will let you choose from various features
72141 + that prevent the injection and execution of 'foreign' code in
72142 + a program.
72143 +
72144 + This will also break programs that rely on the old behaviour and
72145 + expect that dynamically allocated memory via the malloc() family
72146 + of functions is executable (which it is not). Notable examples
72147 + are the XFree86 4.x server, the java runtime and wine.
72148 +
72149 +config PAX_PAGEEXEC
72150 + bool "Paging based non-executable pages"
72151 + depends on PAX_NOEXEC && (!X86_32 || M586 || M586TSC || M586MMX || M686 || MPENTIUMII || MPENTIUMIII || MPENTIUMM || MCORE2 || MATOM || MPENTIUM4 || MPSC || MK7 || MK8 || MWINCHIPC6 || MWINCHIP2 || MWINCHIP3D || MVIAC3_2 || MVIAC7)
72152 + select S390_SWITCH_AMODE if S390
72153 + select S390_EXEC_PROTECT if S390
72154 + select ARCH_TRACK_EXEC_LIMIT if X86_32
72155 + help
72156 + This implementation is based on the paging feature of the CPU.
72157 + On i386 without hardware non-executable bit support there is a
72158 + variable but usually low performance impact, however on Intel's
72159 + P4 core based CPUs it is very high so you should not enable this
72160 + for kernels meant to be used on such CPUs.
72161 +
72162 + On alpha, avr32, ia64, parisc, sparc, sparc64, x86_64 and i386
72163 + with hardware non-executable bit support there is no performance
72164 + impact, on ppc the impact is negligible.
72165 +
72166 + Note that several architectures require various emulations due to
72167 + badly designed userland ABIs, this will cause a performance impact
72168 + but will disappear as soon as userland is fixed. For example, ppc
72169 + userland MUST have been built with secure-plt by a recent toolchain.
72170 +
72171 +config PAX_SEGMEXEC
72172 + bool "Segmentation based non-executable pages"
72173 + depends on PAX_NOEXEC && X86_32
72174 + help
72175 + This implementation is based on the segmentation feature of the
72176 + CPU and has a very small performance impact, however applications
72177 + will be limited to a 1.5 GB address space instead of the normal
72178 + 3 GB.
72179 +
72180 +config PAX_EMUTRAMP
72181 + bool "Emulate trampolines" if (PAX_PAGEEXEC || PAX_SEGMEXEC) && (PARISC || X86)
72182 + default y if PARISC
72183 + help
72184 + There are some programs and libraries that for one reason or
72185 + another attempt to execute special small code snippets from
72186 + non-executable memory pages. Most notable examples are the
72187 + signal handler return code generated by the kernel itself and
72188 + the GCC trampolines.
72189 +
72190 + If you enabled CONFIG_PAX_PAGEEXEC or CONFIG_PAX_SEGMEXEC then
72191 + such programs will no longer work under your kernel.
72192 +
72193 + As a remedy you can say Y here and use the 'chpax' or 'paxctl'
72194 + utilities to enable trampoline emulation for the affected programs
72195 + yet still have the protection provided by the non-executable pages.
72196 +
72197 + On parisc you MUST enable this option and EMUSIGRT as well, otherwise
72198 + your system will not even boot.
72199 +
72200 + Alternatively you can say N here and use the 'chpax' or 'paxctl'
72201 + utilities to disable CONFIG_PAX_PAGEEXEC and CONFIG_PAX_SEGMEXEC
72202 + for the affected files.
72203 +
72204 + NOTE: enabling this feature *may* open up a loophole in the
72205 + protection provided by non-executable pages that an attacker
72206 + could abuse. Therefore the best solution is to not have any
72207 + files on your system that would require this option. This can
72208 + be achieved by not using libc5 (which relies on the kernel
72209 + signal handler return code) and not using or rewriting programs
72210 + that make use of the nested function implementation of GCC.
72211 + Skilled users can just fix GCC itself so that it implements
72212 + nested function calls in a way that does not interfere with PaX.
72213 +
72214 +config PAX_EMUSIGRT
72215 + bool "Automatically emulate sigreturn trampolines"
72216 + depends on PAX_EMUTRAMP && PARISC
72217 + default y
72218 + help
72219 + Enabling this option will have the kernel automatically detect
72220 + and emulate signal return trampolines executing on the stack
72221 + that would otherwise lead to task termination.
72222 +
72223 + This solution is intended as a temporary one for users with
72224 + legacy versions of libc (libc5, glibc 2.0, uClibc before 0.9.17,
72225 + Modula-3 runtime, etc) or executables linked to such, basically
72226 + everything that does not specify its own SA_RESTORER function in
72227 + normal executable memory like glibc 2.1+ does.
72228 +
72229 + On parisc you MUST enable this option, otherwise your system will
72230 + not even boot.
72231 +
72232 + NOTE: this feature cannot be disabled on a per executable basis
72233 + and since it *does* open up a loophole in the protection provided
72234 + by non-executable pages, the best solution is to not have any
72235 + files on your system that would require this option.
72236 +
72237 +config PAX_MPROTECT
72238 + bool "Restrict mprotect()"
72239 + depends on (PAX_PAGEEXEC || PAX_SEGMEXEC)
72240 + help
72241 + Enabling this option will prevent programs from
72242 + - changing the executable status of memory pages that were
72243 + not originally created as executable,
72244 + - making read-only executable pages writable again,
72245 + - creating executable pages from anonymous memory,
72246 + - making read-only-after-relocations (RELRO) data pages writable again.
72247 +
72248 + You should say Y here to complete the protection provided by
72249 + the enforcement of non-executable pages.
72250 +
72251 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
72252 + this feature on a per file basis.
72253 +
72254 +config PAX_MPROTECT_COMPAT
72255 + bool "Use legacy/compat protection demoting (read help)"
72256 + depends on PAX_MPROTECT
72257 + default n
72258 + help
72259 + The current implementation of PAX_MPROTECT denies RWX allocations/mprotects
72260 + by sending the proper error code to the application. For some broken
72261 + userland, this can cause problems with Python or other applications. The
72262 + current implementation however allows for applications like clamav to
72263 + detect if JIT compilation/execution is allowed and to fall back gracefully
72264 + to an interpreter-based mode if it does not. While we encourage everyone
72265 + to use the current implementation as-is and push upstream to fix broken
72266 + userland (note that the RWX logging option can assist with this), in some
72267 + environments this may not be possible. Having to disable MPROTECT
72268 + completely on certain binaries reduces the security benefit of PaX,
72269 + so this option is provided for those environments to revert to the old
72270 + behavior.
72271 +
72272 +config PAX_ELFRELOCS
72273 + bool "Allow ELF text relocations (read help)"
72274 + depends on PAX_MPROTECT
72275 + default n
72276 + help
72277 + Non-executable pages and mprotect() restrictions are effective
72278 + in preventing the introduction of new executable code into an
72279 + attacked task's address space. There remain only two venues
72280 + for this kind of attack: if the attacker can execute already
72281 + existing code in the attacked task then he can either have it
72282 + create and mmap() a file containing his code or have it mmap()
72283 + an already existing ELF library that does not have position
72284 + independent code in it and use mprotect() on it to make it
72285 + writable and copy his code there. While protecting against
72286 + the former approach is beyond PaX, the latter can be prevented
72287 + by having only PIC ELF libraries on one's system (which do not
72288 + need to relocate their code). If you are sure this is your case,
72289 + as is the case with all modern Linux distributions, then leave
72290 + this option disabled. You should say 'n' here.
72291 +
72292 +config PAX_ETEXECRELOCS
72293 + bool "Allow ELF ET_EXEC text relocations"
72294 + depends on PAX_MPROTECT && (ALPHA || IA64 || PARISC)
72295 + select PAX_ELFRELOCS
72296 + default y
72297 + help
72298 + On some architectures there are incorrectly created applications
72299 + that require text relocations and would not work without enabling
72300 + this option. If you are an alpha, ia64 or parisc user, you should
72301 + enable this option and disable it once you have made sure that
72302 + none of your applications need it.
72303 +
72304 +config PAX_EMUPLT
72305 + bool "Automatically emulate ELF PLT"
72306 + depends on PAX_MPROTECT && (ALPHA || PARISC || SPARC)
72307 + default y
72308 + help
72309 + Enabling this option will have the kernel automatically detect
72310 + and emulate the Procedure Linkage Table entries in ELF files.
72311 + On some architectures such entries are in writable memory, and
72312 + become non-executable leading to task termination. Therefore
72313 + it is mandatory that you enable this option on alpha, parisc,
72314 + sparc and sparc64, otherwise your system would not even boot.
72315 +
72316 + NOTE: this feature *does* open up a loophole in the protection
72317 + provided by the non-executable pages, therefore the proper
72318 + solution is to modify the toolchain to produce a PLT that does
72319 + not need to be writable.
72320 +
72321 +config PAX_DLRESOLVE
72322 + bool 'Emulate old glibc resolver stub'
72323 + depends on PAX_EMUPLT && SPARC
72324 + default n
72325 + help
72326 + This option is needed if userland has an old glibc (before 2.4)
72327 + that puts a 'save' instruction into the runtime generated resolver
72328 + stub that needs special emulation.
72329 +
72330 +config PAX_KERNEXEC
72331 + bool "Enforce non-executable kernel pages"
72332 + depends on PAX_NOEXEC && (PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN
72333 + select PAX_PER_CPU_PGD if X86_64 || (X86_32 && X86_PAE)
72334 + select PAX_KERNEXEC_PLUGIN if X86_64
72335 + help
72336 + This is the kernel land equivalent of PAGEEXEC and MPROTECT,
72337 + that is, enabling this option will make it harder to inject
72338 + and execute 'foreign' code in kernel memory itself.
72339 +
72340 + Note that on x86_64 kernels there is a known regression when
72341 + this feature and KVM/VMX are both enabled in the host kernel.
72342 +
72343 +config PAX_KERNEXEC_MODULE_TEXT
72344 + int "Minimum amount of memory reserved for module code"
72345 + default "4"
72346 + depends on PAX_KERNEXEC && X86_32 && MODULES
72347 + help
72348 + Due to implementation details the kernel must reserve a fixed
72349 + amount of memory for module code at compile time that cannot be
72350 + changed at runtime. Here you can specify the minimum amount
72351 + in MB that will be reserved. Due to the same implementation
72352 + details this size will always be rounded up to the next 2/4 MB
72353 + boundary (depends on PAE) so the actually available memory for
72354 + module code will usually be more than this minimum.
72355 +
72356 + The default 4 MB should be enough for most users but if you have
72357 + an excessive number of modules (e.g., most distribution configs
72358 + compile many drivers as modules) or use huge modules such as
72359 + nvidia's kernel driver, you will need to adjust this amount.
72360 + A good rule of thumb is to look at your currently loaded kernel
72361 + modules and add up their sizes.
72362 +
72363 +endmenu
72364 +
72365 +menu "Address Space Layout Randomization"
72366 + depends on PAX
72367 +
72368 +config PAX_ASLR
72369 + bool "Address Space Layout Randomization"
72370 + depends on PAX_EI_PAX || PAX_PT_PAX_FLAGS || PAX_HAVE_ACL_FLAGS || PAX_HOOK_ACL_FLAGS
72371 + help
72372 + Many if not most exploit techniques rely on the knowledge of
72373 + certain addresses in the attacked program. The following options
72374 + will allow the kernel to apply a certain amount of randomization
72375 + to specific parts of the program thereby forcing an attacker to
72376 + guess them in most cases. Any failed guess will most likely crash
72377 + the attacked program which allows the kernel to detect such attempts
72378 + and react on them. PaX itself provides no reaction mechanisms,
72379 + instead it is strongly encouraged that you make use of Nergal's
72380 + segvguard (ftp://ftp.pl.openwall.com/misc/segvguard/) or grsecurity's
72381 + (http://www.grsecurity.net/) built-in crash detection features or
72382 + develop one yourself.
72383 +
72384 + By saying Y here you can choose to randomize the following areas:
72385 + - top of the task's kernel stack
72386 + - top of the task's userland stack
72387 + - base address for mmap() requests that do not specify one
72388 + (this includes all libraries)
72389 + - base address of the main executable
72390 +
72391 + It is strongly recommended to say Y here as address space layout
72392 + randomization has negligible impact on performance yet it provides
72393 + a very effective protection.
72394 +
72395 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
72396 + this feature on a per file basis.
72397 +
72398 +config PAX_RANDKSTACK
72399 + bool "Randomize kernel stack base"
72400 + depends on PAX_ASLR && X86_TSC && X86
72401 + help
72402 + By saying Y here the kernel will randomize every task's kernel
72403 + stack on every system call. This will not only force an attacker
72404 + to guess it but also prevent him from making use of possible
72405 + leaked information about it.
72406 +
72407 + Since the kernel stack is a rather scarce resource, randomization
72408 + may cause unexpected stack overflows, therefore you should very
72409 + carefully test your system. Note that once enabled in the kernel
72410 + configuration, this feature cannot be disabled on a per file basis.
72411 +
72412 +config PAX_RANDUSTACK
72413 + bool "Randomize user stack base"
72414 + depends on PAX_ASLR
72415 + help
72416 + By saying Y here the kernel will randomize every task's userland
72417 + stack. The randomization is done in two steps where the second
72418 + one may apply a big amount of shift to the top of the stack and
72419 + cause problems for programs that want to use lots of memory (more
72420 + than 2.5 GB if SEGMEXEC is not active, or 1.25 GB when it is).
72421 + For this reason the second step can be controlled by 'chpax' or
72422 + 'paxctl' on a per file basis.
72423 +
72424 +config PAX_RANDMMAP
72425 + bool "Randomize mmap() base"
72426 + depends on PAX_ASLR
72427 + help
72428 + By saying Y here the kernel will use a randomized base address for
72429 + mmap() requests that do not specify one themselves. As a result
72430 + all dynamically loaded libraries will appear at random addresses
72431 + and therefore be harder to exploit by a technique where an attacker
72432 + attempts to execute library code for his purposes (e.g. spawn a
72433 + shell from an exploited program that is running at an elevated
72434 + privilege level).
72435 +
72436 + Furthermore, if a program is relinked as a dynamic ELF file, its
72437 + base address will be randomized as well, completing the full
72438 + randomization of the address space layout. Attacking such programs
72439 + becomes a guess game. You can find an example of doing this at
72440 + http://pax.grsecurity.net/et_dyn.tar.gz and practical samples at
72441 + http://www.grsecurity.net/grsec-gcc-specs.tar.gz .
72442 +
72443 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control this
72444 + feature on a per file basis.
72445 +
72446 +endmenu
72447 +
72448 +menu "Miscellaneous hardening features"
72449 +
72450 +config PAX_MEMORY_SANITIZE
72451 + bool "Sanitize all freed memory"
72452 + help
72453 + By saying Y here the kernel will erase memory pages as soon as they
72454 + are freed. This in turn reduces the lifetime of data stored in the
72455 + pages, making it less likely that sensitive information such as
72456 + passwords, cryptographic secrets, etc stay in memory for too long.
72457 +
72458 + This is especially useful for programs whose runtime is short, long
72459 + lived processes and the kernel itself benefit from this as long as
72460 + they operate on whole memory pages and ensure timely freeing of pages
72461 + that may hold sensitive information.
72462 +
72463 + The tradeoff is performance impact, on a single CPU system kernel
72464 + compilation sees a 3% slowdown, other systems and workloads may vary
72465 + and you are advised to test this feature on your expected workload
72466 + before deploying it.
72467 +
72468 + Note that this feature does not protect data stored in live pages,
72469 + e.g., process memory swapped to disk may stay there for a long time.
72470 +
72471 +config PAX_MEMORY_STACKLEAK
72472 + bool "Sanitize kernel stack"
72473 + depends on X86
72474 + help
72475 + By saying Y here the kernel will erase the kernel stack before it
72476 + returns from a system call. This in turn reduces the information
72477 + that a kernel stack leak bug can reveal.
72478 +
72479 + Note that such a bug can still leak information that was put on
72480 + the stack by the current system call (the one eventually triggering
72481 + the bug) but traces of earlier system calls on the kernel stack
72482 + cannot leak anymore.
72483 +
72484 + The tradeoff is performance impact: on a single CPU system kernel
72485 + compilation sees a 1% slowdown, other systems and workloads may vary
72486 + and you are advised to test this feature on your expected workload
72487 + before deploying it.
72488 +
72489 + Note: full support for this feature requires gcc with plugin support
72490 + so make sure your compiler is at least gcc 4.5.0 (cross compilation
72491 + is not supported). Using older gcc versions means that functions
72492 + with large enough stack frames may leave uninitialized memory behind
72493 + that may be exposed to a later syscall leaking the stack.
72494 +
72495 +config PAX_MEMORY_UDEREF
72496 + bool "Prevent invalid userland pointer dereference"
72497 + depends on X86 && !UML_X86 && !XEN
72498 + select PAX_PER_CPU_PGD if X86_64
72499 + help
72500 + By saying Y here the kernel will be prevented from dereferencing
72501 + userland pointers in contexts where the kernel expects only kernel
72502 + pointers. This is both a useful runtime debugging feature and a
72503 + security measure that prevents exploiting a class of kernel bugs.
72504 +
72505 + The tradeoff is that some virtualization solutions may experience
72506 + a huge slowdown and therefore you should not enable this feature
72507 + for kernels meant to run in such environments. Whether a given VM
72508 + solution is affected or not is best determined by simply trying it
72509 + out, the performance impact will be obvious right on boot as this
72510 + mechanism engages from very early on. A good rule of thumb is that
72511 + VMs running on CPUs without hardware virtualization support (i.e.,
72512 + the majority of IA-32 CPUs) will likely experience the slowdown.
72513 +
72514 +config PAX_REFCOUNT
72515 + bool "Prevent various kernel object reference counter overflows"
72516 + depends on GRKERNSEC && (X86 || SPARC64)
72517 + help
72518 + By saying Y here the kernel will detect and prevent overflowing
72519 + various (but not all) kinds of object reference counters. Such
72520 + overflows can normally occur due to bugs only and are often, if
72521 + not always, exploitable.
72522 +
72523 + The tradeoff is that data structures protected by an overflowed
72524 + refcount will never be freed and therefore will leak memory. Note
72525 + that this leak also happens even without this protection but in
72526 + that case the overflow can eventually trigger the freeing of the
72527 + data structure while it is still being used elsewhere, resulting
72528 + in the exploitable situation that this feature prevents.
72529 +
72530 + Since this has a negligible performance impact, you should enable
72531 + this feature.
72532 +
72533 +config PAX_USERCOPY
72534 + bool "Harden heap object copies between kernel and userland"
72535 + depends on X86 || PPC || SPARC || ARM
72536 + depends on GRKERNSEC && (SLAB || SLUB || SLOB)
72537 + help
72538 + By saying Y here the kernel will enforce the size of heap objects
72539 + when they are copied in either direction between the kernel and
72540 + userland, even if only a part of the heap object is copied.
72541 +
72542 + Specifically, this checking prevents information leaking from the
72543 + kernel heap during kernel to userland copies (if the kernel heap
72544 + object is otherwise fully initialized) and prevents kernel heap
72545 + overflows during userland to kernel copies.
72546 +
72547 + Note that the current implementation provides the strictest bounds
72548 + checks for the SLUB allocator.
72549 +
72550 + Enabling this option also enables per-slab cache protection against
72551 + data in a given cache being copied into/out of via userland
72552 + accessors. Though the whitelist of regions will be reduced over
72553 + time, it notably protects important data structures like task structs.
72554 +
72555 + If frame pointers are enabled on x86, this option will also restrict
72556 + copies into and out of the kernel stack to local variables within a
72557 + single frame.
72558 +
72559 + Since this has a negligible performance impact, you should enable
72560 + this feature.
72561 +
72562 +endmenu
72563 +
72564 +endmenu
72565 +
72566 config KEYS
72567 bool "Enable access key retention support"
72568 help
72569 @@ -167,7 +719,7 @@ config INTEL_TXT
72570 config LSM_MMAP_MIN_ADDR
72571 int "Low address space for LSM to protect from user allocation"
72572 depends on SECURITY && SECURITY_SELINUX
72573 - default 32768 if ARM
72574 + default 32768 if ALPHA || ARM || PARISC || SPARC32
72575 default 65536
72576 help
72577 This is the portion of low virtual memory which should be protected
72578 diff -urNp linux-3.0.4/security/keys/keyring.c linux-3.0.4/security/keys/keyring.c
72579 --- linux-3.0.4/security/keys/keyring.c 2011-07-21 22:17:23.000000000 -0400
72580 +++ linux-3.0.4/security/keys/keyring.c 2011-08-23 21:47:56.000000000 -0400
72581 @@ -215,15 +215,15 @@ static long keyring_read(const struct ke
72582 ret = -EFAULT;
72583
72584 for (loop = 0; loop < klist->nkeys; loop++) {
72585 + key_serial_t serial;
72586 key = klist->keys[loop];
72587 + serial = key->serial;
72588
72589 tmp = sizeof(key_serial_t);
72590 if (tmp > buflen)
72591 tmp = buflen;
72592
72593 - if (copy_to_user(buffer,
72594 - &key->serial,
72595 - tmp) != 0)
72596 + if (copy_to_user(buffer, &serial, tmp))
72597 goto error;
72598
72599 buflen -= tmp;
72600 diff -urNp linux-3.0.4/security/min_addr.c linux-3.0.4/security/min_addr.c
72601 --- linux-3.0.4/security/min_addr.c 2011-07-21 22:17:23.000000000 -0400
72602 +++ linux-3.0.4/security/min_addr.c 2011-08-23 21:48:14.000000000 -0400
72603 @@ -14,6 +14,7 @@ unsigned long dac_mmap_min_addr = CONFIG
72604 */
72605 static void update_mmap_min_addr(void)
72606 {
72607 +#ifndef SPARC
72608 #ifdef CONFIG_LSM_MMAP_MIN_ADDR
72609 if (dac_mmap_min_addr > CONFIG_LSM_MMAP_MIN_ADDR)
72610 mmap_min_addr = dac_mmap_min_addr;
72611 @@ -22,6 +23,7 @@ static void update_mmap_min_addr(void)
72612 #else
72613 mmap_min_addr = dac_mmap_min_addr;
72614 #endif
72615 +#endif
72616 }
72617
72618 /*
72619 diff -urNp linux-3.0.4/security/security.c linux-3.0.4/security/security.c
72620 --- linux-3.0.4/security/security.c 2011-07-21 22:17:23.000000000 -0400
72621 +++ linux-3.0.4/security/security.c 2011-08-23 21:48:14.000000000 -0400
72622 @@ -25,8 +25,8 @@ static __initdata char chosen_lsm[SECURI
72623 /* things that live in capability.c */
72624 extern void __init security_fixup_ops(struct security_operations *ops);
72625
72626 -static struct security_operations *security_ops;
72627 -static struct security_operations default_security_ops = {
72628 +static struct security_operations *security_ops __read_only;
72629 +static struct security_operations default_security_ops __read_only = {
72630 .name = "default",
72631 };
72632
72633 @@ -67,7 +67,9 @@ int __init security_init(void)
72634
72635 void reset_security_ops(void)
72636 {
72637 + pax_open_kernel();
72638 security_ops = &default_security_ops;
72639 + pax_close_kernel();
72640 }
72641
72642 /* Save user chosen LSM */
72643 diff -urNp linux-3.0.4/security/selinux/hooks.c linux-3.0.4/security/selinux/hooks.c
72644 --- linux-3.0.4/security/selinux/hooks.c 2011-07-21 22:17:23.000000000 -0400
72645 +++ linux-3.0.4/security/selinux/hooks.c 2011-08-23 21:48:14.000000000 -0400
72646 @@ -93,7 +93,6 @@
72647 #define NUM_SEL_MNT_OPTS 5
72648
72649 extern int selinux_nlmsg_lookup(u16 sclass, u16 nlmsg_type, u32 *perm);
72650 -extern struct security_operations *security_ops;
72651
72652 /* SECMARK reference count */
72653 atomic_t selinux_secmark_refcount = ATOMIC_INIT(0);
72654 @@ -5454,7 +5453,7 @@ static int selinux_key_getsecurity(struc
72655
72656 #endif
72657
72658 -static struct security_operations selinux_ops = {
72659 +static struct security_operations selinux_ops __read_only = {
72660 .name = "selinux",
72661
72662 .ptrace_access_check = selinux_ptrace_access_check,
72663 diff -urNp linux-3.0.4/security/selinux/include/xfrm.h linux-3.0.4/security/selinux/include/xfrm.h
72664 --- linux-3.0.4/security/selinux/include/xfrm.h 2011-07-21 22:17:23.000000000 -0400
72665 +++ linux-3.0.4/security/selinux/include/xfrm.h 2011-08-23 21:47:56.000000000 -0400
72666 @@ -48,7 +48,7 @@ int selinux_xfrm_decode_session(struct s
72667
72668 static inline void selinux_xfrm_notify_policyload(void)
72669 {
72670 - atomic_inc(&flow_cache_genid);
72671 + atomic_inc_unchecked(&flow_cache_genid);
72672 }
72673 #else
72674 static inline int selinux_xfrm_enabled(void)
72675 diff -urNp linux-3.0.4/security/selinux/ss/services.c linux-3.0.4/security/selinux/ss/services.c
72676 --- linux-3.0.4/security/selinux/ss/services.c 2011-07-21 22:17:23.000000000 -0400
72677 +++ linux-3.0.4/security/selinux/ss/services.c 2011-08-23 21:48:14.000000000 -0400
72678 @@ -1814,6 +1814,8 @@ int security_load_policy(void *data, siz
72679 int rc = 0;
72680 struct policy_file file = { data, len }, *fp = &file;
72681
72682 + pax_track_stack();
72683 +
72684 if (!ss_initialized) {
72685 avtab_cache_init();
72686 rc = policydb_read(&policydb, fp);
72687 diff -urNp linux-3.0.4/security/smack/smack_lsm.c linux-3.0.4/security/smack/smack_lsm.c
72688 --- linux-3.0.4/security/smack/smack_lsm.c 2011-07-21 22:17:23.000000000 -0400
72689 +++ linux-3.0.4/security/smack/smack_lsm.c 2011-08-23 21:47:56.000000000 -0400
72690 @@ -3392,7 +3392,7 @@ static int smack_inode_getsecctx(struct
72691 return 0;
72692 }
72693
72694 -struct security_operations smack_ops = {
72695 +struct security_operations smack_ops __read_only = {
72696 .name = "smack",
72697
72698 .ptrace_access_check = smack_ptrace_access_check,
72699 diff -urNp linux-3.0.4/security/tomoyo/tomoyo.c linux-3.0.4/security/tomoyo/tomoyo.c
72700 --- linux-3.0.4/security/tomoyo/tomoyo.c 2011-07-21 22:17:23.000000000 -0400
72701 +++ linux-3.0.4/security/tomoyo/tomoyo.c 2011-08-23 21:47:56.000000000 -0400
72702 @@ -240,7 +240,7 @@ static int tomoyo_sb_pivotroot(struct pa
72703 * tomoyo_security_ops is a "struct security_operations" which is used for
72704 * registering TOMOYO.
72705 */
72706 -static struct security_operations tomoyo_security_ops = {
72707 +static struct security_operations tomoyo_security_ops __read_only = {
72708 .name = "tomoyo",
72709 .cred_alloc_blank = tomoyo_cred_alloc_blank,
72710 .cred_prepare = tomoyo_cred_prepare,
72711 diff -urNp linux-3.0.4/sound/aoa/codecs/onyx.c linux-3.0.4/sound/aoa/codecs/onyx.c
72712 --- linux-3.0.4/sound/aoa/codecs/onyx.c 2011-07-21 22:17:23.000000000 -0400
72713 +++ linux-3.0.4/sound/aoa/codecs/onyx.c 2011-08-23 21:47:56.000000000 -0400
72714 @@ -54,7 +54,7 @@ struct onyx {
72715 spdif_locked:1,
72716 analog_locked:1,
72717 original_mute:2;
72718 - int open_count;
72719 + local_t open_count;
72720 struct codec_info *codec_info;
72721
72722 /* mutex serializes concurrent access to the device
72723 @@ -753,7 +753,7 @@ static int onyx_open(struct codec_info_i
72724 struct onyx *onyx = cii->codec_data;
72725
72726 mutex_lock(&onyx->mutex);
72727 - onyx->open_count++;
72728 + local_inc(&onyx->open_count);
72729 mutex_unlock(&onyx->mutex);
72730
72731 return 0;
72732 @@ -765,8 +765,7 @@ static int onyx_close(struct codec_info_
72733 struct onyx *onyx = cii->codec_data;
72734
72735 mutex_lock(&onyx->mutex);
72736 - onyx->open_count--;
72737 - if (!onyx->open_count)
72738 + if (local_dec_and_test(&onyx->open_count))
72739 onyx->spdif_locked = onyx->analog_locked = 0;
72740 mutex_unlock(&onyx->mutex);
72741
72742 diff -urNp linux-3.0.4/sound/aoa/codecs/onyx.h linux-3.0.4/sound/aoa/codecs/onyx.h
72743 --- linux-3.0.4/sound/aoa/codecs/onyx.h 2011-07-21 22:17:23.000000000 -0400
72744 +++ linux-3.0.4/sound/aoa/codecs/onyx.h 2011-08-23 21:47:56.000000000 -0400
72745 @@ -11,6 +11,7 @@
72746 #include <linux/i2c.h>
72747 #include <asm/pmac_low_i2c.h>
72748 #include <asm/prom.h>
72749 +#include <asm/local.h>
72750
72751 /* PCM3052 register definitions */
72752
72753 diff -urNp linux-3.0.4/sound/core/seq/seq_device.c linux-3.0.4/sound/core/seq/seq_device.c
72754 --- linux-3.0.4/sound/core/seq/seq_device.c 2011-07-21 22:17:23.000000000 -0400
72755 +++ linux-3.0.4/sound/core/seq/seq_device.c 2011-08-23 21:47:56.000000000 -0400
72756 @@ -63,7 +63,7 @@ struct ops_list {
72757 int argsize; /* argument size */
72758
72759 /* operators */
72760 - struct snd_seq_dev_ops ops;
72761 + struct snd_seq_dev_ops *ops;
72762
72763 /* registred devices */
72764 struct list_head dev_list; /* list of devices */
72765 @@ -332,7 +332,7 @@ int snd_seq_device_register_driver(char
72766
72767 mutex_lock(&ops->reg_mutex);
72768 /* copy driver operators */
72769 - ops->ops = *entry;
72770 + ops->ops = entry;
72771 ops->driver |= DRIVER_LOADED;
72772 ops->argsize = argsize;
72773
72774 @@ -462,7 +462,7 @@ static int init_device(struct snd_seq_de
72775 dev->name, ops->id, ops->argsize, dev->argsize);
72776 return -EINVAL;
72777 }
72778 - if (ops->ops.init_device(dev) >= 0) {
72779 + if (ops->ops->init_device(dev) >= 0) {
72780 dev->status = SNDRV_SEQ_DEVICE_REGISTERED;
72781 ops->num_init_devices++;
72782 } else {
72783 @@ -489,7 +489,7 @@ static int free_device(struct snd_seq_de
72784 dev->name, ops->id, ops->argsize, dev->argsize);
72785 return -EINVAL;
72786 }
72787 - if ((result = ops->ops.free_device(dev)) >= 0 || result == -ENXIO) {
72788 + if ((result = ops->ops->free_device(dev)) >= 0 || result == -ENXIO) {
72789 dev->status = SNDRV_SEQ_DEVICE_FREE;
72790 dev->driver_data = NULL;
72791 ops->num_init_devices--;
72792 diff -urNp linux-3.0.4/sound/drivers/mts64.c linux-3.0.4/sound/drivers/mts64.c
72793 --- linux-3.0.4/sound/drivers/mts64.c 2011-07-21 22:17:23.000000000 -0400
72794 +++ linux-3.0.4/sound/drivers/mts64.c 2011-08-23 21:47:56.000000000 -0400
72795 @@ -28,6 +28,7 @@
72796 #include <sound/initval.h>
72797 #include <sound/rawmidi.h>
72798 #include <sound/control.h>
72799 +#include <asm/local.h>
72800
72801 #define CARD_NAME "Miditerminal 4140"
72802 #define DRIVER_NAME "MTS64"
72803 @@ -66,7 +67,7 @@ struct mts64 {
72804 struct pardevice *pardev;
72805 int pardev_claimed;
72806
72807 - int open_count;
72808 + local_t open_count;
72809 int current_midi_output_port;
72810 int current_midi_input_port;
72811 u8 mode[MTS64_NUM_INPUT_PORTS];
72812 @@ -696,7 +697,7 @@ static int snd_mts64_rawmidi_open(struct
72813 {
72814 struct mts64 *mts = substream->rmidi->private_data;
72815
72816 - if (mts->open_count == 0) {
72817 + if (local_read(&mts->open_count) == 0) {
72818 /* We don't need a spinlock here, because this is just called
72819 if the device has not been opened before.
72820 So there aren't any IRQs from the device */
72821 @@ -704,7 +705,7 @@ static int snd_mts64_rawmidi_open(struct
72822
72823 msleep(50);
72824 }
72825 - ++(mts->open_count);
72826 + local_inc(&mts->open_count);
72827
72828 return 0;
72829 }
72830 @@ -714,8 +715,7 @@ static int snd_mts64_rawmidi_close(struc
72831 struct mts64 *mts = substream->rmidi->private_data;
72832 unsigned long flags;
72833
72834 - --(mts->open_count);
72835 - if (mts->open_count == 0) {
72836 + if (local_dec_return(&mts->open_count) == 0) {
72837 /* We need the spinlock_irqsave here because we can still
72838 have IRQs at this point */
72839 spin_lock_irqsave(&mts->lock, flags);
72840 @@ -724,8 +724,8 @@ static int snd_mts64_rawmidi_close(struc
72841
72842 msleep(500);
72843
72844 - } else if (mts->open_count < 0)
72845 - mts->open_count = 0;
72846 + } else if (local_read(&mts->open_count) < 0)
72847 + local_set(&mts->open_count, 0);
72848
72849 return 0;
72850 }
72851 diff -urNp linux-3.0.4/sound/drivers/opl4/opl4_lib.c linux-3.0.4/sound/drivers/opl4/opl4_lib.c
72852 --- linux-3.0.4/sound/drivers/opl4/opl4_lib.c 2011-07-21 22:17:23.000000000 -0400
72853 +++ linux-3.0.4/sound/drivers/opl4/opl4_lib.c 2011-08-23 21:47:56.000000000 -0400
72854 @@ -28,7 +28,7 @@ MODULE_AUTHOR("Clemens Ladisch <clemens@
72855 MODULE_DESCRIPTION("OPL4 driver");
72856 MODULE_LICENSE("GPL");
72857
72858 -static void inline snd_opl4_wait(struct snd_opl4 *opl4)
72859 +static inline void snd_opl4_wait(struct snd_opl4 *opl4)
72860 {
72861 int timeout = 10;
72862 while ((inb(opl4->fm_port) & OPL4_STATUS_BUSY) && --timeout > 0)
72863 diff -urNp linux-3.0.4/sound/drivers/portman2x4.c linux-3.0.4/sound/drivers/portman2x4.c
72864 --- linux-3.0.4/sound/drivers/portman2x4.c 2011-07-21 22:17:23.000000000 -0400
72865 +++ linux-3.0.4/sound/drivers/portman2x4.c 2011-08-23 21:47:56.000000000 -0400
72866 @@ -47,6 +47,7 @@
72867 #include <sound/initval.h>
72868 #include <sound/rawmidi.h>
72869 #include <sound/control.h>
72870 +#include <asm/local.h>
72871
72872 #define CARD_NAME "Portman 2x4"
72873 #define DRIVER_NAME "portman"
72874 @@ -84,7 +85,7 @@ struct portman {
72875 struct pardevice *pardev;
72876 int pardev_claimed;
72877
72878 - int open_count;
72879 + local_t open_count;
72880 int mode[PORTMAN_NUM_INPUT_PORTS];
72881 struct snd_rawmidi_substream *midi_input[PORTMAN_NUM_INPUT_PORTS];
72882 };
72883 diff -urNp linux-3.0.4/sound/firewire/amdtp.c linux-3.0.4/sound/firewire/amdtp.c
72884 --- linux-3.0.4/sound/firewire/amdtp.c 2011-07-21 22:17:23.000000000 -0400
72885 +++ linux-3.0.4/sound/firewire/amdtp.c 2011-08-23 21:47:56.000000000 -0400
72886 @@ -371,7 +371,7 @@ static void queue_out_packet(struct amdt
72887 ptr = s->pcm_buffer_pointer + data_blocks;
72888 if (ptr >= pcm->runtime->buffer_size)
72889 ptr -= pcm->runtime->buffer_size;
72890 - ACCESS_ONCE(s->pcm_buffer_pointer) = ptr;
72891 + ACCESS_ONCE_RW(s->pcm_buffer_pointer) = ptr;
72892
72893 s->pcm_period_pointer += data_blocks;
72894 if (s->pcm_period_pointer >= pcm->runtime->period_size) {
72895 @@ -511,7 +511,7 @@ EXPORT_SYMBOL(amdtp_out_stream_start);
72896 */
72897 void amdtp_out_stream_update(struct amdtp_out_stream *s)
72898 {
72899 - ACCESS_ONCE(s->source_node_id_field) =
72900 + ACCESS_ONCE_RW(s->source_node_id_field) =
72901 (fw_parent_device(s->unit)->card->node_id & 0x3f) << 24;
72902 }
72903 EXPORT_SYMBOL(amdtp_out_stream_update);
72904 diff -urNp linux-3.0.4/sound/firewire/amdtp.h linux-3.0.4/sound/firewire/amdtp.h
72905 --- linux-3.0.4/sound/firewire/amdtp.h 2011-07-21 22:17:23.000000000 -0400
72906 +++ linux-3.0.4/sound/firewire/amdtp.h 2011-08-23 21:47:56.000000000 -0400
72907 @@ -146,7 +146,7 @@ static inline void amdtp_out_stream_pcm_
72908 static inline void amdtp_out_stream_pcm_trigger(struct amdtp_out_stream *s,
72909 struct snd_pcm_substream *pcm)
72910 {
72911 - ACCESS_ONCE(s->pcm) = pcm;
72912 + ACCESS_ONCE_RW(s->pcm) = pcm;
72913 }
72914
72915 /**
72916 diff -urNp linux-3.0.4/sound/firewire/isight.c linux-3.0.4/sound/firewire/isight.c
72917 --- linux-3.0.4/sound/firewire/isight.c 2011-07-21 22:17:23.000000000 -0400
72918 +++ linux-3.0.4/sound/firewire/isight.c 2011-08-23 21:47:56.000000000 -0400
72919 @@ -97,7 +97,7 @@ static void isight_update_pointers(struc
72920 ptr += count;
72921 if (ptr >= runtime->buffer_size)
72922 ptr -= runtime->buffer_size;
72923 - ACCESS_ONCE(isight->buffer_pointer) = ptr;
72924 + ACCESS_ONCE_RW(isight->buffer_pointer) = ptr;
72925
72926 isight->period_counter += count;
72927 if (isight->period_counter >= runtime->period_size) {
72928 @@ -308,7 +308,7 @@ static int isight_hw_params(struct snd_p
72929 if (err < 0)
72930 return err;
72931
72932 - ACCESS_ONCE(isight->pcm_active) = true;
72933 + ACCESS_ONCE_RW(isight->pcm_active) = true;
72934
72935 return 0;
72936 }
72937 @@ -341,7 +341,7 @@ static int isight_hw_free(struct snd_pcm
72938 {
72939 struct isight *isight = substream->private_data;
72940
72941 - ACCESS_ONCE(isight->pcm_active) = false;
72942 + ACCESS_ONCE_RW(isight->pcm_active) = false;
72943
72944 mutex_lock(&isight->mutex);
72945 isight_stop_streaming(isight);
72946 @@ -434,10 +434,10 @@ static int isight_trigger(struct snd_pcm
72947
72948 switch (cmd) {
72949 case SNDRV_PCM_TRIGGER_START:
72950 - ACCESS_ONCE(isight->pcm_running) = true;
72951 + ACCESS_ONCE_RW(isight->pcm_running) = true;
72952 break;
72953 case SNDRV_PCM_TRIGGER_STOP:
72954 - ACCESS_ONCE(isight->pcm_running) = false;
72955 + ACCESS_ONCE_RW(isight->pcm_running) = false;
72956 break;
72957 default:
72958 return -EINVAL;
72959 diff -urNp linux-3.0.4/sound/isa/cmi8330.c linux-3.0.4/sound/isa/cmi8330.c
72960 --- linux-3.0.4/sound/isa/cmi8330.c 2011-07-21 22:17:23.000000000 -0400
72961 +++ linux-3.0.4/sound/isa/cmi8330.c 2011-08-23 21:47:56.000000000 -0400
72962 @@ -172,7 +172,7 @@ struct snd_cmi8330 {
72963
72964 struct snd_pcm *pcm;
72965 struct snd_cmi8330_stream {
72966 - struct snd_pcm_ops ops;
72967 + snd_pcm_ops_no_const ops;
72968 snd_pcm_open_callback_t open;
72969 void *private_data; /* sb or wss */
72970 } streams[2];
72971 diff -urNp linux-3.0.4/sound/oss/sb_audio.c linux-3.0.4/sound/oss/sb_audio.c
72972 --- linux-3.0.4/sound/oss/sb_audio.c 2011-07-21 22:17:23.000000000 -0400
72973 +++ linux-3.0.4/sound/oss/sb_audio.c 2011-08-23 21:47:56.000000000 -0400
72974 @@ -901,7 +901,7 @@ sb16_copy_from_user(int dev,
72975 buf16 = (signed short *)(localbuf + localoffs);
72976 while (c)
72977 {
72978 - locallen = (c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
72979 + locallen = ((unsigned)c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
72980 if (copy_from_user(lbuf8,
72981 userbuf+useroffs + p,
72982 locallen))
72983 diff -urNp linux-3.0.4/sound/oss/swarm_cs4297a.c linux-3.0.4/sound/oss/swarm_cs4297a.c
72984 --- linux-3.0.4/sound/oss/swarm_cs4297a.c 2011-07-21 22:17:23.000000000 -0400
72985 +++ linux-3.0.4/sound/oss/swarm_cs4297a.c 2011-08-23 21:47:56.000000000 -0400
72986 @@ -2606,7 +2606,6 @@ static int __init cs4297a_init(void)
72987 {
72988 struct cs4297a_state *s;
72989 u32 pwr, id;
72990 - mm_segment_t fs;
72991 int rval;
72992 #ifndef CONFIG_BCM_CS4297A_CSWARM
72993 u64 cfg;
72994 @@ -2696,22 +2695,23 @@ static int __init cs4297a_init(void)
72995 if (!rval) {
72996 char *sb1250_duart_present;
72997
72998 +#if 0
72999 + mm_segment_t fs;
73000 fs = get_fs();
73001 set_fs(KERNEL_DS);
73002 -#if 0
73003 val = SOUND_MASK_LINE;
73004 mixer_ioctl(s, SOUND_MIXER_WRITE_RECSRC, (unsigned long) &val);
73005 for (i = 0; i < ARRAY_SIZE(initvol); i++) {
73006 val = initvol[i].vol;
73007 mixer_ioctl(s, initvol[i].mixch, (unsigned long) &val);
73008 }
73009 + set_fs(fs);
73010 // cs4297a_write_ac97(s, 0x18, 0x0808);
73011 #else
73012 // cs4297a_write_ac97(s, 0x5e, 0x180);
73013 cs4297a_write_ac97(s, 0x02, 0x0808);
73014 cs4297a_write_ac97(s, 0x18, 0x0808);
73015 #endif
73016 - set_fs(fs);
73017
73018 list_add(&s->list, &cs4297a_devs);
73019
73020 diff -urNp linux-3.0.4/sound/pci/hda/hda_codec.h linux-3.0.4/sound/pci/hda/hda_codec.h
73021 --- linux-3.0.4/sound/pci/hda/hda_codec.h 2011-07-21 22:17:23.000000000 -0400
73022 +++ linux-3.0.4/sound/pci/hda/hda_codec.h 2011-08-23 21:47:56.000000000 -0400
73023 @@ -615,7 +615,7 @@ struct hda_bus_ops {
73024 /* notify power-up/down from codec to controller */
73025 void (*pm_notify)(struct hda_bus *bus);
73026 #endif
73027 -};
73028 +} __no_const;
73029
73030 /* template to pass to the bus constructor */
73031 struct hda_bus_template {
73032 @@ -713,6 +713,7 @@ struct hda_codec_ops {
73033 #endif
73034 void (*reboot_notify)(struct hda_codec *codec);
73035 };
73036 +typedef struct hda_codec_ops __no_const hda_codec_ops_no_const;
73037
73038 /* record for amp information cache */
73039 struct hda_cache_head {
73040 @@ -743,7 +744,7 @@ struct hda_pcm_ops {
73041 struct snd_pcm_substream *substream);
73042 int (*cleanup)(struct hda_pcm_stream *info, struct hda_codec *codec,
73043 struct snd_pcm_substream *substream);
73044 -};
73045 +} __no_const;
73046
73047 /* PCM information for each substream */
73048 struct hda_pcm_stream {
73049 @@ -801,7 +802,7 @@ struct hda_codec {
73050 const char *modelname; /* model name for preset */
73051
73052 /* set by patch */
73053 - struct hda_codec_ops patch_ops;
73054 + hda_codec_ops_no_const patch_ops;
73055
73056 /* PCM to create, set by patch_ops.build_pcms callback */
73057 unsigned int num_pcms;
73058 diff -urNp linux-3.0.4/sound/pci/ice1712/ice1712.h linux-3.0.4/sound/pci/ice1712/ice1712.h
73059 --- linux-3.0.4/sound/pci/ice1712/ice1712.h 2011-07-21 22:17:23.000000000 -0400
73060 +++ linux-3.0.4/sound/pci/ice1712/ice1712.h 2011-08-23 21:47:56.000000000 -0400
73061 @@ -269,7 +269,7 @@ struct snd_ak4xxx_private {
73062 unsigned int mask_flags; /* total mask bits */
73063 struct snd_akm4xxx_ops {
73064 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
73065 - } ops;
73066 + } __no_const ops;
73067 };
73068
73069 struct snd_ice1712_spdif {
73070 @@ -285,7 +285,7 @@ struct snd_ice1712_spdif {
73071 int (*default_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
73072 void (*stream_get)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
73073 int (*stream_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
73074 - } ops;
73075 + } __no_const ops;
73076 };
73077
73078
73079 diff -urNp linux-3.0.4/sound/pci/ymfpci/ymfpci_main.c linux-3.0.4/sound/pci/ymfpci/ymfpci_main.c
73080 --- linux-3.0.4/sound/pci/ymfpci/ymfpci_main.c 2011-07-21 22:17:23.000000000 -0400
73081 +++ linux-3.0.4/sound/pci/ymfpci/ymfpci_main.c 2011-08-23 21:47:56.000000000 -0400
73082 @@ -202,8 +202,8 @@ static void snd_ymfpci_hw_stop(struct sn
73083 if ((snd_ymfpci_readl(chip, YDSXGR_STATUS) & 2) == 0)
73084 break;
73085 }
73086 - if (atomic_read(&chip->interrupt_sleep_count)) {
73087 - atomic_set(&chip->interrupt_sleep_count, 0);
73088 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
73089 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
73090 wake_up(&chip->interrupt_sleep);
73091 }
73092 __end:
73093 @@ -787,7 +787,7 @@ static void snd_ymfpci_irq_wait(struct s
73094 continue;
73095 init_waitqueue_entry(&wait, current);
73096 add_wait_queue(&chip->interrupt_sleep, &wait);
73097 - atomic_inc(&chip->interrupt_sleep_count);
73098 + atomic_inc_unchecked(&chip->interrupt_sleep_count);
73099 schedule_timeout_uninterruptible(msecs_to_jiffies(50));
73100 remove_wait_queue(&chip->interrupt_sleep, &wait);
73101 }
73102 @@ -825,8 +825,8 @@ static irqreturn_t snd_ymfpci_interrupt(
73103 snd_ymfpci_writel(chip, YDSXGR_MODE, mode);
73104 spin_unlock(&chip->reg_lock);
73105
73106 - if (atomic_read(&chip->interrupt_sleep_count)) {
73107 - atomic_set(&chip->interrupt_sleep_count, 0);
73108 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
73109 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
73110 wake_up(&chip->interrupt_sleep);
73111 }
73112 }
73113 @@ -2363,7 +2363,7 @@ int __devinit snd_ymfpci_create(struct s
73114 spin_lock_init(&chip->reg_lock);
73115 spin_lock_init(&chip->voice_lock);
73116 init_waitqueue_head(&chip->interrupt_sleep);
73117 - atomic_set(&chip->interrupt_sleep_count, 0);
73118 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
73119 chip->card = card;
73120 chip->pci = pci;
73121 chip->irq = -1;
73122 diff -urNp linux-3.0.4/sound/soc/soc-core.c linux-3.0.4/sound/soc/soc-core.c
73123 --- linux-3.0.4/sound/soc/soc-core.c 2011-09-02 18:11:21.000000000 -0400
73124 +++ linux-3.0.4/sound/soc/soc-core.c 2011-08-23 21:47:56.000000000 -0400
73125 @@ -1021,7 +1021,7 @@ static snd_pcm_uframes_t soc_pcm_pointer
73126 }
73127
73128 /* ASoC PCM operations */
73129 -static struct snd_pcm_ops soc_pcm_ops = {
73130 +static snd_pcm_ops_no_const soc_pcm_ops = {
73131 .open = soc_pcm_open,
73132 .close = soc_codec_close,
73133 .hw_params = soc_pcm_hw_params,
73134 @@ -2128,6 +2128,7 @@ static int soc_new_pcm(struct snd_soc_pc
73135 rtd->pcm = pcm;
73136 pcm->private_data = rtd;
73137 if (platform->driver->ops) {
73138 + /* this whole logic is broken... */
73139 soc_pcm_ops.mmap = platform->driver->ops->mmap;
73140 soc_pcm_ops.pointer = platform->driver->ops->pointer;
73141 soc_pcm_ops.ioctl = platform->driver->ops->ioctl;
73142 diff -urNp linux-3.0.4/sound/usb/card.h linux-3.0.4/sound/usb/card.h
73143 --- linux-3.0.4/sound/usb/card.h 2011-07-21 22:17:23.000000000 -0400
73144 +++ linux-3.0.4/sound/usb/card.h 2011-08-23 21:47:56.000000000 -0400
73145 @@ -44,6 +44,7 @@ struct snd_urb_ops {
73146 int (*prepare_sync)(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime, struct urb *u);
73147 int (*retire_sync)(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime, struct urb *u);
73148 };
73149 +typedef struct snd_urb_ops __no_const snd_urb_ops_no_const;
73150
73151 struct snd_usb_substream {
73152 struct snd_usb_stream *stream;
73153 @@ -93,7 +94,7 @@ struct snd_usb_substream {
73154 struct snd_pcm_hw_constraint_list rate_list; /* limited rates */
73155 spinlock_t lock;
73156
73157 - struct snd_urb_ops ops; /* callbacks (must be filled at init) */
73158 + snd_urb_ops_no_const ops; /* callbacks (must be filled at init) */
73159 };
73160
73161 struct snd_usb_stream {
73162 diff -urNp linux-3.0.4/tools/gcc/constify_plugin.c linux-3.0.4/tools/gcc/constify_plugin.c
73163 --- linux-3.0.4/tools/gcc/constify_plugin.c 1969-12-31 19:00:00.000000000 -0500
73164 +++ linux-3.0.4/tools/gcc/constify_plugin.c 2011-08-30 18:23:52.000000000 -0400
73165 @@ -0,0 +1,293 @@
73166 +/*
73167 + * Copyright 2011 by Emese Revfy <re.emese@gmail.com>
73168 + * Copyright 2011 by PaX Team <pageexec@freemail.hu>
73169 + * Licensed under the GPL v2, or (at your option) v3
73170 + *
73171 + * This gcc plugin constifies all structures which contain only function pointers or are explicitly marked for constification.
73172 + *
73173 + * Homepage:
73174 + * http://www.grsecurity.net/~ephox/const_plugin/
73175 + *
73176 + * Usage:
73177 + * $ gcc -I`gcc -print-file-name=plugin`/include -fPIC -shared -O2 -o constify_plugin.so constify_plugin.c
73178 + * $ gcc -fplugin=constify_plugin.so test.c -O2
73179 + */
73180 +
73181 +#include "gcc-plugin.h"
73182 +#include "config.h"
73183 +#include "system.h"
73184 +#include "coretypes.h"
73185 +#include "tree.h"
73186 +#include "tree-pass.h"
73187 +#include "intl.h"
73188 +#include "plugin-version.h"
73189 +#include "tm.h"
73190 +#include "toplev.h"
73191 +#include "function.h"
73192 +#include "tree-flow.h"
73193 +#include "plugin.h"
73194 +#include "diagnostic.h"
73195 +//#include "c-tree.h"
73196 +
73197 +#define C_TYPE_FIELDS_READONLY(TYPE) TREE_LANG_FLAG_1(TYPE)
73198 +
73199 +int plugin_is_GPL_compatible;
73200 +
73201 +static struct plugin_info const_plugin_info = {
73202 + .version = "20110826",
73203 + .help = "no-constify\tturn off constification\n",
73204 +};
73205 +
73206 +static void constify_type(tree type);
73207 +static bool walk_struct(tree node);
73208 +
73209 +static tree deconstify_type(tree old_type)
73210 +{
73211 + tree new_type, field;
73212 +
73213 + new_type = build_qualified_type(old_type, TYPE_QUALS(old_type) & ~TYPE_QUAL_CONST);
73214 + TYPE_FIELDS(new_type) = copy_list(TYPE_FIELDS(new_type));
73215 + for (field = TYPE_FIELDS(new_type); field; field = TREE_CHAIN(field))
73216 + DECL_FIELD_CONTEXT(field) = new_type;
73217 + TYPE_READONLY(new_type) = 0;
73218 + C_TYPE_FIELDS_READONLY(new_type) = 0;
73219 + return new_type;
73220 +}
73221 +
73222 +static tree handle_no_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
73223 +{
73224 + tree type;
73225 +
73226 + *no_add_attrs = true;
73227 + if (TREE_CODE(*node) == FUNCTION_DECL) {
73228 + error("%qE attribute does not apply to functions", name);
73229 + return NULL_TREE;
73230 + }
73231 +
73232 + if (TREE_CODE(*node) == VAR_DECL) {
73233 + error("%qE attribute does not apply to variables", name);
73234 + return NULL_TREE;
73235 + }
73236 +
73237 + if (TYPE_P(*node)) {
73238 + if (TREE_CODE(*node) == RECORD_TYPE || TREE_CODE(*node) == UNION_TYPE)
73239 + *no_add_attrs = false;
73240 + else
73241 + error("%qE attribute applies to struct and union types only", name);
73242 + return NULL_TREE;
73243 + }
73244 +
73245 + type = TREE_TYPE(*node);
73246 +
73247 + if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE) {
73248 + error("%qE attribute applies to struct and union types only", name);
73249 + return NULL_TREE;
73250 + }
73251 +
73252 + if (lookup_attribute(IDENTIFIER_POINTER(name), TYPE_ATTRIBUTES(type))) {
73253 + error("%qE attribute is already applied to the type", name);
73254 + return NULL_TREE;
73255 + }
73256 +
73257 + if (TREE_CODE(*node) == TYPE_DECL && !TYPE_READONLY(type)) {
73258 + error("%qE attribute used on type that is not constified", name);
73259 + return NULL_TREE;
73260 + }
73261 +
73262 + if (TREE_CODE(*node) == TYPE_DECL) {
73263 + TREE_TYPE(*node) = deconstify_type(type);
73264 + TREE_READONLY(*node) = 0;
73265 + return NULL_TREE;
73266 + }
73267 +
73268 + return NULL_TREE;
73269 +}
73270 +
73271 +static tree handle_do_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
73272 +{
73273 + *no_add_attrs = true;
73274 + if (!TYPE_P(*node)) {
73275 + error("%qE attribute applies to types only", name);
73276 + return NULL_TREE;
73277 + }
73278 +
73279 + if (TREE_CODE(*node) != RECORD_TYPE && TREE_CODE(*node) != UNION_TYPE) {
73280 + error("%qE attribute applies to struct and union types only", name);
73281 + return NULL_TREE;
73282 + }
73283 +
73284 + *no_add_attrs = false;
73285 + constify_type(*node);
73286 + return NULL_TREE;
73287 +}
73288 +
73289 +static struct attribute_spec no_const_attr = {
73290 + .name = "no_const",
73291 + .min_length = 0,
73292 + .max_length = 0,
73293 + .decl_required = false,
73294 + .type_required = false,
73295 + .function_type_required = false,
73296 + .handler = handle_no_const_attribute
73297 +};
73298 +
73299 +static struct attribute_spec do_const_attr = {
73300 + .name = "do_const",
73301 + .min_length = 0,
73302 + .max_length = 0,
73303 + .decl_required = false,
73304 + .type_required = false,
73305 + .function_type_required = false,
73306 + .handler = handle_do_const_attribute
73307 +};
73308 +
73309 +static void register_attributes(void *event_data, void *data)
73310 +{
73311 + register_attribute(&no_const_attr);
73312 + register_attribute(&do_const_attr);
73313 +}
73314 +
73315 +static void constify_type(tree type)
73316 +{
73317 + TYPE_READONLY(type) = 1;
73318 + C_TYPE_FIELDS_READONLY(type) = 1;
73319 +}
73320 +
73321 +static bool is_fptr(tree field)
73322 +{
73323 + tree ptr = TREE_TYPE(field);
73324 +
73325 + if (TREE_CODE(ptr) != POINTER_TYPE)
73326 + return false;
73327 +
73328 + return TREE_CODE(TREE_TYPE(ptr)) == FUNCTION_TYPE;
73329 +}
73330 +
73331 +static bool walk_struct(tree node)
73332 +{
73333 + tree field;
73334 +
73335 + if (lookup_attribute("no_const", TYPE_ATTRIBUTES(node)))
73336 + return false;
73337 +
73338 + if (TYPE_FIELDS(node) == NULL_TREE)
73339 + return false;
73340 +
73341 + for (field = TYPE_FIELDS(node); field; field = TREE_CHAIN(field)) {
73342 + tree type = TREE_TYPE(field);
73343 + enum tree_code code = TREE_CODE(type);
73344 + if (code == RECORD_TYPE || code == UNION_TYPE) {
73345 + if (!(walk_struct(type)))
73346 + return false;
73347 + } else if (!is_fptr(field) && !TREE_READONLY(field))
73348 + return false;
73349 + }
73350 + return true;
73351 +}
73352 +
73353 +static void finish_type(void *event_data, void *data)
73354 +{
73355 + tree type = (tree)event_data;
73356 +
73357 + if (type == NULL_TREE)
73358 + return;
73359 +
73360 + if (TYPE_READONLY(type))
73361 + return;
73362 +
73363 + if (walk_struct(type))
73364 + constify_type(type);
73365 +}
73366 +
73367 +static unsigned int check_local_variables(void);
73368 +
73369 +struct gimple_opt_pass pass_local_variable = {
73370 + {
73371 + .type = GIMPLE_PASS,
73372 + .name = "check_local_variables",
73373 + .gate = NULL,
73374 + .execute = check_local_variables,
73375 + .sub = NULL,
73376 + .next = NULL,
73377 + .static_pass_number = 0,
73378 + .tv_id = TV_NONE,
73379 + .properties_required = 0,
73380 + .properties_provided = 0,
73381 + .properties_destroyed = 0,
73382 + .todo_flags_start = 0,
73383 + .todo_flags_finish = 0
73384 + }
73385 +};
73386 +
73387 +static unsigned int check_local_variables(void)
73388 +{
73389 + tree var;
73390 + referenced_var_iterator rvi;
73391 +
73392 +#if __GNUC__ == 4 && __GNUC_MINOR__ == 5
73393 + FOR_EACH_REFERENCED_VAR(var, rvi) {
73394 +#else
73395 + FOR_EACH_REFERENCED_VAR(cfun, var, rvi) {
73396 +#endif
73397 + tree type = TREE_TYPE(var);
73398 +
73399 + if (!DECL_P(var) || TREE_STATIC(var) || DECL_EXTERNAL(var))
73400 + continue;
73401 +
73402 + if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE)
73403 + continue;
73404 +
73405 + if (!TYPE_READONLY(type))
73406 + continue;
73407 +
73408 +// if (lookup_attribute("no_const", DECL_ATTRIBUTES(var)))
73409 +// continue;
73410 +
73411 +// if (lookup_attribute("no_const", TYPE_ATTRIBUTES(type)))
73412 +// continue;
73413 +
73414 + if (walk_struct(type)) {
73415 + error("constified variable %qE cannot be local", var);
73416 + return 1;
73417 + }
73418 + }
73419 + return 0;
73420 +}
73421 +
73422 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
73423 +{
73424 + const char * const plugin_name = plugin_info->base_name;
73425 + const int argc = plugin_info->argc;
73426 + const struct plugin_argument * const argv = plugin_info->argv;
73427 + int i;
73428 + bool constify = true;
73429 +
73430 + struct register_pass_info local_variable_pass_info = {
73431 + .pass = &pass_local_variable.pass,
73432 + .reference_pass_name = "*referenced_vars",
73433 + .ref_pass_instance_number = 0,
73434 + .pos_op = PASS_POS_INSERT_AFTER
73435 + };
73436 +
73437 + if (!plugin_default_version_check(version, &gcc_version)) {
73438 + error(G_("incompatible gcc/plugin versions"));
73439 + return 1;
73440 + }
73441 +
73442 + for (i = 0; i < argc; ++i) {
73443 + if (!(strcmp(argv[i].key, "no-constify"))) {
73444 + constify = false;
73445 + continue;
73446 + }
73447 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
73448 + }
73449 +
73450 + register_callback(plugin_name, PLUGIN_INFO, NULL, &const_plugin_info);
73451 + if (constify) {
73452 + register_callback(plugin_name, PLUGIN_FINISH_TYPE, finish_type, NULL);
73453 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &local_variable_pass_info);
73454 + }
73455 + register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
73456 +
73457 + return 0;
73458 +}
73459 diff -urNp linux-3.0.4/tools/gcc/kallocstat_plugin.c linux-3.0.4/tools/gcc/kallocstat_plugin.c
73460 --- linux-3.0.4/tools/gcc/kallocstat_plugin.c 1969-12-31 19:00:00.000000000 -0500
73461 +++ linux-3.0.4/tools/gcc/kallocstat_plugin.c 2011-09-17 00:53:44.000000000 -0400
73462 @@ -0,0 +1,165 @@
73463 +/*
73464 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
73465 + * Licensed under the GPL v2
73466 + *
73467 + * Note: the choice of the license means that the compilation process is
73468 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
73469 + * but for the kernel it doesn't matter since it doesn't link against
73470 + * any of the gcc libraries
73471 + *
73472 + * gcc plugin to find the distribution of k*alloc sizes
73473 + *
73474 + * TODO:
73475 + *
73476 + * BUGS:
73477 + * - none known
73478 + */
73479 +#include "gcc-plugin.h"
73480 +#include "config.h"
73481 +#include "system.h"
73482 +#include "coretypes.h"
73483 +#include "tree.h"
73484 +#include "tree-pass.h"
73485 +#include "intl.h"
73486 +#include "plugin-version.h"
73487 +#include "tm.h"
73488 +#include "toplev.h"
73489 +#include "basic-block.h"
73490 +#include "gimple.h"
73491 +//#include "expr.h" where are you...
73492 +#include "diagnostic.h"
73493 +#include "rtl.h"
73494 +#include "emit-rtl.h"
73495 +#include "function.h"
73496 +
73497 +extern void print_gimple_stmt(FILE *, gimple, int, int);
73498 +
73499 +int plugin_is_GPL_compatible;
73500 +
73501 +static const char * const kalloc_functions[] = {
73502 + "__kmalloc",
73503 + "kmalloc",
73504 + "kmalloc_large",
73505 + "kmalloc_node",
73506 + "kmalloc_order",
73507 + "kmalloc_order_trace",
73508 + "kmalloc_slab",
73509 + "kzalloc",
73510 + "kzalloc_node",
73511 +};
73512 +
73513 +static struct plugin_info kallocstat_plugin_info = {
73514 + .version = "201109121100",
73515 +};
73516 +
73517 +static unsigned int execute_kallocstat(void);
73518 +
73519 +static struct gimple_opt_pass kallocstat_pass = {
73520 + .pass = {
73521 + .type = GIMPLE_PASS,
73522 + .name = "kallocstat",
73523 + .gate = NULL,
73524 + .execute = execute_kallocstat,
73525 + .sub = NULL,
73526 + .next = NULL,
73527 + .static_pass_number = 0,
73528 + .tv_id = TV_NONE,
73529 + .properties_required = 0,
73530 + .properties_provided = 0,
73531 + .properties_destroyed = 0,
73532 + .todo_flags_start = 0,
73533 + .todo_flags_finish = 0
73534 + }
73535 +};
73536 +
73537 +static bool is_kalloc(const char *fnname)
73538 +{
73539 + size_t i;
73540 +
73541 + for (i = 0; i < ARRAY_SIZE(kalloc_functions); i++)
73542 + if (!strcmp(fnname, kalloc_functions[i]))
73543 + return true;
73544 + return false;
73545 +}
73546 +
73547 +static unsigned int execute_kallocstat(void)
73548 +{
73549 + basic_block bb;
73550 + gimple_stmt_iterator gsi;
73551 +
73552 + // 1. loop through BBs and GIMPLE statements
73553 + FOR_EACH_BB(bb) {
73554 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
73555 + // gimple match:
73556 + tree fndecl, size;
73557 + gimple call_stmt;
73558 + const char *fnname;
73559 +
73560 + // is it a call
73561 + call_stmt = gsi_stmt(gsi);
73562 + if (!is_gimple_call(call_stmt))
73563 + continue;
73564 + fndecl = gimple_call_fndecl(call_stmt);
73565 + if (fndecl == NULL_TREE)
73566 + continue;
73567 + if (TREE_CODE(fndecl) != FUNCTION_DECL)
73568 + continue;
73569 +
73570 + // is it a call to k*alloc
73571 + fnname = IDENTIFIER_POINTER(DECL_NAME(fndecl));
73572 + if (!is_kalloc(fnname))
73573 + continue;
73574 +
73575 + // is the size arg the result of a simple const assignment
73576 + size = gimple_call_arg(call_stmt, 0);
73577 + while (true) {
73578 + gimple def_stmt;
73579 + expanded_location xloc;
73580 + size_t size_val;
73581 +
73582 + if (TREE_CODE(size) != SSA_NAME)
73583 + break;
73584 + def_stmt = SSA_NAME_DEF_STMT(size);
73585 + if (!def_stmt || !is_gimple_assign(def_stmt))
73586 + break;
73587 + if (gimple_num_ops(def_stmt) != 2)
73588 + break;
73589 + size = gimple_assign_rhs1(def_stmt);
73590 + if (!TREE_CONSTANT(size))
73591 + continue;
73592 + xloc = expand_location(gimple_location(def_stmt));
73593 + if (!xloc.file)
73594 + xloc = expand_location(DECL_SOURCE_LOCATION(current_function_decl));
73595 + size_val = TREE_INT_CST_LOW(size);
73596 + fprintf(stderr, "kallocsize: %8zu %8zx %s %s:%u\n", size_val, size_val, fnname, xloc.file, xloc.line);
73597 + break;
73598 + }
73599 +//print_gimple_stmt(stderr, call_stmt, 0, TDF_LINENO);
73600 +//debug_tree(gimple_call_fn(call_stmt));
73601 +//print_node(stderr, "pax", fndecl, 4);
73602 + }
73603 + }
73604 +
73605 + return 0;
73606 +}
73607 +
73608 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
73609 +{
73610 + const char * const plugin_name = plugin_info->base_name;
73611 + struct register_pass_info kallocstat_pass_info = {
73612 + .pass = &kallocstat_pass.pass,
73613 + .reference_pass_name = "ssa",
73614 + .ref_pass_instance_number = 0,
73615 + .pos_op = PASS_POS_INSERT_AFTER
73616 + };
73617 +
73618 + if (!plugin_default_version_check(version, &gcc_version)) {
73619 + error(G_("incompatible gcc/plugin versions"));
73620 + return 1;
73621 + }
73622 +
73623 + register_callback(plugin_name, PLUGIN_INFO, NULL, &kallocstat_plugin_info);
73624 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kallocstat_pass_info);
73625 +
73626 + return 0;
73627 +}
73628 diff -urNp linux-3.0.4/tools/gcc/kernexec_plugin.c linux-3.0.4/tools/gcc/kernexec_plugin.c
73629 --- linux-3.0.4/tools/gcc/kernexec_plugin.c 1969-12-31 19:00:00.000000000 -0500
73630 +++ linux-3.0.4/tools/gcc/kernexec_plugin.c 2011-09-19 09:16:58.000000000 -0400
73631 @@ -0,0 +1,265 @@
73632 +/*
73633 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
73634 + * Licensed under the GPL v2
73635 + *
73636 + * Note: the choice of the license means that the compilation process is
73637 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
73638 + * but for the kernel it doesn't matter since it doesn't link against
73639 + * any of the gcc libraries
73640 + *
73641 + * gcc plugin to make KERNEXEC/amd64 almost as good as it is on i386
73642 + *
73643 + * TODO:
73644 + *
73645 + * BUGS:
73646 + * - none known
73647 + */
73648 +#include "gcc-plugin.h"
73649 +#include "config.h"
73650 +#include "system.h"
73651 +#include "coretypes.h"
73652 +#include "tree.h"
73653 +#include "tree-pass.h"
73654 +#include "intl.h"
73655 +#include "plugin-version.h"
73656 +#include "tm.h"
73657 +#include "toplev.h"
73658 +#include "basic-block.h"
73659 +#include "gimple.h"
73660 +//#include "expr.h" where are you...
73661 +#include "diagnostic.h"
73662 +#include "rtl.h"
73663 +#include "emit-rtl.h"
73664 +#include "function.h"
73665 +#include "tree-flow.h"
73666 +
73667 +extern void print_gimple_stmt(FILE *, gimple, int, int);
73668 +
73669 +int plugin_is_GPL_compatible;
73670 +
73671 +static struct plugin_info kernexec_plugin_info = {
73672 + .version = "201109191200",
73673 +};
73674 +
73675 +static unsigned int execute_kernexec_fptr(void);
73676 +static unsigned int execute_kernexec_retaddr(void);
73677 +
73678 +static struct gimple_opt_pass kernexec_fptr_pass = {
73679 + .pass = {
73680 + .type = GIMPLE_PASS,
73681 + .name = "kernexec_fptr",
73682 + .gate = NULL,
73683 + .execute = execute_kernexec_fptr,
73684 + .sub = NULL,
73685 + .next = NULL,
73686 + .static_pass_number = 0,
73687 + .tv_id = TV_NONE,
73688 + .properties_required = 0,
73689 + .properties_provided = 0,
73690 + .properties_destroyed = 0,
73691 + .todo_flags_start = 0,
73692 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi
73693 + }
73694 +};
73695 +
73696 +static struct rtl_opt_pass kernexec_retaddr_pass = {
73697 + .pass = {
73698 + .type = RTL_PASS,
73699 + .name = "kernexec_retaddr",
73700 + .gate = NULL,
73701 + .execute = execute_kernexec_retaddr,
73702 + .sub = NULL,
73703 + .next = NULL,
73704 + .static_pass_number = 0,
73705 + .tv_id = TV_NONE,
73706 + .properties_required = 0,
73707 + .properties_provided = 0,
73708 + .properties_destroyed = 0,
73709 + .todo_flags_start = 0,
73710 + .todo_flags_finish = TODO_dump_func
73711 + }
73712 +};
73713 +
73714 +/*
73715 + * add special KERNEXEC instrumentation: force MSB of fptr to 1, which will produce
73716 + * a non-canonical address from a userland ptr and will just trigger a GPF on dereference
73717 + */
73718 +static void kernexec_instrument_fptr(gimple_stmt_iterator gsi)
73719 +{
73720 + gimple assign_intptr, assign_new_fptr, call_stmt;
73721 + tree intptr, old_fptr, new_fptr, kernexec_mask;
73722 +
73723 + call_stmt = gsi_stmt(gsi);
73724 + old_fptr = gimple_call_fn(call_stmt);
73725 +
73726 + // create temporary unsigned long variable used for bitops and cast fptr to it
73727 + intptr = create_tmp_var(long_unsigned_type_node, NULL);
73728 + add_referenced_var(intptr);
73729 + mark_sym_for_renaming(intptr);
73730 + assign_intptr = gimple_build_assign(intptr, fold_convert(long_unsigned_type_node, old_fptr));
73731 + update_stmt(assign_intptr);
73732 + gsi_insert_before(&gsi, assign_intptr, GSI_NEW_STMT);
73733 +
73734 + gsi_next(&gsi);
73735 +
73736 + // apply logical or to temporary unsigned long and bitmask
73737 + kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0x8000000000000000LL);
73738 +// kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0xffffffff80000000LL);
73739 + assign_intptr = gimple_build_assign(intptr, fold_build2(BIT_IOR_EXPR, long_long_unsigned_type_node, intptr, kernexec_mask));
73740 + update_stmt(assign_intptr);
73741 + gsi_insert_before(&gsi, assign_intptr, GSI_NEW_STMT);
73742 +
73743 + gsi_next(&gsi);
73744 +
73745 + // cast temporary unsigned long back to a temporary fptr variable
73746 + new_fptr = create_tmp_var(TREE_TYPE(old_fptr), NULL);
73747 + add_referenced_var(new_fptr);
73748 + mark_sym_for_renaming(new_fptr);
73749 + assign_new_fptr = gimple_build_assign(new_fptr, fold_convert(TREE_TYPE(old_fptr), intptr));
73750 + update_stmt(assign_new_fptr);
73751 + gsi_insert_before(&gsi, assign_new_fptr, GSI_NEW_STMT);
73752 +
73753 + gsi_next(&gsi);
73754 +
73755 + // replace call stmt fn with the new fptr
73756 + gimple_call_set_fn(call_stmt, new_fptr);
73757 + update_stmt(call_stmt);
73758 +}
73759 +
73760 +/*
73761 + * find all C level function pointer dereferences and forcibly set the highest bit of the pointer
73762 + */
73763 +static unsigned int execute_kernexec_fptr(void)
73764 +{
73765 + basic_block bb;
73766 + gimple_stmt_iterator gsi;
73767 +
73768 + // 1. loop through BBs and GIMPLE statements
73769 + FOR_EACH_BB(bb) {
73770 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
73771 + // gimple match: h_1 = get_fptr (); D.2709_3 = h_1 (x_2(D));
73772 + tree fn;
73773 + gimple call_stmt;
73774 +
73775 + // is it a call ...
73776 + call_stmt = gsi_stmt(gsi);
73777 + if (!is_gimple_call(call_stmt))
73778 + continue;
73779 + fn = gimple_call_fn(call_stmt);
73780 + if (TREE_CODE(fn) == ADDR_EXPR)
73781 + continue;
73782 + if (TREE_CODE(fn) != SSA_NAME)
73783 + gcc_unreachable();
73784 +
73785 + // ... through a function pointer
73786 + fn = SSA_NAME_VAR(fn);
73787 + if (TREE_CODE(fn) != VAR_DECL && TREE_CODE(fn) != PARM_DECL)
73788 + continue;
73789 + fn = TREE_TYPE(fn);
73790 + if (TREE_CODE(fn) != POINTER_TYPE)
73791 + continue;
73792 + fn = TREE_TYPE(fn);
73793 + if (TREE_CODE(fn) != FUNCTION_TYPE)
73794 + continue;
73795 +
73796 + kernexec_instrument_fptr(gsi);
73797 +
73798 +//debug_tree(gimple_call_fn(call_stmt));
73799 +//print_gimple_stmt(stderr, call_stmt, 0, TDF_LINENO);
73800 + }
73801 + }
73802 +
73803 + return 0;
73804 +}
73805 +
73806 +// add special KERNEXEC instrumentation: orb $0x80,7(%rsp) just before retn
73807 +static void kernexec_instrument_retaddr(rtx insn)
73808 +{
73809 + rtx ret_addr, clob, or;
73810 +
73811 + start_sequence();
73812 +
73813 + // compute 7(%rsp)
73814 + ret_addr = gen_rtx_MEM(QImode, gen_rtx_PLUS(Pmode, stack_pointer_rtx, GEN_INT(7)));
73815 + MEM_VOLATILE_P(ret_addr) = 1;
73816 +
73817 + // create orb $0x80,7(%rsp)
73818 + or = gen_rtx_SET(VOIDmode, ret_addr, gen_rtx_IOR(QImode, ret_addr, GEN_INT(0xffffffffffffff80)));
73819 + clob = gen_rtx_CLOBBER(VOIDmode, gen_rtx_REG(CCmode, FLAGS_REG));
73820 +
73821 + // put everything together
73822 + or = emit_insn(gen_rtx_PARALLEL(VOIDmode, gen_rtvec(2, or, clob)));
73823 + RTX_FRAME_RELATED_P(or) = 1;
73824 +
73825 + end_sequence();
73826 +
73827 + emit_insn_before(or, insn);
73828 +}
73829 +
73830 +/*
73831 + * find all asm level function returns and forcibly set the highest bit of the return address
73832 + */
73833 +static unsigned int execute_kernexec_retaddr(void)
73834 +{
73835 + rtx insn;
73836 +
73837 + // 1. find function returns
73838 + for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
73839 + // rtl match: (jump_insn 41 40 42 2 (return) fptr.c:42 634 {return_internal} (nil))
73840 + // (jump_insn 12 9 11 2 (parallel [ (return) (unspec [ (0) ] UNSPEC_REP) ]) fptr.c:46 635 {return_internal_long} (nil))
73841 + rtx body;
73842 +
73843 + // is it a retn
73844 + if (!JUMP_P(insn))
73845 + continue;
73846 + body = PATTERN(insn);
73847 + if (GET_CODE(body) == PARALLEL)
73848 + body = XVECEXP(body, 0, 0);
73849 + if (GET_CODE(body) != RETURN)
73850 + continue;
73851 + kernexec_instrument_retaddr(insn);
73852 + }
73853 +
73854 +// print_simple_rtl(stderr, get_insns());
73855 +// print_rtl(stderr, get_insns());
73856 +
73857 + return 0;
73858 +}
73859 +
73860 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
73861 +{
73862 + const char * const plugin_name = plugin_info->base_name;
73863 + const int argc = plugin_info->argc;
73864 + const struct plugin_argument * const argv = plugin_info->argv;
73865 + int i;
73866 + struct register_pass_info kernexec_fptr_pass_info = {
73867 + .pass = &kernexec_fptr_pass.pass,
73868 + .reference_pass_name = "ssa",
73869 + .ref_pass_instance_number = 0,
73870 + .pos_op = PASS_POS_INSERT_AFTER
73871 + };
73872 + struct register_pass_info kernexec_retaddr_pass_info = {
73873 + .pass = &kernexec_retaddr_pass.pass,
73874 + .reference_pass_name = "pro_and_epilogue",
73875 + .ref_pass_instance_number = 0,
73876 + .pos_op = PASS_POS_INSERT_AFTER
73877 + };
73878 +
73879 + if (!plugin_default_version_check(version, &gcc_version)) {
73880 + error(G_("incompatible gcc/plugin versions"));
73881 + return 1;
73882 + }
73883 +
73884 + register_callback(plugin_name, PLUGIN_INFO, NULL, &kernexec_plugin_info);
73885 +
73886 + for (i = 0; i < argc; ++i)
73887 + error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
73888 +
73889 + if (TARGET_64BIT == 0 || ix86_cmodel != CM_KERNEL)
73890 + return 0;
73891 +
73892 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_fptr_pass_info);
73893 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_retaddr_pass_info);
73894 +
73895 + return 0;
73896 +}
73897 diff -urNp linux-3.0.4/tools/gcc/Makefile linux-3.0.4/tools/gcc/Makefile
73898 --- linux-3.0.4/tools/gcc/Makefile 1969-12-31 19:00:00.000000000 -0500
73899 +++ linux-3.0.4/tools/gcc/Makefile 2011-09-17 00:53:44.000000000 -0400
73900 @@ -0,0 +1,14 @@
73901 +#CC := gcc
73902 +#PLUGIN_SOURCE_FILES := pax_plugin.c
73903 +#PLUGIN_OBJECT_FILES := $(patsubst %.c,%.o,$(PLUGIN_SOURCE_FILES))
73904 +GCCPLUGINS_DIR := $(shell $(HOSTCC) -print-file-name=plugin)
73905 +#CFLAGS += -I$(GCCPLUGINS_DIR)/include -fPIC -O2 -Wall -W
73906 +
73907 +HOST_EXTRACFLAGS += -I$(GCCPLUGINS_DIR)/include
73908 +
73909 +hostlibs-y := stackleak_plugin.so constify_plugin.so kallocstat_plugin.so kernexec_plugin.so
73910 +always := $(hostlibs-y)
73911 +stackleak_plugin-objs := stackleak_plugin.o
73912 +constify_plugin-objs := constify_plugin.o
73913 +kallocstat_plugin-objs := kallocstat_plugin.o
73914 +kernexec_plugin-objs := kernexec_plugin.o
73915 diff -urNp linux-3.0.4/tools/gcc/stackleak_plugin.c linux-3.0.4/tools/gcc/stackleak_plugin.c
73916 --- linux-3.0.4/tools/gcc/stackleak_plugin.c 1969-12-31 19:00:00.000000000 -0500
73917 +++ linux-3.0.4/tools/gcc/stackleak_plugin.c 2011-09-17 00:53:44.000000000 -0400
73918 @@ -0,0 +1,251 @@
73919 +/*
73920 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
73921 + * Licensed under the GPL v2
73922 + *
73923 + * Note: the choice of the license means that the compilation process is
73924 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
73925 + * but for the kernel it doesn't matter since it doesn't link against
73926 + * any of the gcc libraries
73927 + *
73928 + * gcc plugin to help implement various PaX features
73929 + *
73930 + * - track lowest stack pointer
73931 + *
73932 + * TODO:
73933 + * - initialize all local variables
73934 + *
73935 + * BUGS:
73936 + * - none known
73937 + */
73938 +#include "gcc-plugin.h"
73939 +#include "config.h"
73940 +#include "system.h"
73941 +#include "coretypes.h"
73942 +#include "tree.h"
73943 +#include "tree-pass.h"
73944 +#include "intl.h"
73945 +#include "plugin-version.h"
73946 +#include "tm.h"
73947 +#include "toplev.h"
73948 +#include "basic-block.h"
73949 +#include "gimple.h"
73950 +//#include "expr.h" where are you...
73951 +#include "diagnostic.h"
73952 +#include "rtl.h"
73953 +#include "emit-rtl.h"
73954 +#include "function.h"
73955 +
73956 +int plugin_is_GPL_compatible;
73957 +
73958 +static int track_frame_size = -1;
73959 +static const char track_function[] = "pax_track_stack";
73960 +static bool init_locals;
73961 +
73962 +static struct plugin_info stackleak_plugin_info = {
73963 + .version = "201109112100",
73964 + .help = "track-lowest-sp=nn\ttrack sp in functions whose frame size is at least nn bytes\n"
73965 +// "initialize-locals\t\tforcibly initialize all stack frames\n"
73966 +};
73967 +
73968 +static bool gate_stackleak_track_stack(void);
73969 +static unsigned int execute_stackleak_tree_instrument(void);
73970 +static unsigned int execute_stackleak_final(void);
73971 +
73972 +static struct gimple_opt_pass stackleak_tree_instrument_pass = {
73973 + .pass = {
73974 + .type = GIMPLE_PASS,
73975 + .name = "stackleak_tree_instrument",
73976 + .gate = gate_stackleak_track_stack,
73977 + .execute = execute_stackleak_tree_instrument,
73978 + .sub = NULL,
73979 + .next = NULL,
73980 + .static_pass_number = 0,
73981 + .tv_id = TV_NONE,
73982 + .properties_required = PROP_gimple_leh | PROP_cfg,
73983 + .properties_provided = 0,
73984 + .properties_destroyed = 0,
73985 + .todo_flags_start = 0, //TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts,
73986 + .todo_flags_finish = TODO_verify_stmts | TODO_dump_func
73987 + }
73988 +};
73989 +
73990 +static struct rtl_opt_pass stackleak_final_rtl_opt_pass = {
73991 + .pass = {
73992 + .type = RTL_PASS,
73993 + .name = "stackleak_final",
73994 + .gate = gate_stackleak_track_stack,
73995 + .execute = execute_stackleak_final,
73996 + .sub = NULL,
73997 + .next = NULL,
73998 + .static_pass_number = 0,
73999 + .tv_id = TV_NONE,
74000 + .properties_required = 0,
74001 + .properties_provided = 0,
74002 + .properties_destroyed = 0,
74003 + .todo_flags_start = 0,
74004 + .todo_flags_finish = TODO_dump_func
74005 + }
74006 +};
74007 +
74008 +static bool gate_stackleak_track_stack(void)
74009 +{
74010 + return track_frame_size >= 0;
74011 +}
74012 +
74013 +static void stackleak_add_instrumentation(gimple_stmt_iterator *gsi, bool before)
74014 +{
74015 + gimple call;
74016 + tree fndecl, type;
74017 +
74018 + // insert call to void pax_track_stack(void)
74019 + type = build_function_type_list(void_type_node, NULL_TREE);
74020 + fndecl = build_fn_decl(track_function, type);
74021 + DECL_ASSEMBLER_NAME(fndecl); // for LTO
74022 + call = gimple_build_call(fndecl, 0);
74023 + if (before)
74024 + gsi_insert_before(gsi, call, GSI_CONTINUE_LINKING);
74025 + else
74026 + gsi_insert_after(gsi, call, GSI_CONTINUE_LINKING);
74027 +}
74028 +
74029 +static unsigned int execute_stackleak_tree_instrument(void)
74030 +{
74031 + basic_block bb, entry_bb;
74032 + gimple_stmt_iterator gsi;
74033 + bool prologue_instrumented = false;
74034 +
74035 + entry_bb = ENTRY_BLOCK_PTR_FOR_FUNCTION(cfun)->next_bb;
74036 +
74037 + // 1. loop through BBs and GIMPLE statements
74038 + FOR_EACH_BB(bb) {
74039 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
74040 + // gimple match: align 8 built-in BUILT_IN_NORMAL:BUILT_IN_ALLOCA attributes <tree_list 0xb7576450>
74041 + tree fndecl;
74042 + gimple stmt = gsi_stmt(gsi);
74043 +
74044 + if (!is_gimple_call(stmt))
74045 + continue;
74046 + fndecl = gimple_call_fndecl(stmt);
74047 + if (!fndecl)
74048 + continue;
74049 + if (TREE_CODE(fndecl) != FUNCTION_DECL)
74050 + continue;
74051 + if (!DECL_BUILT_IN(fndecl))
74052 + continue;
74053 + if (DECL_BUILT_IN_CLASS(fndecl) != BUILT_IN_NORMAL)
74054 + continue;
74055 + if (DECL_FUNCTION_CODE(fndecl) != BUILT_IN_ALLOCA)
74056 + continue;
74057 +
74058 + // 2. insert track call after each __builtin_alloca call
74059 + stackleak_add_instrumentation(&gsi, false);
74060 + if (bb == entry_bb)
74061 + prologue_instrumented = true;
74062 +// print_node(stderr, "pax", fndecl, 4);
74063 + }
74064 + }
74065 +
74066 + // 3. insert track call at the beginning
74067 + if (!prologue_instrumented) {
74068 + gsi = gsi_start_bb(entry_bb);
74069 + stackleak_add_instrumentation(&gsi, true);
74070 + }
74071 +
74072 + return 0;
74073 +}
74074 +
74075 +static unsigned int execute_stackleak_final(void)
74076 +{
74077 + rtx insn;
74078 +
74079 + if (cfun->calls_alloca)
74080 + return 0;
74081 +
74082 + // keep calls only if function frame is big enough
74083 + if (get_frame_size() >= track_frame_size)
74084 + return 0;
74085 +
74086 + // 1. find pax_track_stack calls
74087 + for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
74088 + // rtl match: (call_insn 8 7 9 3 (call (mem (symbol_ref ("pax_track_stack") [flags 0x41] <function_decl 0xb7470e80 pax_track_stack>) [0 S1 A8]) (4)) -1 (nil) (nil))
74089 + rtx body;
74090 +
74091 + if (!CALL_P(insn))
74092 + continue;
74093 + body = PATTERN(insn);
74094 + if (GET_CODE(body) != CALL)
74095 + continue;
74096 + body = XEXP(body, 0);
74097 + if (GET_CODE(body) != MEM)
74098 + continue;
74099 + body = XEXP(body, 0);
74100 + if (GET_CODE(body) != SYMBOL_REF)
74101 + continue;
74102 + if (strcmp(XSTR(body, 0), track_function))
74103 + continue;
74104 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
74105 + // 2. delete call
74106 + delete_insn_and_edges(insn);
74107 + }
74108 +
74109 +// print_simple_rtl(stderr, get_insns());
74110 +// print_rtl(stderr, get_insns());
74111 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
74112 +
74113 + return 0;
74114 +}
74115 +
74116 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
74117 +{
74118 + const char * const plugin_name = plugin_info->base_name;
74119 + const int argc = plugin_info->argc;
74120 + const struct plugin_argument * const argv = plugin_info->argv;
74121 + int i;
74122 + struct register_pass_info stackleak_tree_instrument_pass_info = {
74123 + .pass = &stackleak_tree_instrument_pass.pass,
74124 +// .reference_pass_name = "tree_profile",
74125 + .reference_pass_name = "optimized",
74126 + .ref_pass_instance_number = 0,
74127 + .pos_op = PASS_POS_INSERT_AFTER
74128 + };
74129 + struct register_pass_info stackleak_final_pass_info = {
74130 + .pass = &stackleak_final_rtl_opt_pass.pass,
74131 + .reference_pass_name = "final",
74132 + .ref_pass_instance_number = 0,
74133 + .pos_op = PASS_POS_INSERT_BEFORE
74134 + };
74135 +
74136 + if (!plugin_default_version_check(version, &gcc_version)) {
74137 + error(G_("incompatible gcc/plugin versions"));
74138 + return 1;
74139 + }
74140 +
74141 + register_callback(plugin_name, PLUGIN_INFO, NULL, &stackleak_plugin_info);
74142 +
74143 + for (i = 0; i < argc; ++i) {
74144 + if (!strcmp(argv[i].key, "track-lowest-sp")) {
74145 + if (!argv[i].value) {
74146 + error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
74147 + continue;
74148 + }
74149 + track_frame_size = atoi(argv[i].value);
74150 + if (argv[i].value[0] < '0' || argv[i].value[0] > '9' || track_frame_size < 0)
74151 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
74152 + continue;
74153 + }
74154 + if (!strcmp(argv[i].key, "initialize-locals")) {
74155 + if (argv[i].value) {
74156 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
74157 + continue;
74158 + }
74159 + init_locals = true;
74160 + continue;
74161 + }
74162 + error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
74163 + }
74164 +
74165 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_tree_instrument_pass_info);
74166 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_final_pass_info);
74167 +
74168 + return 0;
74169 +}
74170 diff -urNp linux-3.0.4/usr/gen_init_cpio.c linux-3.0.4/usr/gen_init_cpio.c
74171 --- linux-3.0.4/usr/gen_init_cpio.c 2011-07-21 22:17:23.000000000 -0400
74172 +++ linux-3.0.4/usr/gen_init_cpio.c 2011-08-23 21:47:56.000000000 -0400
74173 @@ -303,7 +303,7 @@ static int cpio_mkfile(const char *name,
74174 int retval;
74175 int rc = -1;
74176 int namesize;
74177 - int i;
74178 + unsigned int i;
74179
74180 mode |= S_IFREG;
74181
74182 @@ -392,9 +392,10 @@ static char *cpio_replace_env(char *new_
74183 *env_var = *expanded = '\0';
74184 strncat(env_var, start + 2, end - start - 2);
74185 strncat(expanded, new_location, start - new_location);
74186 - strncat(expanded, getenv(env_var), PATH_MAX);
74187 - strncat(expanded, end + 1, PATH_MAX);
74188 + strncat(expanded, getenv(env_var), PATH_MAX - strlen(expanded));
74189 + strncat(expanded, end + 1, PATH_MAX - strlen(expanded));
74190 strncpy(new_location, expanded, PATH_MAX);
74191 + new_location[PATH_MAX] = 0;
74192 } else
74193 break;
74194 }
74195 diff -urNp linux-3.0.4/virt/kvm/kvm_main.c linux-3.0.4/virt/kvm/kvm_main.c
74196 --- linux-3.0.4/virt/kvm/kvm_main.c 2011-07-21 22:17:23.000000000 -0400
74197 +++ linux-3.0.4/virt/kvm/kvm_main.c 2011-08-23 21:47:56.000000000 -0400
74198 @@ -73,7 +73,7 @@ LIST_HEAD(vm_list);
74199
74200 static cpumask_var_t cpus_hardware_enabled;
74201 static int kvm_usage_count = 0;
74202 -static atomic_t hardware_enable_failed;
74203 +static atomic_unchecked_t hardware_enable_failed;
74204
74205 struct kmem_cache *kvm_vcpu_cache;
74206 EXPORT_SYMBOL_GPL(kvm_vcpu_cache);
74207 @@ -2176,7 +2176,7 @@ static void hardware_enable_nolock(void
74208
74209 if (r) {
74210 cpumask_clear_cpu(cpu, cpus_hardware_enabled);
74211 - atomic_inc(&hardware_enable_failed);
74212 + atomic_inc_unchecked(&hardware_enable_failed);
74213 printk(KERN_INFO "kvm: enabling virtualization on "
74214 "CPU%d failed\n", cpu);
74215 }
74216 @@ -2230,10 +2230,10 @@ static int hardware_enable_all(void)
74217
74218 kvm_usage_count++;
74219 if (kvm_usage_count == 1) {
74220 - atomic_set(&hardware_enable_failed, 0);
74221 + atomic_set_unchecked(&hardware_enable_failed, 0);
74222 on_each_cpu(hardware_enable_nolock, NULL, 1);
74223
74224 - if (atomic_read(&hardware_enable_failed)) {
74225 + if (atomic_read_unchecked(&hardware_enable_failed)) {
74226 hardware_disable_all_nolock();
74227 r = -EBUSY;
74228 }
74229 @@ -2498,7 +2498,7 @@ static void kvm_sched_out(struct preempt
74230 kvm_arch_vcpu_put(vcpu);
74231 }
74232
74233 -int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
74234 +int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
74235 struct module *module)
74236 {
74237 int r;
74238 @@ -2561,7 +2561,7 @@ int kvm_init(void *opaque, unsigned vcpu
74239 if (!vcpu_align)
74240 vcpu_align = __alignof__(struct kvm_vcpu);
74241 kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size, vcpu_align,
74242 - 0, NULL);
74243 + SLAB_USERCOPY, NULL);
74244 if (!kvm_vcpu_cache) {
74245 r = -ENOMEM;
74246 goto out_free_3;
74247 @@ -2571,9 +2571,11 @@ int kvm_init(void *opaque, unsigned vcpu
74248 if (r)
74249 goto out_free;
74250
74251 - kvm_chardev_ops.owner = module;
74252 - kvm_vm_fops.owner = module;
74253 - kvm_vcpu_fops.owner = module;
74254 + pax_open_kernel();
74255 + *(void **)&kvm_chardev_ops.owner = module;
74256 + *(void **)&kvm_vm_fops.owner = module;
74257 + *(void **)&kvm_vcpu_fops.owner = module;
74258 + pax_close_kernel();
74259
74260 r = misc_register(&kvm_dev);
74261 if (r) {